Dataset columns (per commit record):
- Commit Hash: string, length 40
- Author: 38 distinct values
- Date: string, length 19
- Description: string, length 8 to 113 characters
- Body: string, length 10 to 22.2k characters
- Footers: 56 distinct values
- Commit Message: string, length 28 to 22.3k characters
- Git Diff: string, length 140 to 3.61M characters
b299937d180cf6964053d9bab27f83dde8ea34b8
Stuart Carnie
2023-05-17 14:55:10
Resolve tags in GROUP BY clause
Any `VarRef` in the `Select` whose `data_type` field is `None` indicates that no matching field or tag was found in the schema of any of the children of the `FROM` clause
null
chore: Resolve tags in GROUP BY clause Any `VarRef` in the `Select` whose `data_type` field is `None` indicates that no matching field or tag was found in the schema of any of the children of the `FROM` clause
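Before the diff, a minimal standalone sketch of the resolution step described above, using simplified stand-in types rather than the real `influxdb_influxql_parser` AST (`Dimension`, `VarRefDataType`): a `VarRef` named in `GROUP BY` has its `data_type` set to tag when the name appears in the combined tag set of the `FROM` sources, and stays `None` otherwise.

```rust
use std::collections::HashSet;

/// Simplified stand-ins for the InfluxQL AST types touched by this change.
#[derive(Debug, PartialEq)]
enum VarRefDataType {
    Tag,
}

#[derive(Debug)]
struct VarRef {
    name: String,
    data_type: Option<VarRefDataType>,
}

/// Resolve GROUP BY dimensions that name a known tag. A `VarRef` whose
/// `data_type` is still `None` after this pass matched neither a field nor
/// a tag in any measurement of the FROM clause.
fn resolve_group_by_tags(group_by: &mut [VarRef], from_tag_set: &HashSet<String>) {
    for var_ref in group_by.iter_mut() {
        if from_tag_set.contains(var_ref.name.as_str()) {
            var_ref.data_type = Some(VarRefDataType::Tag);
        }
    }
}

fn main() {
    let tags: HashSet<String> = ["host", "device"].into_iter().map(String::from).collect();
    let mut group_by = vec![
        VarRef { name: "host".into(), data_type: None },
        VarRef { name: "non_existent".into(), data_type: None },
    ];
    resolve_group_by_tags(&mut group_by, &tags);
    assert_eq!(group_by[0].data_type, Some(VarRefDataType::Tag));
    assert_eq!(group_by[1].data_type, None); // unknown name stays unresolved
}
```

This mirrors the `GROUP BY host::tag, non_existent` expectation in the new tests: known tags are annotated, unknown names are left untouched.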
diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs index d8b3819e1f..304dec5349 100644 --- a/iox_query_influxql/src/plan/rewriter.rs +++ b/iox_query_influxql/src/plan/rewriter.rs @@ -101,9 +101,9 @@ impl RewriteSelect { check_features(stmt)?; let from = self.expand_from(s, stmt)?; - let (fields, group_by) = self.expand_projection(s, stmt, &from)?; - let condition = self.rewrite_condition(s, stmt, &from)?; - let tag_set = select_tag_set(s, &from); + let tag_set = from_tag_set(s, &from); + let (fields, group_by) = self.expand_projection(s, stmt, &from, &tag_set)?; + let condition = self.condition_resolve_types(s, stmt, &from)?; let SelectStatementInfo { projection_type } = select_statement_info(&fields, &group_by, stmt.fill)?; @@ -166,6 +166,7 @@ impl RewriteSelect { s: &dyn SchemaProvider, stmt: &SelectStatement, from: &[DataSource], + from_tag_set: &TagSet, ) -> Result<(Vec<Field>, Option<GroupByClause>)> { let tv = TypeEvaluator::new(s, from); let fields = stmt @@ -214,7 +215,7 @@ impl RewriteSelect { let (has_field_wildcard, has_group_by_wildcard) = has_wildcards(stmt); - let (fields, group_by) = if has_field_wildcard || has_group_by_wildcard { + let (fields, mut group_by) = if has_field_wildcard || has_group_by_wildcard { let (field_set, mut tag_set) = from_field_and_dimensions(s, from)?; if !has_group_by_wildcard { @@ -296,6 +297,16 @@ impl RewriteSelect { (fields, stmt.group_by.clone()) }; + // resolve possible tag references in group_by + if let Some(group_by) = group_by.as_mut() { + for dim in group_by.iter_mut() { + let Dimension::VarRef(var_ref) = dim else { continue }; + if from_tag_set.contains(var_ref.name.as_str()) { + var_ref.data_type = Some(VarRefDataType::Tag); + } + } + } + Ok((fields_resolve_aliases_and_types(s, fields, from)?, group_by)) } @@ -337,7 +348,7 @@ impl RewriteSelect { } /// Resolve the data types of any [`VarRef`] expressions in the `WHERE` condition. - fn rewrite_condition( + fn condition_resolve_types( &self, s: &dyn SchemaProvider, stmt: &SelectStatement, @@ -470,7 +481,7 @@ fn from_drop_empty(s: &dyn SchemaProvider, stmt: &mut Select) { } /// Determine the combined tag set for the specified `from`. 
-fn select_tag_set(s: &dyn SchemaProvider, from: &[DataSource]) -> TagSet { +fn from_tag_set(s: &dyn SchemaProvider, from: &[DataSource]) -> TagSet { let mut tag_set = TagSet::new(); for ds in from { @@ -2119,7 +2130,24 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, usage_idle::float AS usage_idle FROM cpu GROUP BY host" + "SELECT time::timestamp AS time, usage_idle::float AS usage_idle FROM cpu GROUP BY host::tag" + ); + + // resolves tag types from multiple measurements + let stmt = + parse_select("SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY host, device"); + let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); + assert_eq!( + stmt.to_string(), + "SELECT time::timestamp AS time, usage_idle::float AS usage_idle, bytes_free::integer AS bytes_free FROM cpu, disk GROUP BY host::tag, device::tag" + ); + + // does not resolve non-existent tag + let stmt = parse_select("SELECT usage_idle FROM cpu GROUP BY host, non_existent"); + let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); + assert_eq!( + stmt.to_string(), + "SELECT time::timestamp AS time, usage_idle::float AS usage_idle FROM cpu GROUP BY host::tag, non_existent" ); let stmt = parse_select("SELECT usage_idle FROM cpu GROUP BY *"); @@ -2278,7 +2306,7 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, cpu::tag AS cpu, usage_system::float AS usage_system FROM (SELECT time::timestamp AS time, usage_system::float AS usage_system FROM cpu GROUP BY cpu)" + "SELECT time::timestamp AS time, cpu::tag AS cpu, usage_system::float AS usage_system FROM (SELECT time::timestamp AS time, usage_system::float AS usage_system FROM cpu GROUP BY cpu::tag)" ); // Specifically project cpu tag from GROUP BY @@ -2288,7 +2316,7 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, cpu::tag AS cpu, usage_system::float AS usage_system FROM (SELECT time::timestamp AS time, usage_system::float AS usage_system FROM cpu GROUP BY cpu)" + "SELECT time::timestamp AS time, cpu::tag AS cpu, usage_system::float AS usage_system FROM (SELECT time::timestamp AS time, usage_system::float AS usage_system FROM cpu GROUP BY cpu::tag)" ); // Projects cpu tag in outer query separately from aliased cpu tag "foo" @@ -2298,7 +2326,7 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, cpu::tag AS cpu, foo::tag AS foo, usage_system::float AS usage_system FROM (SELECT time::timestamp AS time, cpu::tag AS foo, usage_system::float AS usage_system FROM cpu GROUP BY cpu)" + "SELECT time::timestamp AS time, cpu::tag AS cpu, foo::tag AS foo, usage_system::float AS usage_system FROM (SELECT time::timestamp AS time, cpu::tag AS foo, usage_system::float AS usage_system FROM cpu GROUP BY cpu::tag)" ); // Projects non-existent foo as a tag in the outer query @@ -2308,7 +2336,7 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, foo::tag AS foo, usage_idle::float AS usage_idle FROM (SELECT time::timestamp AS time, usage_idle::float AS usage_idle FROM cpu GROUP BY foo) GROUP BY cpu" + "SELECT time::timestamp AS time, foo::tag AS foo, usage_idle::float AS usage_idle FROM (SELECT time::timestamp AS time, 
usage_idle::float AS usage_idle FROM cpu GROUP BY foo) GROUP BY cpu::tag" ); // Normalises time to all leaf subqueries let stmt = parse_select( @@ -2317,7 +2345,7 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, max::float AS max FROM (SELECT time::timestamp AS time, max(value::float) AS max FROM (SELECT time::timestamp AS time, distinct(usage_idle::float) AS value FROM cpu FILL(NONE)) FILL(NONE)) GROUP BY cpu" + "SELECT time::timestamp AS time, max::float AS max FROM (SELECT time::timestamp AS time, max(value::float) AS max FROM (SELECT time::timestamp AS time, distinct(usage_idle::float) AS value FROM cpu FILL(NONE)) FILL(NONE)) GROUP BY cpu::tag" ); // Projects non-existent tag, "bytes_free" from cpu and also bytes_free field from disk @@ -2355,7 +2383,7 @@ mod test { let stmt = rewrite_select_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT time::timestamp AS time, bytes_free::integer AS bytes_free, bytes_free::tag AS bytes_free_1, usage_idle::float AS usage_idle FROM (SELECT time::timestamp AS time, usage_idle::float AS usage_idle FROM cpu GROUP BY bytes_free), (SELECT time::timestamp AS time, bytes_free::integer AS bytes_free FROM disk) GROUP BY cpu" + "SELECT time::timestamp AS time, bytes_free::integer AS bytes_free, bytes_free::tag AS bytes_free_1, usage_idle::float AS usage_idle FROM (SELECT time::timestamp AS time, usage_idle::float AS usage_idle FROM cpu GROUP BY bytes_free), (SELECT time::timestamp AS time, bytes_free::integer AS bytes_free FROM disk) GROUP BY cpu::tag" ); }
b74a81e79217f1cbbb0b29e2db486a066f3aba3b
Nga Tran
2023-04-25 06:16:21
add tests on month and year date_bin (#7648)
* test: add tests on month and year date_bin * fix: add IOX_COMPARE: uuid to get deterministic names for the output parquet_file in the explain ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
test: add tests on month and year date_bin (#7648) * test: add tests on month and year date_bin * fix: add IOX_COMPARE: uuid to get deterministic names for the output parquet_file in the explain --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
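The test cases in the diff below hinge on how month-granularity bins are anchored to an origin. As a rough, standalone illustration of that arithmetic (not the engine's `date_bin` implementation, and deliberately ignoring time-of-day and the day-of-month clamping that several of the tests exercise):

```rust
/// A calendar month expressed as a count of months since year 0.
fn months(year: i32, month: u32) -> i64 {
    year as i64 * 12 + (month as i64 - 1)
}

/// Floor `(year, month)` to a bin of `interval` months anchored at `origin`.
/// Day-of-month handling (e.g. an origin on the 29th or 31st) is what the
/// real tests below exercise and is omitted here.
fn date_bin_month(interval: i64, value: (i32, u32), origin: (i32, u32)) -> (i32, u32) {
    let delta = months(value.0, value.1) - months(origin.0, origin.1);
    // floor division so values before the origin still bin downwards
    let bins = delta.div_euclid(interval);
    let binned = months(origin.0, origin.1) + bins * interval;
    (binned.div_euclid(12) as i32, (binned.rem_euclid(12) + 1) as u32)
}

fn main() {
    // '1 month' bins anchored at 1970-01: 2022-03 stays in the 2022-03 bin
    assert_eq!(date_bin_month(1, (2022, 3), (1970, 1)), (2022, 3));
    // '3 month' bins anchored at 2021-05: 2022-04 falls into the 2022-02 bin,
    // matching the DATE_BIN('3 month', '2022-04-01', '2021-05-31') case below
    assert_eq!(date_bin_month(3, (2022, 4), (2021, 5)), (2022, 2));
}
```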
diff --git a/influxdb_iox/tests/query_tests2/cases.rs b/influxdb_iox/tests/query_tests2/cases.rs index 1b6a7cdd4b..46345f4cb9 100644 --- a/influxdb_iox/tests/query_tests2/cases.rs +++ b/influxdb_iox/tests/query_tests2/cases.rs @@ -49,6 +49,18 @@ async fn basic() { .await; } +#[tokio::test] +async fn date_bin() { + test_helpers::maybe_start_logging(); + + TestCase { + input: "cases/in/date_bin.sql", + chunk_stage: ChunkStage::All, + } + .run() + .await; +} + #[tokio::test] async fn dedup_and_predicates_parquet() { test_helpers::maybe_start_logging(); diff --git a/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql b/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql new file mode 100644 index 0000000000..6e15ecde38 --- /dev/null +++ b/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql @@ -0,0 +1,104 @@ +-- Date_bin tests +-- IOX_SETUP: OneMeasurementTwoSeries + +-- CONSTANT DATA or ARRAY DATA + +-- 1 month +select date_bin(INTERVAL '1 month', column1) + from (values + (timestamp '2022-01-01 00:00:00'), + (timestamp '2022-01-01 01:00:00'), + (timestamp '2022-01-02 00:00:00'), + (timestamp '2022-02-02 00:00:00'), + (timestamp '2022-02-15 00:00:00'), + (timestamp '2022-03-31 00:00:00') + ) as sq; + +-- 1 year + select date_bin('1 year', column1) + from (values + (timestamp '2022-01-01 00:00:00'), + (timestamp '2023-01-01 01:00:00'), + (timestamp '2022-01-02 00:00:00'), + (timestamp '2022-02-02 00:00:00'), + (timestamp '2022-02-15 00:00:00'), + (timestamp '2022-03-31 00:00:00') + ) as sq; + +-- origin is last date of the month 1970-12-31T00:15:00Z and not at midnight + select date_bin('1 month', column1, '1970-12-31T00:15:00Z') + from (values + (timestamp '2022-01-01 00:00:00'), + (timestamp '2022-01-01 01:00:00'), + (timestamp '2022-01-02 00:00:00'), + (timestamp '2022-02-02 00:00:00'), + (timestamp '2022-02-15 00:00:00'), + (timestamp '2022-03-31 00:00:00') + ) as sq; + + -- five months interval on constant + SELECT DATE_BIN('5 month', '2022-01-01T00:00:00Z'); + + -- origin is May 31 (last date of the month) to produce bin on Feb 28 + SELECT DATE_BIN('3 month', '2022-04-01T00:00:00Z', '2021-05-31T00:04:00Z'); + +-- origin is on Feb 29 and interval is one month. The bins will be: +-- # '2000-02-29T00:00:00' +-- # '2000-01-29T00:00:00' +-- # '1999-12-29T00:00:00' +-- # .... +-- # Reason: Even though 29 (or 28 for non-leap year) is the last date of Feb but it +-- # is not last date of other month. Months' chrono consider a month before or after that +-- # will land on the same 29th date. 
+select date_bin('1 month', timestamp '2000-01-31T00:00:00', timestamp '2000-02-29T00:00:00'); + +-- similar for the origin March 29 +select date_bin('1 month', timestamp '2000-01-31T00:00:00', timestamp '2000-03-29T00:00:00'); + +-- 3 year 1 months = 37 months +SELECT DATE_BIN('3 years 1 months', '2022-09-01 00:00:00Z'); + +-- DATA FORM TABLE + +-- Input data (by region, time) +SELECT * +FROM cpu +ORDER BY REGION, TIME; + +-- Input data (by time) +SELECT * +FROM cpu +ORDER BY TIME; + +-- 1 month +SELECT + date_bin('1 month', time) as month, + count(cpu.user) +from cpu +where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' +group by month; + +-- 1 month with origin +SELECT + date_bin('1 month', time, '1970-12-31T00:15:00Z') as month, + count(cpu.user) +from cpu +where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' +group by month; + +-- 3 months with origin on the last date of the month +select + date_bin('2 month', time, timestamp '2000-02-29T00:00:00') as month, + count(cpu.user) +from cpu +where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' +group by month; + +-- EXPLAIN +-- IOX_COMPARE: uuid +EXPLAIN SELECT + date_bin('1 month', time, '1970-12-31T00:15:00Z') as month, + count(cpu.user) +from cpu +where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' +group by month; \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected new file mode 100644 index 0000000000..000b433ed3 --- /dev/null +++ b/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected @@ -0,0 +1,119 @@ +-- Test Setup: OneMeasurementTwoSeries +-- SQL: select date_bin(INTERVAL '1 month', column1) from (values (timestamp '2022-01-01 00:00:00'), (timestamp '2022-01-01 01:00:00'), (timestamp '2022-01-02 00:00:00'), (timestamp '2022-02-02 00:00:00'), (timestamp '2022-02-15 00:00:00'), (timestamp '2022-03-31 00:00:00') ) as sq; ++---------------------------------------------------------------------------+ +| datebin(IntervalMonthDayNano("79228162514264337593543950336"),sq.column1) | ++---------------------------------------------------------------------------+ +| 2022-01-01T00:00:00Z | +| 2022-01-01T00:00:00Z | +| 2022-01-01T00:00:00Z | +| 2022-02-01T00:00:00Z | +| 2022-02-01T00:00:00Z | +| 2022-03-01T00:00:00Z | ++---------------------------------------------------------------------------+ +-- SQL: select date_bin('1 year', column1) from (values (timestamp '2022-01-01 00:00:00'), (timestamp '2023-01-01 01:00:00'), (timestamp '2022-01-02 00:00:00'), (timestamp '2022-02-02 00:00:00'), (timestamp '2022-02-15 00:00:00'), (timestamp '2022-03-31 00:00:00') ) as sq; ++------------------------------------+ +| datebin(Utf8("1 year"),sq.column1) | ++------------------------------------+ +| 2022-01-01T00:00:00Z | +| 2023-01-01T00:00:00Z | +| 2022-01-01T00:00:00Z | +| 2022-01-01T00:00:00Z | +| 2022-01-01T00:00:00Z | +| 2022-01-01T00:00:00Z | ++------------------------------------+ +-- SQL: select date_bin('1 month', column1, '1970-12-31T00:15:00Z') from (values (timestamp '2022-01-01 00:00:00'), (timestamp '2022-01-01 01:00:00'), (timestamp '2022-01-02 00:00:00'), (timestamp '2022-02-02 00:00:00'), (timestamp '2022-02-15 00:00:00'), (timestamp '2022-03-31 00:00:00') ) as sq; ++------------------------------------------------------------------+ +| datebin(Utf8("1 
month"),sq.column1,Utf8("1970-12-31T00:15:00Z")) | ++------------------------------------------------------------------+ +| 2021-12-31T00:15:00Z | +| 2021-12-31T00:15:00Z | +| 2021-12-31T00:15:00Z | +| 2022-01-31T00:15:00Z | +| 2022-01-31T00:15:00Z | +| 2022-02-28T00:15:00Z | ++------------------------------------------------------------------+ +-- SQL: SELECT DATE_BIN('5 month', '2022-01-01T00:00:00Z'); ++-------------------------------------------------------+ +| datebin(Utf8("5 month"),Utf8("2022-01-01T00:00:00Z")) | ++-------------------------------------------------------+ +| 2021-09-01T00:00:00Z | ++-------------------------------------------------------+ +-- SQL: SELECT DATE_BIN('3 month', '2022-04-01T00:00:00Z', '2021-05-31T00:04:00Z'); ++------------------------------------------------------------------------------------+ +| datebin(Utf8("3 month"),Utf8("2022-04-01T00:00:00Z"),Utf8("2021-05-31T00:04:00Z")) | ++------------------------------------------------------------------------------------+ +| 2022-02-28T00:04:00Z | ++------------------------------------------------------------------------------------+ +-- SQL: select date_bin('1 month', timestamp '2000-01-31T00:00:00', timestamp '2000-02-29T00:00:00'); ++----------------------------------------------------------------------------------+ +| datebin(Utf8("1 month"),Utf8("2000-01-31T00:00:00"),Utf8("2000-02-29T00:00:00")) | ++----------------------------------------------------------------------------------+ +| 2000-01-29T00:00:00Z | ++----------------------------------------------------------------------------------+ +-- SQL: select date_bin('1 month', timestamp '2000-01-31T00:00:00', timestamp '2000-03-29T00:00:00'); ++----------------------------------------------------------------------------------+ +| datebin(Utf8("1 month"),Utf8("2000-01-31T00:00:00"),Utf8("2000-03-29T00:00:00")) | ++----------------------------------------------------------------------------------+ +| 2000-01-29T00:00:00Z | ++----------------------------------------------------------------------------------+ +-- SQL: SELECT DATE_BIN('3 years 1 months', '2022-09-01 00:00:00Z'); ++----------------------------------------------------------------+ +| datebin(Utf8("3 years 1 months"),Utf8("2022-09-01 00:00:00Z")) | ++----------------------------------------------------------------+ +| 2022-06-01T00:00:00Z | ++----------------------------------------------------------------+ +-- SQL: SELECT * FROM cpu ORDER BY REGION, TIME; ++------+--------+----------------------+------+ +| idle | region | time | user | ++------+--------+----------------------+------+ +| 70.0 | a | 2000-05-05T12:20:00Z | 23.2 | +| | a | 2000-05-05T12:40:00Z | 21.0 | +| | b | 2000-05-05T12:31:00Z | 25.2 | +| 60.0 | b | 2000-05-05T12:39:00Z | 28.9 | ++------+--------+----------------------+------+ +-- SQL: SELECT * FROM cpu ORDER BY TIME; ++------+--------+----------------------+------+ +| idle | region | time | user | ++------+--------+----------------------+------+ +| 70.0 | a | 2000-05-05T12:20:00Z | 23.2 | +| | b | 2000-05-05T12:31:00Z | 25.2 | +| 60.0 | b | 2000-05-05T12:39:00Z | 28.9 | +| | a | 2000-05-05T12:40:00Z | 21.0 | ++------+--------+----------------------+------+ +-- SQL: SELECT date_bin('1 month', time) as month, count(cpu.user) from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by month; ++----------------------+-----------------+ +| month | COUNT(cpu.user) | ++----------------------+-----------------+ +| 2000-05-01T00:00:00Z 
| 4 | ++----------------------+-----------------+ +-- SQL: SELECT date_bin('1 month', time, '1970-12-31T00:15:00Z') as month, count(cpu.user) from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by month; ++----------------------+-----------------+ +| month | COUNT(cpu.user) | ++----------------------+-----------------+ +| 2000-04-30T00:15:00Z | 4 | ++----------------------+-----------------+ +-- SQL: select date_bin('2 month', time, timestamp '2000-02-29T00:00:00') as month, count(cpu.user) from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by month; ++----------------------+-----------------+ +| month | COUNT(cpu.user) | ++----------------------+-----------------+ +| 2000-04-29T00:00:00Z | 4 | ++----------------------+-----------------+ +-- SQL: EXPLAIN SELECT date_bin('1 month', time, '1970-12-31T00:15:00Z') as month, count(cpu.user) from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by month; +-- Results After Normalizing UUIDs +---------- +| plan_type | plan | +---------- +| logical_plan | Projection: datebin(Utf8("1 month"),cpu.time,Utf8("1970-12-31T00:15:00Z")) AS month, COUNT(cpu.user) | +| | Aggregate: groupBy=[[datebin(IntervalMonthDayNano("79228162514264337593543950336"), cpu.time, TimestampNanosecond(31450500000000000, None)) AS datebin(Utf8("1 month"),cpu.time,Utf8("1970-12-31T00:15:00Z"))]], aggr=[[COUNT(cpu.user)]] | +| | TableScan: cpu projection=[time, user], full_filters=[cpu.time >= TimestampNanosecond(957528000000000000, None), cpu.time <= TimestampNanosecond(957531540000000000, None)] | +| physical_plan | ProjectionExec: expr=[datebin(Utf8("1 month"),cpu.time,Utf8("1970-12-31T00:15:00Z"))@0 as month, COUNT(cpu.user)@1 as COUNT(cpu.user)] | +| | AggregateExec: mode=FinalPartitioned, gby=[datebin(Utf8("1 month"),cpu.time,Utf8("1970-12-31T00:15:00Z"))@0 as datebin(Utf8("1 month"),cpu.time,Utf8("1970-12-31T00:15:00Z"))], aggr=[COUNT(cpu.user)] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | RepartitionExec: partitioning=Hash([Column { name: "datebin(Utf8(\"1 month\"),cpu.time,Utf8(\"1970-12-31T00:15:00Z\"))", index: 0 }], 4), input_partitions=4 | +| | AggregateExec: mode=Partial, gby=[datebin(79228162514264337593543950336, time@0, 31450500000000000) as datebin(Utf8("1 month"),cpu.time,Utf8("1970-12-31T00:15:00Z"))], aggr=[COUNT(cpu.user)] | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@0 >= 957528000000000000 AND time@0 <= 957531540000000000 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time@2 >= 957528000000000000 AND time@2 <= 957531540000000000, pruning_predicate=time_max@0 >= 957528000000000000 AND time_min@1 <= 957531540000000000, output_ordering=[time@0 ASC], projection=[time, user] | +| | | +---------- \ No newline at end of file
1f0ac4fbcfcfd2023d5afe00f8e99180f2b6051d
Dom Dwyer
2023-08-16 16:54:28
topics & topic sets
Adds an (unused) Topic and TopicSet type to the gossip crate. A Topic is a number from 0 to 63 inclusive that uniquely identifies a "type" of application payload. The TopicSet efficiently encodes the set of topics a node is interested in as a 64-bit wide bitmap, with each Topic encoded as a single bit shifted left N places, where N is the topic value. This allows for very cheap "is this node interested" set evaluation and space-efficient propagation of interest sets during PEX.
null
feat(gossip): topics & topic sets Adds an (unused) Topic and TopicSet type to the gossip crate. A Topic is a number from 0 to 63 inclusive that uniquely identifies a "type" of application payload. The TopicSet efficiently encodes the set of topics a node is interested in as a 64-bit wide bitmap, with each Topic encoded as a single bit shifted left N places, where N is the topic value. This allows for very cheap "is this node interested" set evaluation and space-efficient propagation of interest sets during PEX.
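A condensed standalone sketch of the bitmap encoding described above (the full types, with panicking validation and round-trip decoding, live in `gossip/src/topic_set.rs` in the diff below):

```rust
/// A topic ID in 0..=63 encoded as a single set bit in a u64.
#[derive(Clone, Copy)]
struct Topic(u64);

impl Topic {
    fn encode(id: u64) -> Self {
        assert!(id < 64, "topic ID must be less than 64");
        Topic(1 << id) // the id-th bit counted from the LSB
    }
}

/// The set of topics a node is interested in, as a 64-bit bitmap.
#[derive(Clone, Copy)]
struct TopicSet(u64);

impl TopicSet {
    fn empty() -> Self {
        TopicSet(0)
    }
    /// Idempotent: OR-ing the same topic bit in twice is a no-op.
    fn set_interested(&mut self, t: Topic) {
        self.0 |= t.0;
    }
    /// Cheap membership test: a single AND plus a compare.
    fn is_interested(&self, t: Topic) -> bool {
        self.0 & t.0 != 0
    }
}

fn main() {
    let mut set = TopicSet::empty();
    set.set_interested(Topic::encode(42));
    assert!(set.is_interested(Topic::encode(42)));
    assert!(!set.is_interested(Topic::encode(0)));
}
```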
diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index d7a2299480..bc90347baa 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -127,6 +127,7 @@ mod peers; mod proto; mod reactor; pub(crate) mod seed; +mod topic_set; use std::time::Duration; diff --git a/gossip/src/topic_set.rs b/gossip/src/topic_set.rs new file mode 100644 index 0000000000..a94e50fe42 --- /dev/null +++ b/gossip/src/topic_set.rs @@ -0,0 +1,155 @@ +/// A [`Topic`] contains a user-provided topic ID in the range 0 to 63 +/// inclusive, encoded into an internal bitmap form. +/// +/// A topic sets the Nth bit (from the LSB) to a 1, where N is the user-provided +/// topic ID. +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) struct Topic(u64); + +impl std::fmt::Debug for Topic { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("Topic") + .field(&format_args!("{:0>64b}", self.0)) + .finish() + } +} + +impl Topic { + /// Encode a topic ID (ranging from 0 to 63 inclusive) into a [`Topic`] + /// bitmap. + pub(crate) fn encode<T>(v: T) -> Self + where + T: Into<u64>, + { + let v = v.into(); + + // Validate the topic ID can be mapped to a single bit in a u64 + assert!(v <= (u64::BITS as u64 - 1), "topic ID must be less than 64"); + + // Map the topic ID into a bitset. + Self(1 << v) + } + + /// Construct a [`Topic`] from an encoded topic u64 containing single set + /// bit. + /// + /// # Panics + /// + /// Panics if there's no topic bit set in `v`. + pub(crate) fn from_encoded(v: u64) -> Self { + assert_eq!(v.count_ones(), 1, "encoded topic must contain 1 set bit"); + assert_ne!(v, 0, "topic ID must be non-zero"); + Self(v) + } + + /// Map a topic bitmap into an application-provided topic ID. + pub(crate) fn as_id(&self) -> u64 { + u64::BITS as u64 - 1 - self.0.leading_zeros() as u64 + } +} + +impl From<Topic> for u64 { + fn from(value: Topic) -> Self { + value.0 + } +} + +/// A set of [`Topic`] interests stored as a bitmap over 64 bits. +/// +/// Each [`Topic`] contains exactly 1 set bit in a u64, and a [`TopicSet`] +/// contains a set of [`Topic`] bits. +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) struct TopicSet(u64); + +impl std::fmt::Debug for TopicSet { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("TopicSet") + .field(&format_args!("{:0>64b}", self.0)) + .finish() + } +} + +// Default to being interested in all topics (all bits set). +impl Default for TopicSet { + fn default() -> Self { + Self(u64::MAX) + } +} + +impl From<TopicSet> for u64 { + fn from(v: TopicSet) -> Self { + v.0 + } +} + +impl From<u64> for TopicSet { + fn from(v: u64) -> Self { + Self(v) + } +} + +impl TopicSet { + /// Initialise a [`TopicSet`] that has no registered interests. + pub(crate) fn empty() -> Self { + Self(0) + } + + /// Mark this [`TopicSet`] as interested in receiving messages from the + /// specified [`Topic`]. + /// + /// This method is idempotent. + pub(crate) fn set_interested(&mut self, v: Topic) { + debug_assert_eq!(v.0.count_ones(), 1); + + self.0 |= v.0; + } + + /// Check if this [`TopicSet`] is interested in receiving messages from the + /// specified [`Topic`]. 
+ pub(crate) fn is_interested(&self, v: Topic) -> bool { + debug_assert_eq!(v.0.count_ones(), 1); + + (self.0 & v.0) != 0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_interests() { + let mut set = TopicSet::empty(); + + let topic_a = Topic::encode(0_u64); + let topic_b = Topic::encode(42_u64); + + assert!(!set.is_interested(topic_a)); + assert!(!set.is_interested(topic_b)); + + set.set_interested(topic_b); + set.set_interested(topic_b); // idempotent + + assert!(!set.is_interested(topic_a)); + assert!(set.is_interested(topic_b)); + + set.set_interested(topic_a); + + assert!(set.is_interested(topic_a)); + assert!(set.is_interested(topic_b)); + } + + #[test] + fn test_topic_round_trip() { + for i in 0..64 { + let topic = Topic::from_encoded(u64::from(Topic::encode(i))); + assert_eq!(i, topic.as_id()); + } + } + + #[test] + #[should_panic(expected = "topic ID must be less than 64")] + fn test_topic_64() { + Topic::encode(64_u64); + } +}
7f06f524eb6f0f1cea72b4b6b4ecc68f16f909b3
Marco Neumann
2023-09-08 10:07:22
i->q V2 metrics integration (#8687)
Same metric names as V1, so dashboards don't need any migration.
null
feat: i->q V2 metrics integration (#8687) Same metric names as V1, so dashboards don't need any migration.
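The layer added in the diff below classifies each request as success, error, or cancelled by recording the elapsed time from a tracker's `Drop` impl: a request whose tracker is dropped without ever reaching a terminal state counts as cancelled. A simplified standalone sketch of that pattern, using `std::time::Instant` and plain vectors in place of IOx's `TimeProvider` and `DurationHistogram`:

```rust
use std::time::{Duration, Instant};

/// Stand-ins for the three `ingester_duration` histogram recorders.
#[derive(Debug, Default)]
struct Metrics {
    success: Vec<Duration>,
    error: Vec<Duration>,
    cancelled: Vec<Duration>,
}

/// Records the elapsed time on drop. If the request never reached a
/// terminal state (`outcome` still `None`), it was cancelled.
struct Tracker<'a> {
    t_start: Instant,
    outcome: Option<Result<(), ()>>,
    metrics: &'a mut Metrics,
}

impl Drop for Tracker<'_> {
    fn drop(&mut self) {
        let d = self.t_start.elapsed();
        match self.outcome {
            Some(Ok(())) => self.metrics.success.push(d),
            Some(Err(())) => self.metrics.error.push(d),
            None => self.metrics.cancelled.push(d),
        }
    }
}

fn main() {
    let mut metrics = Metrics::default();
    {
        // a request whose payload stream completes successfully
        let mut t = Tracker { t_start: Instant::now(), outcome: None, metrics: &mut metrics };
        t.outcome = Some(Ok(()));
    }
    {
        // a request dropped before completion counts as cancelled
        let _t = Tracker { t_start: Instant::now(), outcome: None, metrics: &mut metrics };
    }
    assert_eq!(metrics.success.len(), 1);
    assert_eq!(metrics.cancelled.len(), 1);
}
```

The real layer additionally captures the end timestamp when the response stream yields its final item or an error, so the recorded duration covers query execution rather than the tracker's lifetime.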
diff --git a/Cargo.lock b/Cargo.lock index cdeb2dd51e..82b36214ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2771,6 +2771,8 @@ dependencies = [ "futures", "http", "ingester_query_grpc", + "iox_time", + "metric", "observability_deps", "snafu", "test_helpers", diff --git a/ingester_query_client/Cargo.toml b/ingester_query_client/Cargo.toml index 68f6d857a3..f6156acf1c 100644 --- a/ingester_query_client/Cargo.toml +++ b/ingester_query_client/Cargo.toml @@ -14,6 +14,8 @@ datafusion = { workspace = true } futures = "0.3" http = "0.2.9" ingester_query_grpc = { path = "../ingester_query_grpc" } +iox_time = { path = "../iox_time" } +metric = { path = "../metric" } observability_deps = { path = "../observability_deps" } snafu = "0.7" tokio = { version = "1.32" } diff --git a/ingester_query_client/src/layers/metrics.rs b/ingester_query_client/src/layers/metrics.rs new file mode 100644 index 0000000000..7b77552de4 --- /dev/null +++ b/ingester_query_client/src/layers/metrics.rs @@ -0,0 +1,265 @@ +//! Metrics layer. + +use std::{borrow::Cow, sync::Arc, task::Poll}; + +use async_trait::async_trait; +use futures::StreamExt; +use iox_time::{Time, TimeProvider}; +use metric::{DurationHistogram, Metric, Registry}; + +use crate::{ + error::DynError, + layer::{Layer, QueryResponse}, +}; + +/// Metrics layer. +#[derive(Debug)] +pub struct MetricsLayer<L> +where + L: Layer, +{ + /// Inner layer. + inner: L, + + /// Metrics. + metrics: Arc<Metrics>, + + /// Time provider. + time_provider: Arc<dyn TimeProvider>, +} + +impl<L> MetricsLayer<L> +where + L: Layer, +{ + /// Create new metrics wrapper. + pub fn new( + inner: L, + registry: &Registry, + addr: &str, + time_provider: Arc<dyn TimeProvider>, + ) -> Self { + let ingester_duration: Metric<DurationHistogram> = registry.register_metric( + "ingester_duration", + "ingester request query execution duration", + ); + let ingester_duration_success = ingester_duration.recorder([ + ("result", Cow::from("success")), + ("addr", Cow::from(addr.to_owned())), + ]); + let ingester_duration_error = ingester_duration.recorder([ + ("result", Cow::from("error")), + ("addr", Cow::from(addr.to_owned())), + ]); + let ingester_duration_cancelled = ingester_duration.recorder([ + ("result", Cow::from("cancelled")), + ("addr", Cow::from(addr.to_owned())), + ]); + + Self { + inner, + metrics: Arc::new(Metrics { + ingester_duration_success, + ingester_duration_error, + ingester_duration_cancelled, + }), + time_provider, + } + } +} + +#[async_trait] +impl<L> Layer for MetricsLayer<L> +where + L: Layer, +{ + type Request = L::Request; + type ResponseMetadata = L::ResponseMetadata; + type ResponsePayload = L::ResponsePayload; + + async fn query( + &self, + request: Self::Request, + ) -> Result<QueryResponse<Self::ResponseMetadata, Self::ResponsePayload>, DynError> { + let mut tracker = Tracker { + t_start: self.time_provider.now(), + metrics: Arc::clone(&self.metrics), + time_provider: Arc::clone(&self.time_provider), + res: None, + }; + + match self.inner.query(request).await { + Ok(QueryResponse { + metadata, + mut payload, + }) => Ok(QueryResponse { + metadata, + payload: futures::stream::poll_fn(move |cx| { + let res = payload.poll_next_unpin(cx); + + match &res { + Poll::Ready(Some(Ok(_))) => {} + Poll::Ready(Some(Err(_))) => { + tracker.res = Some(Err(tracker.time_provider.now())); + } + Poll::Ready(None) => { + tracker.res = Some(Ok(tracker.time_provider.now())); + } + Poll::Pending => {} + } + + res + }) + .boxed(), + }), + Err(e) => { + tracker.res = 
Some(Err(self.time_provider.now())); + Err(e) + } + } + } +} + +/// All the metrics. +#[derive(Debug)] +struct Metrics { + /// Time spent waiting for successful ingester queries + ingester_duration_success: DurationHistogram, + + /// Time spent waiting for unsuccessful ingester queries + ingester_duration_error: DurationHistogram, + + /// Time spent waiting for a request that was cancelled. + ingester_duration_cancelled: DurationHistogram, +} + +struct Tracker { + t_start: Time, + metrics: Arc<Metrics>, + time_provider: Arc<dyn TimeProvider>, + res: Option<Result<Time, Time>>, +} + +impl Drop for Tracker { + fn drop(&mut self) { + let (t_end, metric) = match &self.res { + Some(Ok(t_end)) => (*t_end, &self.metrics.ingester_duration_success), + Some(Err(t_end)) => (*t_end, &self.metrics.ingester_duration_error), + None => ( + self.time_provider.now(), + &self.metrics.ingester_duration_cancelled, + ), + }; + + if let Some(duration) = t_end.checked_duration_since(self.t_start) { + metric.record(duration); + } + } +} + +#[cfg(test)] +mod tests { + use futures::TryStreamExt; + use iox_time::SystemProvider; + use metric::Attributes; + use tokio::{sync::Barrier, task::JoinSet}; + + use crate::layers::testing::{TestLayer, TestResponse}; + + use super::*; + + #[tokio::test] + async fn test() { + const N_CANCELLED: u64 = 20; + const N_SUCCESSFUL: u64 = 3; + const N_ERROR_EARLY: u64 = 5; + const N_ERROR_LATE: u64 = 7; + + let barrier_1 = Arc::new(Barrier::new(N_CANCELLED as usize + 1)); + let barrier_2 = Arc::new(Barrier::new(N_CANCELLED as usize + 1)); + + let registry = Registry::new(); + + let l = TestLayer::<(), (), ()>::default(); + for _ in 0..N_ERROR_EARLY { + l.mock_response(TestResponse::err(DynError::from("error 1"))); + } + for _ in 0..N_ERROR_LATE { + l.mock_response( + TestResponse::ok(()) + .with_ok_payload(()) + .with_err_payload(DynError::from("error 2")), + ); + } + for _ in 0..N_SUCCESSFUL { + l.mock_response(TestResponse::ok(()).with_ok_payload(()).with_ok_payload(())); + } + for _ in 0..N_CANCELLED { + l.mock_response( + TestResponse::ok(()) + .with_initial_barrier(Arc::clone(&barrier_1)) + .with_initial_barrier(Arc::clone(&barrier_2)), + ); + } + let l = Arc::new(MetricsLayer::new( + l, + &registry, + "foo.bar", + Arc::new(SystemProvider::new()), + )); + + for _ in 0..N_ERROR_EARLY { + l.query(()).await.unwrap_err(); + } + for _ in 0..N_ERROR_LATE { + l.query(()) + .await + .unwrap() + .payload + .try_collect::<Vec<_>>() + .await + .unwrap_err(); + } + for _ in 0..N_SUCCESSFUL { + l.query(()) + .await + .unwrap() + .payload + .try_collect::<Vec<_>>() + .await + .unwrap(); + } + + let mut join_set = JoinSet::new(); + for _ in 0..N_CANCELLED { + let l = Arc::clone(&l); + join_set.spawn(async move { + l.query(()).await.unwrap(); + unreachable!("request should have been cancelled"); + }); + } + + barrier_1.wait().await; + join_set.shutdown().await; + + assert_eq!(sample_count(&registry, "success"), N_SUCCESSFUL,); + assert_eq!( + sample_count(&registry, "error"), + N_ERROR_EARLY + N_ERROR_LATE, + ); + assert_eq!(sample_count(&registry, "cancelled"), N_CANCELLED,); + } + + fn sample_count(registry: &Registry, result: &'static str) -> u64 { + registry + .get_instrument::<Metric<DurationHistogram>>("ingester_duration") + .expect("failed to read metric") + .get_observer(&Attributes::from(&[ + ("result", result), + ("addr", "foo.bar"), + ])) + .expect("failed to get observer") + .fetch() + .sample_count() + } +} diff --git a/ingester_query_client/src/layers/mod.rs 
b/ingester_query_client/src/layers/mod.rs index e489d7907d..a175ff6bbe 100644 --- a/ingester_query_client/src/layers/mod.rs +++ b/ingester_query_client/src/layers/mod.rs @@ -2,6 +2,7 @@ pub mod deserialize; pub mod logging; +pub mod metrics; pub mod network; pub mod serialize; pub mod testing;
84e5c2a0ee3859e45461fa7a38d5b6637cd38b6a
Nga Tran
2022-10-24 11:36:33
cardinality of each batch should use row count of the batch (#5946)
* fix: cardinality of each batch should use row count of the batch * chore: cleanup * fix: auto-merge conflict
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
fix: cardinality of each batch should use row count of the batch (#5946) * fix: cardinality of each batch should use row count of the batch * chore: cleanup * fix: auto-merge conflict Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
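The fix derives the cardinality estimate from the per-batch row count rather than the file's total row count. A sketch of the estimation formula as implied by the constants and updated test expectations in the diff below (an assumed reading for illustration, not the exact production code):

```rust
/// Rough per-batch memory estimate for scanning a parquet file, using the
/// constants visible in the diff: 8-byte numeric values, ~200-byte tag
/// values, ~1000-byte strings, and cardinality ~= rows / 2.
fn estimate_arrow_bytes_per_batch(
    rows_per_batch: u64,
    numeric_cols: u64, // time / i64 / u64 / f64
    tag_cols: u64,
    string_cols: u64,
    bool_cols: u64,
) -> u64 {
    const AVERAGE_TAG_VALUE_LENGTH: u64 = 200;
    const STRING_LENGTH: u64 = 1000;
    const VALUE_BYTES: u64 = 8;
    const BOOL_BYTE: u64 = 1;
    const AVERAGE_ROW_COUNT_CARDINALITY_RATIO: u64 = 2;

    // The point of the fix: cardinality comes from the batch size that
    // DataFusion actually allocates, not the file's total row count.
    let average_cardinality = rows_per_batch / AVERAGE_ROW_COUNT_CARDINALITY_RATIO;

    let value_bytes = numeric_cols * rows_per_batch * VALUE_BYTES;
    // dictionary-encoded tags: distinct values plus an 8-byte key per row
    let tag_bytes =
        tag_cols * (average_cardinality * AVERAGE_TAG_VALUE_LENGTH + rows_per_batch * VALUE_BYTES);
    let string_bytes = string_cols * rows_per_batch * STRING_LENGTH;
    let bool_bytes = bool_cols * rows_per_batch * BOOL_BYTE;

    value_bytes + tag_bytes + string_bytes + bool_bytes
}

fn main() {
    // Matches the updated test expectation: 1600 + 2160 + 20000 + 20 = 23,780
    assert_eq!(estimate_arrow_bytes_per_batch(20, 10, 1, 1, 1), 23_780);
}
```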
diff --git a/compactor/src/compact.rs b/compactor/src/compact.rs index 1386d1c4e2..6658697cc3 100644 --- a/compactor/src/compact.rs +++ b/compactor/src/compact.rs @@ -491,12 +491,10 @@ impl PartitionCompactionCandidateWithInfo { pub fn estimated_arrow_bytes( &self, min_num_rows_allocated_per_record_batch_to_datafusion_plan: u64, - row_count: i64, ) -> u64 { estimate_arrow_bytes_for_file( &self.column_type_counts, min_num_rows_allocated_per_record_batch_to_datafusion_plan, - row_count, ) } } @@ -504,7 +502,6 @@ impl PartitionCompactionCandidateWithInfo { fn estimate_arrow_bytes_for_file( columns: &[ColumnTypeCount], min_num_rows_allocated_per_record_batch_to_datafusion_plan: u64, - row_count: i64, ) -> u64 { const AVERAGE_TAG_VALUE_LENGTH: i64 = 200; const STRING_LENGTH: i64 = 1000; @@ -513,12 +510,12 @@ fn estimate_arrow_bytes_for_file( const BOOL_BYTE: i64 = 1; const AVERAGE_ROW_COUNT_CARDINALITY_RATIO: i64 = 2; - let average_cardinality = row_count / AVERAGE_ROW_COUNT_CARDINALITY_RATIO; - // Since DataFusion streams files and allocates a fixed (configurable) number of rows per batch, // we always use that number to estimate the memory usage per batch. let row_count_per_batch = min_num_rows_allocated_per_record_batch_to_datafusion_plan as i64; + let average_cardinality = row_count_per_batch / AVERAGE_ROW_COUNT_CARDINALITY_RATIO; + // Bytes needed for number columns let mut value_bytes = 0; let mut string_bytes = 0; @@ -582,7 +579,6 @@ pub mod tests { #[test] fn test_estimate_arrow_bytes_for_file_small_row_count() { - let row_count = 11; // Always use this config param to estimate memory usage for each batch // no matter it is larger or smaller than row_count let min_num_rows_allocated_per_record_batch = 20; @@ -594,38 +590,26 @@ pub mod tests { ColumnTypeCount::new(ColumnType::F64, 3), ColumnTypeCount::new(ColumnType::I64, 4), ]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 1600); // 20 * (1+2+3+4) * 8 // Tag let columns = vec![ColumnTypeCount::new(ColumnType::Tag, 1)]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); - assert_eq!(bytes, 1160); // 5 * 200 + 20 * 8 + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); + assert_eq!(bytes, 2160); // 10 * 200 + 20 * 8 // String let columns = vec![ColumnTypeCount::new(ColumnType::String, 1)]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 20000); // 20 * 1000 // Bool let columns = vec![ColumnTypeCount::new(ColumnType::Bool, 1)]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 20); // 20 * 1 // all types @@ -638,17 +622,13 @@ pub mod tests { ColumnTypeCount::new(ColumnType::String, 1), ColumnTypeCount::new(ColumnType::Bool, 1), ]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); - assert_eq!(bytes, 22780); // 1600 + 1160 + 20000 + 20 + let bytes = + estimate_arrow_bytes_for_file(&columns, 
min_num_rows_allocated_per_record_batch); + assert_eq!(bytes, 23780); // 1600 + 2160 + 20000 + 20 } #[test] fn test_estimate_arrow_bytes_for_file_large_row_count() { - let row_count = 11; // Always use this config param to estimate memory usage for each batch // even if it is smaller than row_count let min_num_rows_allocated_per_record_batch = 10; @@ -660,38 +640,26 @@ pub mod tests { ColumnTypeCount::new(ColumnType::F64, 3), ColumnTypeCount::new(ColumnType::I64, 4), ]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 800); // 10 * (1+2+3+4) * 8 // Tag let columns = vec![ColumnTypeCount::new(ColumnType::Tag, 1)]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 1080); // 5 * 200 + 10 * 8 // String let columns = vec![ColumnTypeCount::new(ColumnType::String, 1)]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 10000); // 10 * 1000 // Bool let columns = vec![ColumnTypeCount::new(ColumnType::Bool, 1)]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 10); // 10 * 1 // all types @@ -704,11 +672,8 @@ pub mod tests { ColumnTypeCount::new(ColumnType::String, 1), ColumnTypeCount::new(ColumnType::Bool, 1), ]; - let bytes = estimate_arrow_bytes_for_file( - &columns, - min_num_rows_allocated_per_record_batch, - row_count, - ); + let bytes = + estimate_arrow_bytes_for_file(&columns, min_num_rows_allocated_per_record_batch); assert_eq!(bytes, 11890); // 800 + 1080 + 10000 + 10 } diff --git a/compactor/src/handler.rs b/compactor/src/handler.rs index 6f86700500..78db326055 100644 --- a/compactor/src/handler.rs +++ b/compactor/src/handler.rs @@ -275,7 +275,7 @@ impl Drop for CompactorHandlerImpl { #[cfg(test)] mod tests { use super::*; - use crate::tests::{test_setup, TestSetup}; + use crate::tests::{test_setup_with_default_budget, TestSetup}; #[tokio::test] async fn list_skipped_compactions() { @@ -284,7 +284,7 @@ mod tests { table, shard, .. - } = test_setup().await; + } = test_setup_with_default_budget().await; let compactor_handler = CompactorHandlerImpl::new(Arc::clone(&compactor)); @@ -318,7 +318,7 @@ mod tests { table, shard, .. - } = test_setup().await; + } = test_setup_with_default_budget().await; let compactor_handler = CompactorHandlerImpl::new(Arc::clone(&compactor)); diff --git a/compactor/src/lib.rs b/compactor/src/lib.rs index 7e072853a9..ffd10b8145 100644 --- a/compactor/src/lib.rs +++ b/compactor/src/lib.rs @@ -485,7 +485,7 @@ pub mod tests { compactor, mock_compactor, .. - } = test_setup().await; + } = test_setup(14350).await; let sorted_candidates = VecDeque::new(); @@ -548,7 +548,7 @@ pub mod tests { } } - fn make_compactor_config() -> CompactorConfig { + fn make_compactor_config(budget: u64) -> CompactorConfig { // All numbers in here are chosen carefully for many tests. 
// Change them will break the tests CompactorConfig { @@ -558,7 +558,7 @@ pub mod tests { max_number_partitions_per_shard: 100, min_number_recent_ingested_files_per_partition: 1, hot_multiple: 4, - memory_budget_bytes: 14 * 1025, // 14,350 + memory_budget_bytes: budget, min_num_rows_allocated_per_record_batch_to_datafusion_plan: 2, max_num_compacting_files: 20, } @@ -571,7 +571,11 @@ pub mod tests { pub(crate) table: Arc<TestTable>, } - pub(crate) async fn test_setup() -> TestSetup { + pub(crate) async fn test_setup_with_default_budget() -> TestSetup { + test_setup(14350).await + } + + pub(crate) async fn test_setup(budget: u64) -> TestSetup { let catalog = TestCatalog::new(); let namespace = catalog .create_namespace("namespace_hot_partitions_to_compact") @@ -580,7 +584,8 @@ pub mod tests { // Create a scenario of a table of 5 columns: tag, time, field int, field string, field // bool. Thus, given min_num_rows_allocated_per_record_batch_to_datafusion_plan = 2, - // each file will have estimated memory bytes = 2050 + 100 * row_count (for even row_counts) + //// todo each file will have estimated memory bytes = 2050 + 100 * row_count (for even row_counts) + // each file will have estimated memory bytes = 2,250 let table = namespace.create_table("test_table").await; table.create_column("tag", ColumnType::Tag).await; @@ -592,9 +597,8 @@ pub mod tests { table.create_column("field_bool", ColumnType::Bool).await; // Create a compactor - // Compactor budget : 13,500 let time_provider = Arc::new(SystemProvider::new()); - let config = make_compactor_config(); + let config = make_compactor_config(budget); let compactor = Arc::new(Compactor::new( vec![shard.shard.id], Arc::clone(&catalog.catalog), @@ -626,17 +630,18 @@ pub mod tests { shard, table, .. - } = test_setup().await; + } = test_setup(14350).await; // Some times in the past to set to created_at of the files let hot_time_one_hour_ago = compactor.time_provider.hours_ago(1); // P1: - // L0 2 rows. bytes: 2050 + 100*2 = 2,250 - // L1 2 rows. bytes: 2050 + 100*2 = 2,250 + // L0 2 rows. bytes: 2,250 + // L1 2 rows. bytes: 2,250 // total = 2,250 + 2,250 = 4,500 let partition1 = table.with_shard(&shard).create_partition("one").await; + // 2 files with IDs 1 and 2 let pf1_1 = TestParquetFileBuilder::default() .with_min_time(1) .with_max_time(5) @@ -654,11 +659,12 @@ pub mod tests { partition1.create_parquet_file_catalog_record(pf1_2).await; // P2: - // L0 2 rows. bytes: 2050 + 100*2 = 2,250 - // L1 2 rows. bytes: 2050 + 100*2 = 2,250 + // L0 2 rows. bytes: 2,250 + // L1 2 rows. bytes: 2,250 // total = 2,250 + 2,250 = 4,500 let partition2 = table.with_shard(&shard).create_partition("two").await; + // 2 files with IDs 3 and 4 let pf2_1 = TestParquetFileBuilder::default() .with_min_time(1) .with_max_time(5) @@ -676,52 +682,63 @@ pub mod tests { partition2.create_parquet_file_catalog_record(pf2_2).await; // P3: bytes >= 90% of full budget = 90% * 14,350 = 12,915 - // L0 40 rows. bytes: 2050 + 100 * 50 = 7,050 - // L1 24 rows. bytes: 2050 + 100 * 40 = 6,050 - // total = 7,050 + 6,050 = 13,100 + // L0 40 rows. bytes: 2,250 + // Five L1s. 
bytes: 2,250 each + // total = 2,250 * 6 = 13,500 let partition3 = table.with_shard(&shard).create_partition("three").await; + + // 6 files with IDs 5, 6, 7, 8, 9, 10 let pf3_1 = TestParquetFileBuilder::default() .with_min_time(1) - .with_max_time(5) + .with_max_time(6) .with_row_count(40) .with_compaction_level(CompactionLevel::Initial) .with_creation_time(hot_time_one_hour_ago); partition3.create_parquet_file_catalog_record(pf3_1).await; - let pf3_2 = TestParquetFileBuilder::default() - .with_min_time(4) // overlapped with pf3_1 - .with_max_time(6) - .with_row_count(24) - .with_compaction_level(CompactionLevel::FileNonOverlapped) - .with_creation_time(hot_time_one_hour_ago); - partition3.create_parquet_file_catalog_record(pf3_2).await; + // Five overlapped L1 files + for i in 1..6 { + let pf3_i = TestParquetFileBuilder::default() + .with_min_time(i) // overlapped with pf3_1 + .with_max_time(i) + .with_row_count(24) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + .with_creation_time(hot_time_one_hour_ago); + partition3.create_parquet_file_catalog_record(pf3_i).await; + } // P4: Over the full budget - // L0 with 70 rows.bytes = 2050 + 100 * 70 = 9,050 - // L1 with 40 rows.bytes = 2050 + 100 * 40 = 6,050 - // total = 16,000 + // L0 40 rows. bytes: 2,250 + // Six L1s. bytes: 2,250 each + // total = 2,250 * 7 = 15,750 > 14350 let partition4 = table.with_shard(&shard).create_partition("four").await; + + // 7 files with IDs 11, 12, 13, 14, 15, 16, 17 let pf4_1 = TestParquetFileBuilder::default() .with_min_time(1) - .with_max_time(5) + .with_max_time(7) .with_row_count(70) .with_compaction_level(CompactionLevel::Initial) .with_creation_time(hot_time_one_hour_ago); partition4.create_parquet_file_catalog_record(pf4_1).await; - let pf4_2 = TestParquetFileBuilder::default() - .with_min_time(4) // overlapped with pf4_1 - .with_max_time(6) - .with_row_count(40) - .with_compaction_level(CompactionLevel::FileNonOverlapped) - .with_creation_time(hot_time_one_hour_ago); - partition4.create_parquet_file_catalog_record(pf4_2).await; + // Six overlapped L1 files + for i in 1..7 { + let pf4_i = TestParquetFileBuilder::default() + .with_min_time(i) // overlapped with pf4_1 + .with_max_time(i) + .with_row_count(40) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + .with_creation_time(hot_time_one_hour_ago); + partition4.create_parquet_file_catalog_record(pf4_i).await; + } // P5: - // L0 2 rows. bytes: 2050 + 100*2 = 2,250 - // L1 2 rows. bytes: 2050 + 100*2 = 2,250 + // L0 2 rows. bytes: 2,250 + // L1 2 rows. bytes: 2,250 // total = 2,250 + 2,250 = 4,500 let partition5 = table.with_shard(&shard).create_partition("five").await; + // 2 files with IDs 18, 19 let pf5_1 = TestParquetFileBuilder::default() .with_min_time(1) .with_max_time(5) @@ -739,10 +756,11 @@ pub mod tests { partition5.create_parquet_file_catalog_record(pf5_2).await; // P6: - // L0 2 rows. bytes: 2050 + 100*2 = 2,250 - // L1 2 rows. bytes: 2050 + 100*2 = 2,250 + // L0 2 rows. bytes: 2,250 + // L1 2 rows. 
bytes: 2,250 // total = 2,250 + 2,250 = 4,500 let partition6 = table.with_shard(&shard).create_partition("six").await; + // 2 files with IDs 20, 21 let pf6_1 = TestParquetFileBuilder::default() .with_min_time(1) .with_max_time(5) @@ -816,7 +834,7 @@ pub mod tests { assert_eq!(g1_candidate3.partition.id(), partition5.partition.id); let g1_candidate3_pf_ids: Vec<_> = g1_candidate3.files.iter().map(|pf| pf.id().get()).collect(); - assert_eq!(g1_candidate3_pf_ids, vec![10, 9]); + assert_eq!(g1_candidate3_pf_ids, vec![19, 18]); // Round 2 let group2 = &compaction_groups[1]; @@ -826,7 +844,7 @@ pub mod tests { assert_eq!(g2_candidate1.partition.id(), partition6.partition.id); let g2_candidate1_pf_ids: Vec<_> = g2_candidate1.files.iter().map(|pf| pf.id().get()).collect(); - assert_eq!(g2_candidate1_pf_ids, vec![12, 11]); + assert_eq!(g2_candidate1_pf_ids, vec![21, 20]); // Round 3 let group3 = &compaction_groups[2]; @@ -836,7 +854,8 @@ pub mod tests { assert_eq!(g3_candidate1.partition.id(), partition3.partition.id); let g3_candidate1_pf_ids: Vec<_> = g3_candidate1.files.iter().map(|pf| pf.id().get()).collect(); - assert_eq!(g3_candidate1_pf_ids, vec![6, 5]); + // all IDs of level-1 firts then level-0 + assert_eq!(g3_candidate1_pf_ids, vec![6, 7, 8, 9, 10, 5]); { let mut repos = compactor.catalog.repositories().await; diff --git a/compactor/src/parquet_file_lookup.rs b/compactor/src/parquet_file_lookup.rs index f08fa2354a..9010b9e17b 100644 --- a/compactor/src/parquet_file_lookup.rs +++ b/compactor/src/parquet_file_lookup.rs @@ -89,10 +89,8 @@ impl ParquetFilesForCompaction { for parquet_file in parquet_files { // Estimate the bytes DataFusion needs when scan this file - let estimated_arrow_bytes = partition.estimated_arrow_bytes( - min_num_rows_allocated_per_record_batch_to_datafusion_plan, - parquet_file.row_count, - ); + let estimated_arrow_bytes = partition + .estimated_arrow_bytes(min_num_rows_allocated_per_record_batch_to_datafusion_plan); // Estimated bytes to store this file in memory let estimated_bytes_to_store_in_memory = 2 * parquet_file.file_size_bytes as u64; let parquet_file = match size_overrides.get(&parquet_file.id) {
86a2c249ecba22f73879ad66630a42e7d6273a8b
Marco Neumann
2023-06-01 18:17:28
faster PG `ParquetFileRepo` (#7907)
* refactor: remove `ParquetFileRepo::flag_for_delete` * refactor: batch update parquet files in catalog * refactor: avoid data roundtrips through postgres * refactor: do not return ID from PG when we do not need it ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: faster PG `ParquetFileRepo` (#7907) * refactor: remove `ParquetFileRepo::flag_for_delete` * refactor: batch update parquet files in catalog * refactor: avoid data roundtrips through postgres * refactor: do not return ID from PG when we do not need it --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
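One of the round-trip savings in the diff below is flagging many parquet files for deletion with a single statement, by binding the ID list as a Postgres array and matching with `id = ANY($2)`. A hedged sketch of that shape with `sqlx` (simplified types; `marked_at` is a bare nanosecond value here, and the real method also creates and upgrades files inside the same transaction):

```rust
use sqlx::{Executor, Postgres};

/// Flag a batch of parquet files as soft-deleted in one UPDATE instead of
/// issuing one statement per id.
async fn flag_for_delete<'q, E>(
    executor: E,
    ids: Vec<i64>,
    marked_at: i64,
) -> Result<(), sqlx::Error>
where
    E: Executor<'q, Database = Postgres>,
{
    // Binding the Vec as a Postgres int8[] lets `id = ANY($2)` cover the
    // whole batch in a single round trip.
    sqlx::query(r#"UPDATE parquet_file SET to_delete = $1 WHERE id = ANY($2);"#)
        .bind(marked_at)
        .bind(ids)
        .execute(executor)
        .await?;
    Ok(())
}
```

As the diff notes, binding a `&[ParquetFileId]` directly trips a sqlx compile error (launchbadge/sqlx#1744), which is why the IDs are first mapped to their primitive values.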
diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs index b9055692ce..602bda1c8a 100644 --- a/iox_catalog/src/interface.rs +++ b/iox_catalog/src/interface.rs @@ -448,9 +448,6 @@ pub trait ParquetFileRepo: Send + Sync { /// This is mostly useful for testing and will likely not succeed in production. async fn list_all(&mut self) -> Result<Vec<ParquetFile>>; - /// Flag the parquet file for deletion - async fn flag_for_delete(&mut self, id: ParquetFileId) -> Result<()>; - /// Flag all parquet files for deletion that are older than their namespace's retention period. async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>>; @@ -1755,7 +1752,7 @@ pub(crate) mod test_helpers { // verify to_delete can be updated to a timestamp repos .parquet_files() - .flag_for_delete(parquet_file.id) + .create_upgrade_delete(&[parquet_file.id], &[], &[], CompactionLevel::Initial) .await .unwrap(); @@ -1883,7 +1880,11 @@ pub(crate) mod test_helpers { .unwrap(); assert_eq!(vec![f1.clone(), f2.clone(), f3.clone()], files); - repos.parquet_files().flag_for_delete(f2.id).await.unwrap(); + repos + .parquet_files() + .create_upgrade_delete(&[f2.id], &[], &[], CompactionLevel::Initial) + .await + .unwrap(); let files = repos .parquet_files() .list_by_namespace_not_to_delete(namespace2.id) @@ -2235,7 +2236,7 @@ pub(crate) mod test_helpers { .unwrap(); repos .parquet_files() - .flag_for_delete(delete_l0_file.id) + .create_upgrade_delete(&[delete_l0_file.id], &[], &[], CompactionLevel::Initial) .await .unwrap(); let partitions = repos @@ -2587,7 +2588,7 @@ pub(crate) mod test_helpers { .unwrap(); repos .parquet_files() - .flag_for_delete(delete_file.id) + .create_upgrade_delete(&[delete_file.id], &[], &[], CompactionLevel::Initial) .await .unwrap(); let level1_file_params = ParquetFileParams { diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs index 1443362e32..b6e804ffd3 100644 --- a/iox_catalog/src/mem.rs +++ b/iox_catalog/src/mem.rs @@ -721,11 +721,6 @@ impl ParquetFileRepo for MemTxn { Ok(stage.parquet_files.clone()) } - async fn flag_for_delete(&mut self, id: ParquetFileId) -> Result<()> { - let marked_at = Timestamp::from(self.time_provider.now()); - flag_for_delete(self.stage(), id, marked_at).await - } - async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>> { let now = Timestamp::from(self.time_provider.now()); let stage = self.stage(); diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs index 6ea7a96b5b..f94375651d 100644 --- a/iox_catalog/src/metrics.rs +++ b/iox_catalog/src/metrics.rs @@ -187,7 +187,6 @@ decorate!( methods = [ "parquet_create" = create(&mut self, parquet_file_params: ParquetFileParams) -> Result<ParquetFile>; "parquet_list_all" = list_all(&mut self) -> Result<Vec<ParquetFile>>; - "parquet_flag_for_delete" = flag_for_delete(&mut self, id: ParquetFileId) -> Result<()>; "parquet_flag_for_delete_by_retention" = flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>>; "parquet_list_by_namespace_not_to_delete" = list_by_namespace_not_to_delete(&mut self, namespace_id: NamespaceId) -> Result<Vec<ParquetFile>>; "parquet_list_by_table_not_to_delete" = list_by_table_not_to_delete(&mut self, table_id: TableId) -> Result<Vec<ParquetFile>>; diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index 7e89021136..aae8786ff6 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -1271,7 +1271,8 @@ RETURNING * impl ParquetFileRepo for PostgresTxn { async fn 
create(&mut self, parquet_file_params: ParquetFileParams) -> Result<ParquetFile> { let executor = &mut self.inner; - create_parquet_file(executor, parquet_file_params).await + let id = create_parquet_file(executor, &parquet_file_params).await?; + Ok(ParquetFile::from_params(parquet_file_params, id)) } async fn list_all(&mut self) -> Result<Vec<ParquetFile>> { @@ -1291,13 +1292,6 @@ FROM parquet_file; .map_err(|e| Error::SqlxError { source: e }) } - async fn flag_for_delete(&mut self, id: ParquetFileId) -> Result<()> { - let marked_at = Timestamp::from(self.time_provider.now()); - let executor = &mut self.inner; - - flag_for_delete(executor, id, marked_at).await - } - async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>> { let flagged_at = Timestamp::from(self.time_provider.now()); // TODO - include check of table retention period once implemented @@ -1461,16 +1455,14 @@ WHERE object_store_id = $1; .map_err(|e| Error::StartTransaction { source: e })?; let marked_at = Timestamp::from(self.time_provider.now()); - for id in delete { - flag_for_delete(&mut tx, *id, marked_at).await?; - } + flag_for_delete(&mut tx, delete, marked_at).await?; update_compaction_level(&mut tx, upgrade, target_level).await?; let mut ids = Vec::with_capacity(create.len()); for file in create { - let pf = create_parquet_file(&mut tx, file.clone()).await?; - ids.push(pf.id); + let id = create_parquet_file(&mut tx, file).await?; + ids.push(id); } tx.commit() @@ -1484,8 +1476,8 @@ WHERE object_store_id = $1; // They are also used by the respective create/flag_for_delete/update_compaction_level methods. async fn create_parquet_file<'q, E>( executor: E, - parquet_file_params: ParquetFileParams, -) -> Result<ParquetFile> + parquet_file_params: &ParquetFileParams, +) -> Result<ParquetFileId> where E: Executor<'q, Database = Postgres>, { @@ -1504,17 +1496,14 @@ where max_l0_created_at, } = parquet_file_params; - let query = sqlx::query_as::<_, ParquetFile>( + let query = sqlx::query_scalar::<_, ParquetFileId>( r#" INSERT INTO parquet_file ( shard_id, table_id, partition_id, object_store_id, min_time, max_time, file_size_bytes, row_count, compaction_level, created_at, namespace_id, column_set, max_l0_created_at ) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13 ) -RETURNING - id, table_id, partition_id, object_store_id, - min_time, max_time, to_delete, file_size_bytes, - row_count, compaction_level, created_at, namespace_id, column_set, max_l0_created_at; +RETURNING id; "#, ) .bind(TRANSITION_SHARD_ID) // $1 @@ -1530,9 +1519,11 @@ RETURNING .bind(namespace_id) // $11 .bind(column_set) // $12 .bind(max_l0_created_at); // $13 - let parquet_file = query.fetch_one(executor).await.map_err(|e| { + let parquet_file_id = query.fetch_one(executor).await.map_err(|e| { if is_unique_violation(&e) { - Error::FileExists { object_store_id } + Error::FileExists { + object_store_id: *object_store_id, + } } else if is_fk_violation(&e) { Error::ForeignKeyViolation { source: e } } else { @@ -1540,16 +1531,23 @@ RETURNING } })?; - Ok(parquet_file) + Ok(parquet_file_id) } -async fn flag_for_delete<'q, E>(executor: E, id: ParquetFileId, marked_at: Timestamp) -> Result<()> +async fn flag_for_delete<'q, E>( + executor: E, + ids: &[ParquetFileId], + marked_at: Timestamp, +) -> Result<()> where E: Executor<'q, Database = Postgres>, { - let query = sqlx::query(r#"UPDATE parquet_file SET to_delete = $1 WHERE id = $2;"#) + // If I try to do `.bind(parquet_file_ids)` directly, I get a compile error from sqlx. 
+ // See https://github.com/launchbadge/sqlx/issues/1744 + let ids: Vec<_> = ids.iter().map(|p| p.get()).collect(); + let query = sqlx::query(r#"UPDATE parquet_file SET to_delete = $1 WHERE id = ANY($2);"#) .bind(marked_at) // $1 - .bind(id); // $2 + .bind(&ids[..]); // $2 query .execute(executor) .await @@ -1562,7 +1560,7 @@ async fn update_compaction_level<'q, E>( executor: E, parquet_file_ids: &[ParquetFileId], compaction_level: CompactionLevel, -) -> Result<Vec<ParquetFileId>> +) -> Result<()> where E: Executor<'q, Database = Postgres>, { @@ -1573,19 +1571,17 @@ where r#" UPDATE parquet_file SET compaction_level = $1 -WHERE id = ANY($2) -RETURNING id; +WHERE id = ANY($2); "#, ) .bind(compaction_level) // $1 .bind(&ids[..]); // $2 - let updated = query - .fetch_all(executor) + query + .execute(executor) .await .map_err(|e| Error::SqlxError { source: e })?; - let updated = updated.into_iter().map(|row| row.get("id")).collect(); - Ok(updated) + Ok(()) } /// The error code returned by Postgres for a unique constraint violation. @@ -2173,7 +2169,7 @@ mod tests { // flag f1 for deletion and assert that the total file size is reduced accordingly. repos .parquet_files() - .flag_for_delete(f1.id) + .create_upgrade_delete(&[f1.id], &[], &[], CompactionLevel::Initial) .await .expect("flag parquet file for deletion should succeed"); let total_file_size_bytes: i64 = diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs index b4efcd6119..bec243d0dc 100644 --- a/iox_catalog/src/sqlite.rs +++ b/iox_catalog/src/sqlite.rs @@ -1143,13 +1143,6 @@ FROM parquet_file; .collect()) } - async fn flag_for_delete(&mut self, id: ParquetFileId) -> Result<()> { - let marked_at = Timestamp::from(self.time_provider.now()); - let executor = self.inner.get_mut(); - - flag_for_delete(executor, id, marked_at).await - } - async fn flag_for_delete_by_retention(&mut self) -> Result<Vec<ParquetFileId>> { let flagged_at = Timestamp::from(self.time_provider.now()); // TODO - include check of table retention period once implemented @@ -1776,7 +1769,7 @@ mod tests { // flag f1 for deletion and assert that the total file size is reduced accordingly. repos .parquet_files() - .flag_for_delete(f1.id) + .create_upgrade_delete(&[f1.id], &[], &[], CompactionLevel::Initial) .await .expect("flag parquet file for deletion should succeed"); let total_file_size_bytes: i64 = diff --git a/iox_tests/src/catalog.rs b/iox_tests/src/catalog.rs index 392d3e854c..d2f796d386 100644 --- a/iox_tests/src/catalog.rs +++ b/iox_tests/src/catalog.rs @@ -588,7 +588,7 @@ impl TestPartition { if to_delete { repos .parquet_files() - .flag_for_delete(parquet_file.id) + .create_upgrade_delete(&[parquet_file.id], &[], &[], CompactionLevel::Initial) .await .unwrap(); } @@ -817,9 +817,9 @@ impl TestParquetFile { repos .parquet_files() - .flag_for_delete(self.parquet_file.id) + .create_upgrade_delete(&[self.parquet_file.id], &[], &[], CompactionLevel::Initial) .await - .unwrap() + .unwrap(); } /// Get Parquet file schema.
d2658d2e329811f0bc5a8d0ee28bb9f862c6e310
Marco Neumann
2023-07-04 16:14:46
update heappy to `1de977a241cdd768acc5b6c82c0728b30c7db7b4` (#8145)
Removes some redundant deps.
null
chore: update heappy to `1de977a241cdd768acc5b6c82c0728b30c7db7b4` (#8145) Removes some redundant deps.
diff --git a/Cargo.lock b/Cargo.lock index c42510f921..ce791449e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2149,13 +2149,13 @@ dependencies = [ [[package]] name = "heappy" version = "0.1.0" -source = "git+https://github.com/mkmik/heappy?rev=1d6ac77a4026fffce8680a7b31a9f6e9859b5e73#1d6ac77a4026fffce8680a7b31a9f6e9859b5e73" +source = "git+https://github.com/mkmik/heappy?rev=1de977a241cdd768acc5b6c82c0728b30c7db7b4#1de977a241cdd768acc5b6c82c0728b30c7db7b4" dependencies = [ "backtrace", "bytes", "lazy_static", "libc", - "pprof 0.11.1", + "pprof", "spin 0.9.8", "thiserror", "tikv-jemalloc-sys", @@ -2947,7 +2947,7 @@ dependencies = [ "metric_exporters", "observability_deps", "parking_lot 0.12.1", - "pprof 0.12.0", + "pprof", "reqwest", "serde", "serde_json", @@ -4127,32 +4127,6 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" -[[package]] -name = "pprof" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059" -dependencies = [ - "backtrace", - "cfg-if", - "findshlibs", - "inferno", - "libc", - "log", - "nix", - "once_cell", - "parking_lot 0.12.1", - "prost", - "prost-build", - "prost-derive", - "protobuf", - "sha2", - "smallvec", - "symbolic-demangle 10.2.1", - "tempfile", - "thiserror", -] - [[package]] name = "pprof" version = "0.12.0" @@ -4171,9 +4145,10 @@ dependencies = [ "prost", "prost-build", "prost-derive", + "protobuf", "sha2", "smallvec", - "symbolic-demangle 12.2.0", + "symbolic-demangle", "tempfile", "thiserror", ] @@ -5470,18 +5445,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" -[[package]] -name = "symbolic-common" -version = "10.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737" -dependencies = [ - "debugid", - "memmap2", - "stable_deref_trait", - "uuid", -] - [[package]] name = "symbolic-common" version = "12.2.0" @@ -5494,25 +5457,15 @@ dependencies = [ "uuid", ] -[[package]] -name = "symbolic-demangle" -version = "10.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489" -dependencies = [ - "cpp_demangle", - "rustc-demangle", - "symbolic-common 10.2.1", -] - [[package]] name = "symbolic-demangle" version = "12.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec64922563a36e3fe686b6d99f06f25dacad2a202ac7502ed642930a188fb20a" dependencies = [ + "cpp_demangle", "rustc-demangle", - "symbolic-common 12.2.0", + "symbolic-common", ] [[package]] diff --git a/ioxd_common/Cargo.toml b/ioxd_common/Cargo.toml index 19c8abfd08..1a641c459b 100644 --- a/ioxd_common/Cargo.toml +++ b/ioxd_common/Cargo.toml @@ -12,7 +12,7 @@ license.workspace = true authz = { path = "../authz", features = ["http"] } clap_blocks = { path = "../clap_blocks" } generated_types = { path = "../generated_types" } -heappy = { git = "https://github.com/mkmik/heappy", rev = "1d6ac77a4026fffce8680a7b31a9f6e9859b5e73", features = ["enable_heap_profiler", "jemalloc_shim", "measure_free"], optional = true } +heappy = { git = "https://github.com/mkmik/heappy", rev = "1de977a241cdd768acc5b6c82c0728b30c7db7b4", features = ["enable_heap_profiler", "jemalloc_shim", 
"measure_free"], optional = true } metric = { path = "../metric" } metric_exporters = { path = "../metric_exporters" } observability_deps = { path = "../observability_deps" }
d454c66b4b40cb5e08aaf8dba16a2de53afc6766
Carol (Nichols || Goulding)
2022-11-04 10:33:11
Use a HashMap for column lookup instead of Vec ordering
The checks for whether a column already exists with a different type were relying on ordering of the input matching the ordering of the columns returned from inserting the columns in Postgres. Rather than trying to match the new ordering that is required to avoid Postgres deadlocks, switch from a Vec to a HashMap and look up the column type from the name. This also reduces some allocations that weren't really needed.
null
fix: Use a HashMap for column lookup instead of Vec ordering The checks for whether a column already exists with a different type were relying on ordering of the input matching the ordering of the columns returned from inserting the columns in Postgres. Rather than trying to match the new ordering that is required to avoid Postgres deadlocks, switch from a Vec to a HashMap and look up the column type from the name. This also reduces some allocations that weren't really needed.
diff --git a/import/src/aggregate_tsm_schema/update_catalog.rs b/import/src/aggregate_tsm_schema/update_catalog.rs index f371df8abf..c944a4544f 100644 --- a/import/src/aggregate_tsm_schema/update_catalog.rs +++ b/import/src/aggregate_tsm_schema/update_catalog.rs @@ -6,7 +6,7 @@ use data_types::{ Partition, PartitionKey, QueryPoolId, ShardId, TableSchema, TopicId, }; use influxdb_iox_client::connection::{Connection, GrpcConnection}; -use iox_catalog::interface::{get_schema_by_name, Catalog, ColumnUpsertRequest, RepoCollection}; +use iox_catalog::interface::{get_schema_by_name, Catalog, RepoCollection}; use schema::{ sort::{adjust_sort_key_columns, SortKey, SortKeyBuilder}, InfluxColumnType, InfluxFieldType, TIME_COLUMN_NAME, @@ -201,7 +201,7 @@ where } }; // batch of columns to add into the schema at the end - let mut column_batch = Vec::default(); + let mut column_batch = HashMap::new(); // fields and tags are both columns; tag is a special type of column. // check that the schema has all these columns or update accordingly. for tag in measurement.tags.values() { @@ -218,10 +218,7 @@ where } None => { // column doesn't exist; add it - column_batch.push(ColumnUpsertRequest { - name: tag.name.as_str(), - column_type: ColumnType::Tag, - }); + column_batch.insert(tag.name.as_str(), ColumnType::Tag); } } } @@ -254,10 +251,7 @@ where } None => { // column doesn't exist; add it - column_batch.push(ColumnUpsertRequest { - name: field.name.as_str(), - column_type: ColumnType::from(influx_column_type), - }); + column_batch.insert(field.name.as_str(), ColumnType::from(influx_column_type)); } } } @@ -270,7 +264,7 @@ where // figure it's okay. repos .columns() - .create_or_get_many_unchecked(table.id, &column_batch) + .create_or_get_many_unchecked(table.id, column_batch) .await?; } // create a partition for every day in the date range. diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs index e9196b87d4..72a6863f47 100644 --- a/iox_catalog/src/interface.rs +++ b/iox_catalog/src/interface.rs @@ -334,16 +334,6 @@ pub trait TableRepo: Send + Sync { async fn list(&mut self) -> Result<Vec<Table>>; } -/// Parameters necessary to perform a batch insert of -/// [`ColumnRepo::create_or_get()`] for one table (specified separately) -#[derive(Debug)] -pub struct ColumnUpsertRequest<'a> { - /// The name of the column. - pub name: &'a str, - /// The data type of the column. - pub column_type: ColumnType, -} - /// Functions for working with columns in the catalog #[async_trait] pub trait ColumnRepo: Send + Sync { @@ -357,7 +347,7 @@ pub trait ColumnRepo: Send + Sync { column_type: ColumnType, ) -> Result<Column>; - /// Perform a bulk upsert of columns. + /// Perform a bulk upsert of columns specified by a map of column name to column type. /// /// Implementations make no guarantees as to the ordering or atomicity of /// the batch of column upsert operations - a batch upsert may partially @@ -369,7 +359,7 @@ pub trait ColumnRepo: Send + Sync { async fn create_or_get_many_unchecked( &mut self, table_id: TableId, - columns: &[ColumnUpsertRequest<'_>], + columns: HashMap<&str, ColumnType>, ) -> Result<Vec<Column>>; /// Lists all columns in the passed in namespace id. 
@@ -1264,21 +1254,12 @@ pub(crate) mod test_helpers { assert_eq!(list, want); // test create_or_get_many_unchecked, below column limit + let mut columns = HashMap::new(); + columns.insert("column_test", ColumnType::Tag); + columns.insert("new_column", ColumnType::Tag); let table1_columns = repos .columns() - .create_or_get_many_unchecked( - table.id, - &[ - ColumnUpsertRequest { - name: "column_test", - column_type: ColumnType::Tag, - }, - ColumnUpsertRequest { - name: "new_column", - column_type: ColumnType::Tag, - }, - ], - ) + .create_or_get_many_unchecked(table.id, columns) .await .unwrap(); let mut table1_column_names: Vec<_> = table1_columns.iter().map(|c| &c.name).collect(); @@ -1310,21 +1291,12 @@ pub(crate) mod test_helpers { .create_or_get("test_table_3", namespace.id) .await .unwrap(); + let mut columns = HashMap::new(); + columns.insert("apples", ColumnType::Tag); + columns.insert("oranges", ColumnType::Tag); let table3_columns = repos .columns() - .create_or_get_many_unchecked( - table3.id, - &[ - ColumnUpsertRequest { - name: "apples", - column_type: ColumnType::Tag, - }, - ColumnUpsertRequest { - name: "oranges", - column_type: ColumnType::Tag, - }, - ], - ) + .create_or_get_many_unchecked(table3.id, columns) .await .unwrap(); let mut table3_column_names: Vec<_> = table3_columns.iter().map(|c| &c.name).collect(); diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs index 471ef23314..51aaa54ac3 100644 --- a/iox_catalog/src/lib.rs +++ b/iox_catalog/src/lib.rs @@ -13,14 +13,15 @@ clippy::dbg_macro )] -use crate::interface::{ - ColumnTypeMismatchSnafu, ColumnUpsertRequest, Error, RepoCollection, Result, Transaction, -}; +use crate::interface::{ColumnTypeMismatchSnafu, Error, RepoCollection, Result, Transaction}; use data_types::{ ColumnType, NamespaceSchema, QueryPool, Shard, ShardId, ShardIndex, TableSchema, TopicMetadata, }; use mutable_batch::MutableBatch; -use std::{borrow::Cow, collections::BTreeMap}; +use std::{ + borrow::Cow, + collections::{BTreeMap, HashMap}, +}; use thiserror::Error; const SHARED_TOPIC_NAME: &str = "iox-shared"; @@ -149,47 +150,41 @@ where // If the table itself needs to be updated during column validation it // becomes a Cow::owned() copy and the modified copy should be inserted into // the schema before returning. - let column_batch: Vec<_> = mb - .columns() - .filter_map(|(name, col)| { - // Check if the column exists in the cached schema. - // - // If it does, validate it. If it does not exist, create it and insert - // it into the cached schema. - match table.columns.get(name.as_str()) { - Some(existing) if existing.matches_type(col.influx_type()) => { - // No action is needed as the column matches the existing column - // schema. - None - } - Some(existing) => { - // The column schema, and the column in the mutable batch are of - // different types. - Some( - ColumnTypeMismatchSnafu { - name, - existing: existing.column_type, - new: col.influx_type(), - } - .fail(), - ) - } - None => { - // The column does not exist in the cache, add it to the column - // batch to be bulk inserted later. - Some(Ok(ColumnUpsertRequest { - name: name.as_str(), - column_type: ColumnType::from(col.influx_type()), - })) + let mut column_batch: HashMap<&str, ColumnType> = HashMap::new(); + + for (name, col) in mb.columns() { + // Check if the column exists in the cached schema. + // + // If it does, validate it. If it does not exist, create it and insert + // it into the cached schema. 
+ + match table.columns.get(name.as_str()) { + Some(existing) if existing.matches_type(col.influx_type()) => { + // No action is needed as the column matches the existing column + // schema. + } + Some(existing) => { + // The column schema, and the column in the mutable batch are of + // different types. + return ColumnTypeMismatchSnafu { + name, + existing: existing.column_type, + new: col.influx_type(), } + .fail(); } - }) - .collect::<Result<Vec<_>>>()?; + None => { + // The column does not exist in the cache, add it to the column + // batch to be bulk inserted later. + column_batch.insert(name.as_str(), ColumnType::from(col.influx_type())); + } + } + } if !column_batch.is_empty() { repos .columns() - .create_or_get_many_unchecked(table.id, &column_batch) + .create_or_get_many_unchecked(table.id, column_batch) .await? .into_iter() .for_each(|c| table.to_mut().add_column(&c)); diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs index 2ebfd368b7..4a93dffda6 100644 --- a/iox_catalog/src/mem.rs +++ b/iox_catalog/src/mem.rs @@ -3,10 +3,10 @@ use crate::{ interface::{ - sealed::TransactionFinalize, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, - ColumnUpsertRequest, Error, NamespaceRepo, ParquetFileRepo, PartitionRepo, - ProcessedTombstoneRepo, QueryPoolRepo, RepoCollection, Result, ShardRepo, TableRepo, - TombstoneRepo, TopicMetadataRepo, Transaction, + sealed::TransactionFinalize, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, Error, + NamespaceRepo, ParquetFileRepo, PartitionRepo, ProcessedTombstoneRepo, QueryPoolRepo, + RepoCollection, Result, ShardRepo, TableRepo, TombstoneRepo, TopicMetadataRepo, + Transaction, }, metrics::MetricDecorator, DEFAULT_MAX_COLUMNS_PER_TABLE, DEFAULT_MAX_TABLES, @@ -527,7 +527,7 @@ impl ColumnRepo for MemTxn { async fn create_or_get_many_unchecked( &mut self, table_id: TableId, - columns: &[ColumnUpsertRequest<'_>], + columns: HashMap<&str, ColumnType>, ) -> Result<Vec<Column>> { // Explicitly NOT using `create_or_get` in this function: the Postgres catalog doesn't // check column limits when inserting many columns because it's complicated and expensive, @@ -537,19 +537,19 @@ impl ColumnRepo for MemTxn { let out: Vec<_> = columns .iter() - .map(|column| { + .map(|(&column_name, &column_type)| { match stage .columns .iter() - .find(|t| t.name == column.name && t.table_id == table_id) + .find(|t| t.name == column_name && t.table_id == table_id) { Some(c) => { ensure!( - column.column_type == c.column_type, + column_type == c.column_type, ColumnTypeMismatchSnafu { - name: column.name, + name: column_name, existing: c.column_type, - new: column.column_type + new: column_type } ); Ok(c.clone()) @@ -558,8 +558,8 @@ impl ColumnRepo for MemTxn { let new_column = Column { id: ColumnId::new(stage.columns.len() as i64 + 1), table_id, - name: column.name.to_string(), - column_type: column.column_type, + name: column_name.to_string(), + column_type, }; stage.columns.push(new_column); Ok(stage.columns.last().unwrap().clone()) diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs index 8be8f907aa..536163166b 100644 --- a/iox_catalog/src/metrics.rs +++ b/iox_catalog/src/metrics.rs @@ -1,9 +1,9 @@ //! Metric instrumentation for catalog implementations. 
use crate::interface::{ - sealed::TransactionFinalize, ColumnRepo, ColumnUpsertRequest, NamespaceRepo, ParquetFileRepo, - PartitionRepo, ProcessedTombstoneRepo, QueryPoolRepo, RepoCollection, Result, ShardRepo, - TableRepo, TombstoneRepo, TopicMetadataRepo, + sealed::TransactionFinalize, ColumnRepo, NamespaceRepo, ParquetFileRepo, PartitionRepo, + ProcessedTombstoneRepo, QueryPoolRepo, RepoCollection, Result, ShardRepo, TableRepo, + TombstoneRepo, TopicMetadataRepo, }; use async_trait::async_trait; use data_types::{ @@ -15,7 +15,7 @@ use data_types::{ }; use iox_time::{SystemProvider, TimeProvider}; use metric::{DurationHistogram, Metric}; -use std::{fmt::Debug, sync::Arc}; +use std::{collections::HashMap, fmt::Debug, sync::Arc}; use uuid::Uuid; /// Decorates a implementation of the catalog's [`RepoCollection`] (and the @@ -219,7 +219,7 @@ decorate!( "column_create_or_get" = create_or_get(&mut self, name: &str, table_id: TableId, column_type: ColumnType) -> Result<Column>; "column_list_by_namespace_id" = list_by_namespace_id(&mut self, namespace_id: NamespaceId) -> Result<Vec<Column>>; "column_list_by_table_id" = list_by_table_id(&mut self, table_id: TableId) -> Result<Vec<Column>>; - "column_create_or_get_many_unchecked" = create_or_get_many_unchecked(&mut self, table_id: TableId, columns: &[ColumnUpsertRequest<'_>]) -> Result<Vec<Column>>; + "column_create_or_get_many_unchecked" = create_or_get_many_unchecked(&mut self, table_id: TableId, columns: HashMap<&str, ColumnType>) -> Result<Vec<Column>>; "column_list" = list(&mut self) -> Result<Vec<Column>>; "column_list_type_count_by_table_id" = list_type_count_by_table_id(&mut self, table_id: TableId) -> Result<Vec<ColumnTypeCount>>; ] diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index 8924d896a6..378586cad8 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -2,10 +2,10 @@ use crate::{ interface::{ - self, sealed::TransactionFinalize, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, - ColumnUpsertRequest, Error, NamespaceRepo, ParquetFileRepo, PartitionRepo, - ProcessedTombstoneRepo, QueryPoolRepo, RepoCollection, Result, ShardRepo, TableRepo, - TombstoneRepo, TopicMetadataRepo, Transaction, + self, sealed::TransactionFinalize, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, Error, + NamespaceRepo, ParquetFileRepo, PartitionRepo, ProcessedTombstoneRepo, QueryPoolRepo, + RepoCollection, Result, ShardRepo, TableRepo, TombstoneRepo, TopicMetadataRepo, + Transaction, }, metrics::MetricDecorator, DEFAULT_MAX_COLUMNS_PER_TABLE, DEFAULT_MAX_TABLES, @@ -28,8 +28,7 @@ use sqlx::{ Acquire, ConnectOptions, Executor, Postgres, Row, }; use sqlx_hotswap_pool::HotSwapPool; -use std::str::FromStr; -use std::{sync::Arc, time::Duration}; +use std::{collections::HashMap, str::FromStr, sync::Arc, time::Duration}; static MIGRATOR: Migrator = sqlx::migrate!(); @@ -947,14 +946,13 @@ WHERE table_id = $1; async fn create_or_get_many_unchecked( &mut self, table_id: TableId, - columns: &[ColumnUpsertRequest<'_>], + columns: HashMap<&str, ColumnType>, ) -> Result<Vec<Column>> { - let mut v_name = Vec::new(); - let mut v_column_type = Vec::new(); - for c in columns { - v_name.push(c.name.to_string()); - v_column_type.push(c.column_type as i16); - } + let num_columns = columns.len(); + let (v_name, v_column_type): (Vec<&str>, Vec<i16>) = columns + .iter() + .map(|(&name, &column_type)| (name, column_type as i16)) + .unzip(); // The `ORDER BY` in this statement is important to avoid deadlocks during concurrent // writes to 
the same IOx table that each add many new columns. See: @@ -985,22 +983,21 @@ RETURNING *; } })?; - assert_eq!(columns.len(), out.len()); - - out.into_iter() - .zip(v_column_type) - .map(|(existing, want)| { - ensure!( - existing.column_type as i16 == want, - ColumnTypeMismatchSnafu { - name: existing.name, - existing: existing.column_type, - new: ColumnType::try_from(want).unwrap(), - } - ); - Ok(existing) - }) - .collect() + assert_eq!(num_columns, out.len()); + + for existing in &out { + let want = columns.get(existing.name.as_str()).unwrap(); + ensure!( + existing.column_type == *want, + ColumnTypeMismatchSnafu { + name: &existing.name, + existing: existing.column_type, + new: *want, + } + ); + } + + Ok(out) } async fn list_type_count_by_table_id( @@ -2692,33 +2689,35 @@ mod tests { .id; $( - let insert = [ - $( - ColumnUpsertRequest { - name: $col_name, - column_type: $col_type, - }, - )+ - ]; + let mut insert = HashMap::new(); + $( + insert.insert($col_name, $col_type); + )+ + let got = postgres .repositories() .await .columns() - .create_or_get_many_unchecked(table_id, &insert) + .create_or_get_many_unchecked(table_id, insert.clone()) .await; // The returned columns MUST always match the requested // column values if successful. if let Ok(got) = &got { assert_eq!(insert.len(), got.len()); - insert.iter().zip(got).for_each(|(req, got)| { - assert_eq!(req.name, got.name); + + for got in got { assert_eq!(table_id, got.table_id); + let requested_column_type = insert + .get(got.name.as_str()) + .expect("Should have gotten back a column that was inserted"); assert_eq!( - req.column_type, - ColumnType::try_from(got.column_type).expect("invalid column type") + *requested_column_type, + ColumnType::try_from(got.column_type) + .expect("invalid column type") ); - }); + } + assert_metric_hit(&metrics, "column_create_or_get_many_unchecked"); } )+ @@ -2827,19 +2826,6 @@ mod tests { } ); - // Issue one call containing a column specified twice, with differing types - // and observe an error different from the above test case. - test_column_create_or_get_many_unchecked!( - intra_request_type_conflict, - calls = { - [ - "test1" => ColumnType::String, - "test1" => ColumnType::Bool, - ] - }, - want = Err(Error::SqlxError{ .. }) - ); - #[tokio::test] async fn test_billing_summary_on_parqet_file_creation() { // If running an integration test on your laptop, this requires that you have Postgres running
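The commit body above describes replacing positional (`Vec`-ordered) column validation with name-keyed lookup. As a minimal, self-contained Rust sketch of that pattern (the `ColumnType`, `Column`, and `validate_returned` names below are simplified stand-ins for illustration, not the actual IOx catalog types), the requested columns are kept in a `HashMap` keyed by name and each row returned from the database is checked against that map, so the order of returned rows no longer matters:

```rust
use std::collections::HashMap;

// Simplified stand-ins for the catalog's column types.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ColumnType {
    Tag,
    F64,
}

#[derive(Debug)]
struct Column {
    name: String,
    column_type: ColumnType,
}

/// Check every returned column against the requested type by *name*,
/// independent of the order in which rows come back from the database.
fn validate_returned(
    requested: &HashMap<&str, ColumnType>,
    returned: &[Column],
) -> Result<(), String> {
    assert_eq!(requested.len(), returned.len());
    for got in returned {
        let want = requested
            .get(got.name.as_str())
            .ok_or_else(|| format!("unexpected column returned: {}", got.name))?;
        if got.column_type != *want {
            return Err(format!(
                "column {} already exists as {:?}, requested {:?}",
                got.name, got.column_type, want
            ));
        }
    }
    Ok(())
}

fn main() {
    let mut requested = HashMap::new();
    requested.insert("host", ColumnType::Tag);
    requested.insert("usage", ColumnType::F64);

    // Rows may come back in any order (e.g. sorted by name to avoid
    // Postgres deadlocks) and still validate correctly.
    let returned = vec![
        Column { name: "usage".into(), column_type: ColumnType::F64 },
        Column { name: "host".into(), column_type: ColumnType::Tag },
    ];

    assert!(validate_returned(&requested, &returned).is_ok());
}
```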
197078d6c70e38139af1fdd4eafea488fffd04a7
Fraser Savage
2023-07-06 17:18:12
Use WAL reference actor for file deletion during graceful shutdown
This change integrates the WAL reference actor with the graceful shutdown buffer drain & persist behaviour, relying on its knowledge of partial persistence for deletion and shutdown timing.
null
feat(ingester): Use WAL reference actor for file deletion during graceful shutdown This change integrates the WAL reference actor with the graceful shutdown buffer drain & persist behaviour, relying on its knowledge of partial persistence for deletion and shutdown timing.
diff --git a/ingester/benches/wal.rs b/ingester/benches/wal.rs index d8eb0fbc3e..c383615a19 100644 --- a/ingester/benches/wal.rs +++ b/ingester/benches/wal.rs @@ -8,7 +8,7 @@ use generated_types::influxdata::{ }; use ingester::internal_implementation_details::{ encode::encode_write_op, - queue::MockPersistQueue, + queue::{MockPersistQueue, NopObserver}, write::{ PartitionedData as PayloadPartitionedData, TableData as PayloadTableData, WriteOperation, }, @@ -63,7 +63,7 @@ fn wal_replay_bench(c: &mut Criterion) { // overhead. let sink = NopSink::default(); - let persist = MockPersistQueue::default(); + let persist = MockPersistQueue::<NopObserver>::default(); // Replay the wal into the NOP. ingester::replay(&wal, &sink, Arc::new(persist), &metric::Registry::default()) diff --git a/ingester/src/init.rs b/ingester/src/init.rs index de72b57b9d..dfc5b64d8f 100644 --- a/ingester/src/init.rs +++ b/ingester/src/init.rs @@ -100,11 +100,6 @@ pub struct IngesterGuard<T> { /// Aborted on drop. rotation_task: tokio::task::JoinHandle<()>, - /// Handle to the WAL reference actor's task, it - /// is aborted on drop of the guard, or the actor's - /// handle. - wal_reference_actor_task: tokio::task::JoinHandle<()>, - /// The task handle executing the graceful shutdown once triggered. graceful_shutdown_handler: tokio::task::JoinHandle<()>, shutdown_complete: Shared<oneshot::Receiver<()>>, @@ -131,7 +126,6 @@ where impl<T> Drop for IngesterGuard<T> { fn drop(&mut self) { self.rotation_task.abort(); - self.wal_reference_actor_task.abort(); self.graceful_shutdown_handler.abort(); } } @@ -343,7 +337,7 @@ where )); // Start the WAL reference actor and then replay the WAL log files, if any. - let wal_reference_actor_task = tokio::spawn(wal_reference_actor.run()); + tokio::spawn(wal_reference_actor.run()); let max_sequence_number = wal_replay::replay(&wal, &buffer, Arc::clone(&persist_handle), &metrics) .await @@ -405,6 +399,7 @@ where Arc::clone(&buffer), Arc::clone(&persist_handle), Arc::clone(&wal), + Arc::clone(&wal_reference_handle), )); Ok(IngesterGuard { @@ -420,7 +415,6 @@ where persist_handle, ), rotation_task, - wal_reference_actor_task, graceful_shutdown_handler: shutdown_task, shutdown_complete: shutdown_rx.shared(), }) diff --git a/ingester/src/init/graceful_shutdown.rs b/ingester/src/init/graceful_shutdown.rs index ed08395cd0..c63014330d 100644 --- a/ingester/src/init/graceful_shutdown.rs +++ b/ingester/src/init/graceful_shutdown.rs @@ -9,6 +9,7 @@ use crate::{ ingest_state::{IngestState, IngestStateError}, partition_iter::PartitionIter, persist::{drain_buffer::persist_partitions, queue::PersistQueue}, + wal::reference_tracker::WalReferenceHandle, }; /// Defines how often the shutdown task polls the partition buffers for @@ -40,6 +41,7 @@ pub(super) async fn graceful_shutdown_handler<F, T, P>( buffer: T, persist: P, wal: Arc<wal::Wal>, + wal_reference_handle: Arc<WalReferenceHandle>, ) where F: Future<Output = CancellationToken> + Send, T: PartitionIter + Sync, @@ -97,22 +99,17 @@ pub(super) async fn graceful_shutdown_handler<F, T, P>( // There is now no data buffered in the ingester - all data has been // persisted to object storage. // - // Therefore there are no ops that need replaying to rebuild the (now empty) - // buffer state, therefore all WAL segments can be deleted to prevent - // spurious replay and re-uploading of the same data. 
- wal.rotate().expect("failed to rotate wal"); - for file in wal.closed_segments() { - if let Err(error) = wal.delete(file.id()).await { - // This MAY occur due to concurrent segment deletion driven by the - // WAL reference counting actor. - // - // If this is a legitimate failure to delete (not a "not found") - // then this causes the data to be re-uploaded - an acceptable - // outcome, and preferable to panicking here and not dropping the - // rest of the deletable files. - warn!(%error, "failed to drop WAL segment"); - } - } + // We can rotate the open WAL segment and notify the reference handle + // that the segment's file can be deleted because everything has been + // persisted. + let (closed_segment, sequence_number_set) = wal.rotate().expect("failed to rotate wal"); + wal_reference_handle + .enqueue_rotated_file(closed_segment.id(), sequence_number_set) + .await; + + // Wait for the reference handle to report it has no inactive WAL segments + // tracked, ensuring they are deleted. + wal_reference_handle.empty_inactive_notifier().await; info!("persisted all data - stopping ingester"); @@ -157,23 +154,34 @@ mod tests { Arc::new(Mutex::new(partition)) } - // Initialise a WAL with > 1 segment. + // Initialise a WAL. async fn new_wal() -> (tempfile::TempDir, Arc<wal::Wal>) { let dir = tempfile::tempdir().expect("failed to get temporary WAL directory"); let wal = wal::Wal::new(dir.path()) .await .expect("failed to initialise WAL to write"); - wal.rotate().expect("failed to rotate WAL"); - (dir, wal) } #[tokio::test] async fn test_graceful_shutdown() { - let persist = Arc::new(MockPersistQueue::default()); let ingest_state = Arc::new(IngestState::default()); let (_tempdir, wal) = new_wal().await; + let (wal_reference_handle, wal_reference_actor) = + WalReferenceHandle::new(Arc::clone(&wal), &metric::Registry::default()); + let wal_reference_handle = Arc::new(wal_reference_handle); + let persist = Arc::new(MockPersistQueue::new_with_observer(Arc::clone( + &wal_reference_handle, + ))); + tokio::spawn(wal_reference_actor.run()); + + // Ensure there is always more than 1 segment in the test, but notify the ref tracker. + let (closed_segment, set) = wal.rotate().expect("failed to rotate WAL"); + wal_reference_handle + .enqueue_rotated_file(closed_segment.id(), set) + .await; + let partition = new_partition(); let rpc_stop = CancellationToken::new(); @@ -185,6 +193,7 @@ mod tests { vec![Arc::clone(&partition)], Arc::clone(&persist), Arc::clone(&wal), + Arc::clone(&wal_reference_handle), ) .await; @@ -207,9 +216,22 @@ mod tests { #[tokio::test] async fn test_graceful_shutdown_concurrent_persist() { - let persist = Arc::new(MockPersistQueue::default()); let ingest_state = Arc::new(IngestState::default()); let (_tempdir, wal) = new_wal().await; + let (wal_reference_handle, wal_reference_actor) = + WalReferenceHandle::new(Arc::clone(&wal), &metric::Registry::default()); + let wal_reference_handle = Arc::new(wal_reference_handle); + let persist = Arc::new(MockPersistQueue::new_with_observer(Arc::clone( + &wal_reference_handle, + ))); + tokio::spawn(wal_reference_actor.run()); + + // Ensure there is always more than 1 segment in the test, but notify the ref tracker. 
+ let (closed_segment, set) = wal.rotate().expect("failed to rotate WAL"); + wal_reference_handle + .enqueue_rotated_file(closed_segment.id(), set) + .await; + let partition = new_partition(); // Mark the partition as persisting @@ -229,6 +251,7 @@ mod tests { vec![Arc::clone(&partition)], Arc::clone(&persist), Arc::clone(&wal), + Arc::clone(&wal_reference_handle), )); // Wait a small duration of time for the first buffer emptiness check to @@ -309,9 +332,21 @@ mod tests { #[tokio::test] async fn test_graceful_shutdown_concurrent_new_writes() { - let persist = Arc::new(MockPersistQueue::default()); let ingest_state = Arc::new(IngestState::default()); let (_tempdir, wal) = new_wal().await; + let (wal_reference_handle, wal_reference_actor) = + WalReferenceHandle::new(Arc::clone(&wal), &metric::Registry::default()); + let wal_reference_handle = Arc::new(wal_reference_handle); + let persist = Arc::new(MockPersistQueue::new_with_observer(Arc::clone( + &wal_reference_handle, + ))); + tokio::spawn(wal_reference_actor.run()); + + // Ensure there is always more than 1 segment in the test, but notify the ref tracker. + let (closed_segment, set) = wal.rotate().expect("failed to rotate WAL"); + wal_reference_handle + .enqueue_rotated_file(closed_segment.id(), set) + .await; // Initialise a buffer that keeps yielding more and more newly wrote // data, up until the maximum. @@ -329,6 +364,7 @@ mod tests { Arc::clone(&buffer), Arc::clone(&persist), Arc::clone(&wal), + Arc::clone(&wal_reference_handle), )); // Wait for the shutdown to complete. diff --git a/ingester/src/init/wal_replay.rs b/ingester/src/init/wal_replay.rs index 4bb0c57a38..5cf7540e4e 100644 --- a/ingester/src/init/wal_replay.rs +++ b/ingester/src/init/wal_replay.rs @@ -279,7 +279,7 @@ mod tests { buffer_tree::partition::PartitionData, dml_payload::IngestOp, dml_sink::mock_sink::MockDmlSink, - persist::queue::mock::MockPersistQueue, + persist::{completion_observer::NopObserver, queue::mock::MockPersistQueue}, test_util::{ assert_write_ops_eq, make_multi_table_write_op, make_write_op, PartitionDataBuilder, ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_ID, ARBITRARY_PARTITION_KEY, @@ -417,7 +417,7 @@ mod tests { assert_eq!(wal.closed_segments().len(), 2); // Initialise the mock persist system - let persist = Arc::new(MockPersistQueue::default()); + let persist = Arc::new(MockPersistQueue::<NopObserver>::default()); // Replay the results into a mock to capture the DmlWrites and returns // some dummy partitions when iterated over. diff --git a/ingester/src/persist/completion_observer.rs b/ingester/src/persist/completion_observer.rs index 14b9ff5441..a3dfae0c0b 100644 --- a/ingester/src/persist/completion_observer.rs +++ b/ingester/src/persist/completion_observer.rs @@ -19,7 +19,7 @@ use crate::wal::reference_tracker::WalReferenceHandle; /// [`PartitionData::mark_persisted()`]: /// crate::buffer_tree::partition::PartitionData::mark_persisted() #[async_trait] -pub(crate) trait PersistCompletionObserver: Send + Sync + Debug { +pub trait PersistCompletionObserver: Send + Sync + Debug { /// Observe the [`CompletedPersist`] notification for the newly persisted /// data. async fn persist_complete(&self, note: Arc<CompletedPersist>); @@ -108,8 +108,8 @@ impl CompletedPersist { } /// A no-op implementation of the [`PersistCompletionObserver`] trait. 
-#[derive(Debug, Default)] -pub(crate) struct NopObserver; +#[derive(Clone, Copy, Debug, Default)] +pub struct NopObserver; #[async_trait] impl PersistCompletionObserver for NopObserver { diff --git a/ingester/src/persist/hot_partitions.rs b/ingester/src/persist/hot_partitions.rs index 06cc633c72..cfc5976cd6 100644 --- a/ingester/src/persist/hot_partitions.rs +++ b/ingester/src/persist/hot_partitions.rs @@ -109,7 +109,7 @@ mod tests { use parking_lot::Mutex; use crate::{ - persist::queue::mock::MockPersistQueue, + persist::{completion_observer::NopObserver, queue::mock::MockPersistQueue}, test_util::{PartitionDataBuilder, ARBITRARY_TABLE_NAME}, }; @@ -131,7 +131,7 @@ mod tests { let p = Arc::new(Mutex::new(p)); let metrics = metric::Registry::default(); - let persist_handle = Arc::new(MockPersistQueue::default()); + let persist_handle = Arc::new(MockPersistQueue::<NopObserver>::default()); let hot_partition_persister = HotPartitionPersister::new(Arc::clone(&persist_handle), max_cost, &metrics); diff --git a/ingester/src/persist/queue.rs b/ingester/src/persist/queue.rs index 2ad6d9af70..7e549cc212 100644 --- a/ingester/src/persist/queue.rs +++ b/ingester/src/persist/queue.rs @@ -46,14 +46,24 @@ where #[cfg(feature = "benches")] pub use mock::*; +/// This needs to be pub for the benchmarks but should not be used outside the crate. +#[cfg(feature = "benches")] +pub use crate::persist::completion_observer::*; + #[cfg(any(test, feature = "benches"))] pub(crate) mod mock { use std::{sync::Arc, time::Duration}; + use data_types::{ + ColumnId, ColumnSet, NamespaceId, ParquetFileParams, PartitionId, TableId, Timestamp, + }; use test_helpers::timeout::FutureTimeout; use tokio::task::JoinHandle; use super::*; + use crate::persist::completion_observer::{ + CompletedPersist, NopObserver, PersistCompletionObserver, + }; #[derive(Debug, Default)] struct State { @@ -74,11 +84,24 @@ pub(crate) mod mock { /// A mock [`PersistQueue`] implementation. #[derive(Debug, Default)] - pub struct MockPersistQueue { + pub struct MockPersistQueue<O = NopObserver> { state: Mutex<State>, + completion_observer: Arc<O>, } - impl MockPersistQueue { + impl<O> MockPersistQueue<O> + where + O: PersistCompletionObserver + 'static, + { + /// Creates a queue that notifies the [`PersistCompletionObserver`] + /// on persist enqueue completion. + pub fn new_with_observer(completion_observer: Arc<O>) -> Self { + Self { + state: Default::default(), + completion_observer, + } + } + /// Return all observed [`PartitionData`]. pub fn calls(&self) -> Vec<Arc<Mutex<PartitionData>>> { self.state.lock().calls.clone() @@ -97,7 +120,10 @@ pub(crate) mod mock { } #[async_trait] - impl PersistQueue for MockPersistQueue { + impl<O> PersistQueue for MockPersistQueue<O> + where + O: PersistCompletionObserver + 'static, + { #[allow(clippy::async_yields_async)] async fn enqueue( &self, @@ -109,6 +135,7 @@ pub(crate) mod mock { let mut guard = self.state.lock(); guard.calls.push(Arc::clone(&partition)); + let completion_observer = Arc::clone(&self.completion_observer); // Spawn a persist task that randomly completes (soon) in the // future. 
// @@ -118,7 +145,27 @@ pub(crate) mod mock { guard.handles.push(tokio::spawn(async move { let wait_ms: u64 = rand::random::<u64>() % 100; tokio::time::sleep(Duration::from_millis(wait_ms)).await; - partition.lock().mark_persisted(data); + let sequence_numbers = partition.lock().mark_persisted(data); + completion_observer + .persist_complete(Arc::new(CompletedPersist::new( + ParquetFileParams { + namespace_id: NamespaceId::new(1), + table_id: TableId::new(2), + partition_id: PartitionId::new(3), + partition_hash_id: None, + object_store_id: Default::default(), + min_time: Timestamp::new(42), + max_time: Timestamp::new(42), + file_size_bytes: 42424242, + row_count: 24, + compaction_level: data_types::CompactionLevel::Initial, + created_at: Timestamp::new(1234), + column_set: ColumnSet::new([1, 2, 3, 4].into_iter().map(ColumnId::new)), + max_l0_created_at: Timestamp::new(42), + }, + sequence_numbers, + ))) + .await; let _ = tx.send(()); })); diff --git a/ingester/src/wal/rotate_task.rs b/ingester/src/wal/rotate_task.rs index 5aad333d13..0afbb1fbf2 100644 --- a/ingester/src/wal/rotate_task.rs +++ b/ingester/src/wal/rotate_task.rs @@ -102,7 +102,7 @@ mod tests { use crate::{ buffer_tree::partition::{persisting::PersistingData, PartitionData}, dml_payload::IngestOp, - persist::queue::mock::MockPersistQueue, + persist::{completion_observer::NopObserver, queue::mock::MockPersistQueue}, test_util::{ make_write_op, new_persist_notification, PartitionDataBuilder, ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_ID, ARBITRARY_PARTITION_KEY, ARBITRARY_TABLE_ID, @@ -147,7 +147,7 @@ mod tests { // Initialise a mock persist queue to inspect the calls made to the // persist subsystem. - let persist_handle = Arc::new(MockPersistQueue::default()); + let persist_handle = Arc::new(MockPersistQueue::<NopObserver>::default()); // Initialise the WAL, write the operation to it let tmp_dir = tempdir().expect("no temp dir available"); @@ -176,7 +176,7 @@ mod tests { let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metrics); let wal_reference_handle = Arc::new(wal_reference_handle); - let wal_reference_actor_task = tokio::spawn(wal_reference_actor.run()); + tokio::spawn(wal_reference_actor.run()); // Start the rotation task let rotate_task_handle = tokio::spawn(periodic_rotation( @@ -231,9 +231,8 @@ mod tests { .with_timeout_panic(Duration::from_secs(5)) .await; - // Stop the tasks and assert the state of the persist queue + // Stop the task and assert the state of the persist queue rotate_task_handle.abort(); - wal_reference_actor_task.abort(); assert_matches!(persist_handle.calls().as_slice(), [got] => { let guard = got.lock(); @@ -331,7 +330,7 @@ mod tests { let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metrics); let wal_reference_handle = Arc::new(wal_reference_handle); - let wal_reference_actor_task = tokio::spawn(wal_reference_actor.run()); + tokio::spawn(wal_reference_actor.run()); // Start the rotation task let rotate_task_handle = tokio::spawn(periodic_rotation( @@ -417,9 +416,8 @@ mod tests { .with_timeout_panic(Duration::from_secs(5)) .await; - // Stop the workers and assert the state of the persist queue. + // Stop the worker and assert the state of the persist queue. rotate_task_handle.abort(); - wal_reference_actor_task.abort(); let calls = persist_handle.calls.lock().clone(); assert_matches!(calls.as_slice(), [got1, got2] => {
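As a rough, std-only sketch of the shutdown sequencing this commit wires up — rotate the last WAL segment, hand it to the reference tracker, then block until the tracker reports that no inactive segments remain — consider the following. The real implementation is an async actor (`WalReferenceHandle`) fed by persist-completion notifications; the `SegmentTracker` type and its method names here are simplified, hypothetical stand-ins used only to show the wait-for-empty pattern:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

/// Counts inactive (rotated, not yet fully persisted) WAL segments and lets
/// a caller block until none remain.
#[derive(Default)]
struct SegmentTracker {
    inactive: Mutex<usize>,
    emptied: Condvar,
}

impl SegmentTracker {
    /// A WAL segment was rotated and is now inactive.
    fn enqueue_rotated_file(&self) {
        *self.inactive.lock().unwrap() += 1;
    }

    /// All data referenced by one inactive segment has been persisted.
    fn segment_persisted(&self) {
        let mut n = self.inactive.lock().unwrap();
        *n -= 1;
        if *n == 0 {
            self.emptied.notify_all();
        }
    }

    /// Block until no inactive segments remain (graceful shutdown waits here).
    fn empty_inactive_notifier(&self) {
        let mut n = self.inactive.lock().unwrap();
        while *n != 0 {
            n = self.emptied.wait(n).unwrap();
        }
    }
}

fn main() {
    let tracker = Arc::new(SegmentTracker::default());
    tracker.enqueue_rotated_file();
    tracker.enqueue_rotated_file();

    // Persistence completes concurrently; shutdown waits for both segments.
    let t = Arc::clone(&tracker);
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(10));
        t.segment_persisted();
        t.segment_persisted();
    });

    tracker.empty_inactive_notifier();
    println!("all WAL segments accounted for - safe to stop");
}
```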
9350b643140309898d8fa313de498264c20a81e4
Carol (Nichols || Goulding)
2023-04-14 17:02:52
Add a CompactionType in compactor2::config as well as clap blocks
It's a little weird to have such similar types and have to convert them, but doing this prevents too many crates from having to depend on/know about each other.
null
feat: Add a CompactionType in compactor2::config as well as clap blocks It's a little weird to have such similar types and have to convert them, but doing this prevents too many crates from having to depend on/know about each other.
diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs index 550783153d..83be540b09 100644 --- a/compactor2/src/components/report.rs +++ b/compactor2/src/components/report.rs @@ -10,6 +10,7 @@ use super::Components; pub fn log_config(config: &Config) { // use struct unpack so we don't forget any members let Config { + compaction_type, shard_id, // no need to print the internal state of the registry metric_registry: _, @@ -57,6 +58,7 @@ pub fn log_config(config: &Config) { let commit_wrapper = commit_wrapper.as_ref().map(|_| "Some").unwrap_or("None"); info!( + ?compaction_type, shard_id=shard_id.get(), %catalog, %parquet_store_real, diff --git a/compactor2/src/config.rs b/compactor2/src/config.rs index a375d1f885..cd87b5baba 100644 --- a/compactor2/src/config.rs +++ b/compactor2/src/config.rs @@ -19,6 +19,9 @@ const MIN_COMPACT_SIZE_MULTIPLE: usize = 3; /// Config to set up a compactor. #[derive(Debug, Clone)] pub struct Config { + /// Compaction type. + pub compaction_type: CompactionType, + /// Shard Id pub shard_id: ShardId, @@ -207,6 +210,17 @@ pub struct ShardConfig { pub shard_id: usize, } +/// Compaction type. +#[derive(Debug, Default, Clone, Copy, PartialEq)] +pub enum CompactionType { + /// Compacts recent writes as they come in. + #[default] + Hot, + + /// Compacts partitions that have not been written to very recently for longer-term storage. + Cold, +} + /// Partitions source config. #[derive(Debug, Clone, PartialEq)] pub enum PartitionsSourceConfig { diff --git a/compactor2_test_utils/src/lib.rs b/compactor2_test_utils/src/lib.rs index bf2cdfc633..a896d47030 100644 --- a/compactor2_test_utils/src/lib.rs +++ b/compactor2_test_utils/src/lib.rs @@ -121,6 +121,7 @@ impl TestSetupBuilder<false> { .with_invariant_check(Arc::clone(&invariant_check) as _); let config = Config { + compaction_type: Default::default(), shard_id: shard.shard.id, metric_registry: catalog.metric_registry(), catalog: catalog.catalog(), diff --git a/ioxd_compactor2/src/lib.rs b/ioxd_compactor2/src/lib.rs index c717f6e191..e2439c8cab 100644 --- a/ioxd_compactor2/src/lib.rs +++ b/ioxd_compactor2/src/lib.rs @@ -164,6 +164,16 @@ pub async fn create_compactor2_server_type( compactor_config.compaction_cold_partition_minute_threshold, ); + // This is annoying to have two types that are so similar and have to convert between them, but + // this way compactor2 doesn't have to know about clap_blocks and vice versa. It would also + // be nice to have this as a `From` trait implementation, but this crate isn't allowed because + // neither type is defined in ioxd_compactor. This feels like the right place to do the + // conversion, though. + let compaction_type = match compactor_config.compaction_type { + CompactionType::Hot => compactor2::config::CompactionType::Hot, + CompactionType::Cold => compactor2::config::CompactionType::Cold, + }; + let shard_id = Config::fetch_shard_id( Arc::clone(&catalog), backoff_config.clone(), @@ -172,6 +182,7 @@ pub async fn create_compactor2_server_type( ) .await; let compactor = Compactor2::start(Config { + compaction_type, shard_id, metric_registry: Arc::clone(&metric_registry), catalog,
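The trade-off described in this commit's body — two nearly identical enums, one CLI-facing and one compactor-internal, converted only at the service-composition layer — can be sketched as below. The module names `clap_blocks_like` and `compactor_like` are hypothetical placeholders; only the match-based conversion mirrors the diff above. Keeping the conversion in the wiring layer means neither crate needs to know about the other, at the cost of a small amount of duplicated definition:

```rust
mod clap_blocks_like {
    /// CLI-facing compaction type, as parsed from command-line flags.
    #[derive(Debug, Clone, Copy)]
    pub enum CompactionType {
        Hot,
        Cold,
    }
}

mod compactor_like {
    /// Compactor-internal compaction type.
    #[derive(Debug, Clone, Copy, Default, PartialEq)]
    pub enum CompactionType {
        #[default]
        Hot,
        Cold,
    }
}

/// Conversion done at the composition layer (analogous to `ioxd_compactor2`),
/// the only place that depends on both definitions.
fn to_compactor(t: clap_blocks_like::CompactionType) -> compactor_like::CompactionType {
    match t {
        clap_blocks_like::CompactionType::Hot => compactor_like::CompactionType::Hot,
        clap_blocks_like::CompactionType::Cold => compactor_like::CompactionType::Cold,
    }
}

fn main() {
    let cli = clap_blocks_like::CompactionType::Cold;
    assert_eq!(to_compactor(cli), compactor_like::CompactionType::Cold);
}
```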
e1036a0c638018bf575374d3e0d3887c0b7122ce
Raphael Taylor-Davies
2023-01-06 10:57:39
cleanup schema boxing (#6511)
* refactor: cleanup Schema boxing * chore: clippy
null
refactor: cleanup schema boxing (#6511) * refactor: cleanup Schema boxing * chore: clippy
diff --git a/compactor/src/parquet_file_combining.rs b/compactor/src/parquet_file_combining.rs index 88a60e82c0..7e9736aeac 100644 --- a/compactor/src/parquet_file_combining.rs +++ b/compactor/src/parquet_file_combining.rs @@ -356,7 +356,7 @@ impl CompactPlanBuilder { ReorgPlanner::new(ctx.child_ctx("ReorgPlanner")) .compact_plan( Arc::from(partition.table.name.clone()), - Arc::clone(&merged_schema), + &merged_schema, query_chunks, sort_key.clone(), ) @@ -384,7 +384,7 @@ impl CompactPlanBuilder { ReorgPlanner::new(ctx.child_ctx("ReorgPlanner")) .compact_plan( Arc::from(partition.table.name.clone()), - Arc::clone(&merged_schema), + &merged_schema, query_chunks, sort_key.clone(), ) @@ -394,7 +394,7 @@ impl CompactPlanBuilder { ReorgPlanner::new(ctx.child_ctx("ReorgPlanner")) .split_plan( Arc::from(partition.table.name.clone()), - Arc::clone(&merged_schema), + &merged_schema, query_chunks, sort_key.clone(), split_times, @@ -537,7 +537,7 @@ impl CompactPlanBuilder { let plan = ReorgPlanner::new(ctx.child_ctx("ReorgPlanner")) .compact_plan( Arc::from(partition.table.name.clone()), - Arc::clone(&merged_schema), + &merged_schema, query_chunks, sort_key.clone(), ) @@ -769,7 +769,7 @@ fn to_queryable_parquet_chunk( .map(|sk| sk.filter_to(&pk, file.partition_id().get())); let file = Arc::new(ParquetFile::from(file)); - let parquet_chunk = ParquetChunk::new(Arc::clone(&file), Arc::new(schema), store); + let parquet_chunk = ParquetChunk::new(Arc::clone(&file), schema, store); trace!( parquet_file_id=?file.id, diff --git a/compactor/src/query.rs b/compactor/src/query.rs index 27ce7cffb3..c34c4abcab 100644 --- a/compactor/src/query.rs +++ b/compactor/src/query.rs @@ -60,7 +60,7 @@ impl QueryableParquetChunk { let delete_predicates = tombstones_to_delete_predicates(deletes); let summary = Arc::new(create_basic_summary( data.rows() as u64, - &data.schema(), + data.schema(), data.timestamp_min_max(), )); Self { @@ -79,10 +79,10 @@ impl QueryableParquetChunk { } /// Merge schema of the given chunks - pub fn merge_schemas(chunks: &[Arc<dyn QueryChunk>]) -> Arc<Schema> { + pub fn merge_schemas(chunks: &[Arc<dyn QueryChunk>]) -> Schema { let mut merger = SchemaMerger::new(); for chunk in chunks { - merger = merger.merge(&chunk.schema()).expect("schemas compatible"); + merger = merger.merge(chunk.schema()).expect("schemas compatible"); } merger.build() } @@ -113,7 +113,7 @@ impl QueryChunkMeta for QueryableParquetChunk { Arc::clone(&self.summary) } - fn schema(&self) -> Arc<Schema> { + fn schema(&self) -> &Schema { self.data.schema() } @@ -259,7 +259,7 @@ mod tests { let parquet_chunk = Arc::new(ParquetChunk::new( Arc::clone(&parquet_file), - Arc::new(table.schema().await), + table.schema().await, ParquetStorage::new(Arc::clone(&catalog.object_store), StorageId::from("iox")), )); diff --git a/compactor/src/utils.rs b/compactor/src/utils.rs index 79aca26dbe..8cba34b932 100644 --- a/compactor/src/utils.rs +++ b/compactor/src/utils.rs @@ -176,7 +176,7 @@ impl ParquetFileWithTombstone { .as_ref() .map(|sk| sk.filter_to(&pk, self.partition_id.get())); - let parquet_chunk = ParquetChunk::new(Arc::clone(&self.data), Arc::new(schema), store); + let parquet_chunk = ParquetChunk::new(Arc::clone(&self.data), schema, store); trace!( parquet_file_id=?self.id, diff --git a/datafusion_util/src/lib.rs b/datafusion_util/src/lib.rs index f89bb4bcc5..3961a93864 100644 --- a/datafusion_util/src/lib.rs +++ b/datafusion_util/src/lib.rs @@ -227,8 +227,8 @@ where fn poll_next( mut self: std::pin::Pin<&mut Self>, - cx: &mut 
std::task::Context<'_>, - ) -> std::task::Poll<Option<Self::Item>> { + cx: &mut Context<'_>, + ) -> Poll<Option<Self::Item>> { self.inner.poll_next_unpin(cx) } } @@ -243,13 +243,13 @@ where } /// Create a SendableRecordBatchStream a RecordBatch -pub fn stream_from_batch(schema: Arc<Schema>, batch: RecordBatch) -> SendableRecordBatchStream { +pub fn stream_from_batch(schema: SchemaRef, batch: RecordBatch) -> SendableRecordBatchStream { stream_from_batches(schema, vec![Arc::new(batch)]) } /// Create a SendableRecordBatchStream from Vec of RecordBatches with the same schema pub fn stream_from_batches( - schema: Arc<Schema>, + schema: SchemaRef, batches: Vec<Arc<RecordBatch>>, ) -> SendableRecordBatchStream { if batches.is_empty() { diff --git a/ingester/src/compact.rs b/ingester/src/compact.rs index 0813cdb393..62b10bffea 100644 --- a/ingester/src/compact.rs +++ b/ingester/src/compact.rs @@ -109,7 +109,7 @@ pub(crate) async fn compact_persisting_batch( } None => { let sort_key = compute_sort_key( - batch.schema().as_ref(), + batch.schema(), batch.record_batches().iter().map(|sb| sb.as_ref()), ); // Use the sort key computed from the cardinality as the sort key for this parquet @@ -141,7 +141,7 @@ pub(crate) async fn compact( .compact_plan( table_name.into(), data.schema(), - [data as Arc<dyn QueryChunk>], + [Arc::clone(&data) as Arc<dyn QueryChunk>], sort_key, ) .context(LogicalPlanSnafu {})?; @@ -525,7 +525,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -566,7 +566,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -612,7 +612,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -658,7 +658,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"])); // compact @@ -708,7 +708,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"])); // compact diff --git a/ingester/src/query_adaptor.rs b/ingester/src/query_adaptor.rs index a70eda8ad9..babb281aab 100644 --- a/ingester/src/query_adaptor.rs +++ b/ingester/src/query_adaptor.rs @@ -60,7 +60,7 @@ pub(crate) struct QueryAdaptor { id: ChunkId, /// An interned schema for all [`RecordBatch`] in data. - schema: OnceCell<Arc<Schema>>, + schema: Schema, /// An interned table summary. summary: OnceCell<Arc<TableSummary>>, @@ -80,13 +80,14 @@ impl QueryAdaptor { // partitions - if there is a QueryAdaptor, it contains data. 
assert!(data.iter().map(|b| b.num_rows()).sum::<usize>() > 0); + let schema = merge_record_batch_schemas(&data); Self { + schema, data, partition_id, // To return a value for debugging and make it consistent with ChunkId created in Compactor, // use Uuid for this. Draw this UUID during chunk generation so that it is stable during the whole query process. id: ChunkId::new(), - schema: OnceCell::default(), summary: OnceCell::default(), } } @@ -137,17 +138,14 @@ impl QueryChunkMeta for QueryAdaptor { Arc::new(create_basic_summary( self.data.iter().map(|b| b.num_rows()).sum::<usize>() as u64, - &self.schema(), + self.schema(), ts_min_max, )) })) } - fn schema(&self) -> Arc<Schema> { - Arc::clone( - self.schema - .get_or_init(|| merge_record_batch_schemas(&self.data)), - ) + fn schema(&self) -> &Schema { + &self.schema } fn partition_sort_key(&self) -> Option<&SortKey> { diff --git a/ingester2/src/persist/compact.rs b/ingester2/src/persist/compact.rs index 2910aa2aad..84fabfcd55 100644 --- a/ingester2/src/persist/compact.rs +++ b/ingester2/src/persist/compact.rs @@ -68,7 +68,7 @@ pub(super) async fn compact_persisting_batch( } None => { let sort_key = compute_sort_key( - batch.schema().as_ref(), + batch.schema(), batch.record_batches().iter().map(|sb| sb.as_ref()), ); // Use the sort key computed from the cardinality as the sort key for this parquet @@ -85,7 +85,7 @@ pub(super) async fn compact_persisting_batch( .compact_plan( table_name.into(), batch.schema(), - [batch as Arc<dyn QueryChunk>], + [Arc::clone(&batch) as Arc<dyn QueryChunk>], data_sort_key.clone(), ) .unwrap(); @@ -460,7 +460,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -501,7 +501,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -547,7 +547,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -594,7 +594,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"])); // compact @@ -645,7 +645,7 @@ mod tests { assert_eq!(expected_pk, pk); let sort_key = - compute_sort_key(&schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"])); // compact diff --git a/ingester2/src/persist/handle.rs b/ingester2/src/persist/handle.rs index 0d3a59489b..1c9a5a1271 100644 --- a/ingester2/src/persist/handle.rs +++ b/ingester2/src/persist/handle.rs @@ -339,7 +339,7 @@ impl PersistQueue for PersistHandle { }; // Build the persist task request. 
- let schema = data.schema(); + let schema = data.schema().clone(); let (r, notify) = PersistRequest::new(Arc::clone(&partition), data, permit, enqueued_at); match sort_key { diff --git a/ingester2/src/query_adaptor.rs b/ingester2/src/query_adaptor.rs index 6188bef3b9..38e1645391 100644 --- a/ingester2/src/query_adaptor.rs +++ b/ingester2/src/query_adaptor.rs @@ -39,7 +39,7 @@ pub struct QueryAdaptor { id: ChunkId, /// An interned schema for all [`RecordBatch`] in data. - schema: OnceCell<Arc<Schema>>, + schema: Schema, /// An interned table summary. summary: OnceCell<Arc<TableSummary>>, @@ -59,13 +59,14 @@ impl QueryAdaptor { // partitions - if there is a QueryAdaptor, it contains data. assert!(data.iter().map(|b| b.num_rows()).sum::<usize>() > 0); + let schema = merge_record_batch_schemas(&data); Self { data, partition_id, // To return a value for debugging and make it consistent with ChunkId created in Compactor, // use Uuid for this. Draw this UUID during chunk generation so that it is stable during the whole query process. id: ChunkId::new(), - schema: OnceCell::default(), + schema, summary: OnceCell::default(), } } @@ -116,17 +117,14 @@ impl QueryChunkMeta for QueryAdaptor { Arc::new(create_basic_summary( self.data.iter().map(|b| b.num_rows()).sum::<usize>() as u64, - &self.schema(), + self.schema(), ts_min_max, )) })) } - fn schema(&self) -> Arc<Schema> { - Arc::clone( - self.schema - .get_or_init(|| merge_record_batch_schemas(&self.data)), - ) + fn schema(&self) -> &Schema { + &self.schema } fn partition_sort_key(&self) -> Option<&SortKey> { diff --git a/iox_arrow_flight/src/client.rs b/iox_arrow_flight/src/client.rs index 51a9a7ad67..03ff39af0a 100644 --- a/iox_arrow_flight/src/client.rs +++ b/iox_arrow_flight/src/client.rs @@ -1,3 +1,4 @@ +use arrow::datatypes::SchemaRef; /// Prototype "Flight Client" that handles underlying details of the flight protocol at a higher level /// Based on the "low level client" from IOx client: @@ -412,7 +413,7 @@ impl futures::Stream for FlightDataStream { /// streaming flight response. #[derive(Debug)] struct FlightStreamState { - schema: Arc<Schema>, + schema: SchemaRef, dictionaries_by_field: HashMap<i64, ArrayRef>, } @@ -431,7 +432,7 @@ impl DecodedFlightData { } } - pub fn new_schema(inner: FlightData, schema: Arc<Schema>) -> Self { + pub fn new_schema(inner: FlightData, schema: SchemaRef) -> Self { Self { inner, payload: DecodedPayload::Schema(schema), @@ -458,7 +459,7 @@ pub enum DecodedPayload { None, /// A decoded Schema message - Schema(Arc<Schema>), + Schema(SchemaRef), /// A decoded Record batch. 
RecordBatch(RecordBatch), diff --git a/iox_query/src/frontend.rs b/iox_query/src/frontend.rs index a1185aa016..a12c80bfd5 100644 --- a/iox_query/src/frontend.rs +++ b/iox_query/src/frontend.rs @@ -66,7 +66,7 @@ mod test { let ctx = IOxSessionContext::with_testing(); // Build a logical plan with deduplication - let scan_plan = ScanPlanBuilder::new(Arc::from("t"), schema, ctx.child_ctx("scan_plan")) + let scan_plan = ScanPlanBuilder::new(Arc::from("t"), &schema, ctx.child_ctx("scan_plan")) .with_chunks(chunks) .build() .unwrap(); @@ -114,7 +114,7 @@ mod test { let ctx = IOxSessionContext::with_testing(); // Build a logical plan without deduplication - let scan_plan = ScanPlanBuilder::new(Arc::from("t"), schema, ctx.child_ctx("scan_plan")) + let scan_plan = ScanPlanBuilder::new(Arc::from("t"), &schema, ctx.child_ctx("scan_plan")) .with_chunks(chunks) // force it to not deduplicate .enable_deduplication(false) @@ -178,7 +178,7 @@ mod test { let ctx = IOxSessionContext::with_testing(); // Build a logical plan without deduplication but sort - let scan_plan = ScanPlanBuilder::new(Arc::from("t"), schema, ctx.child_ctx("scan_plan")) + let scan_plan = ScanPlanBuilder::new(Arc::from("t"), &schema, ctx.child_ctx("scan_plan")) .with_chunks(chunks) // force it to not deduplicate .enable_deduplication(false) @@ -230,7 +230,7 @@ mod test { // Use a split plan as it has StreamSplitExec, DeduplicateExec and IOxReadFilternode let split_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000]) + .split_plan(Arc::from("t"), &schema, chunks, sort_key, vec![1000]) .expect("created compact plan"); let executor = Executor::new_testing(); @@ -361,7 +361,7 @@ mod test { extractor.inner } - fn test_chunks(overlapped: bool) -> (Arc<Schema>, Vec<Arc<dyn QueryChunk>>) { + fn test_chunks(overlapped: bool) -> (Schema, Vec<Arc<dyn QueryChunk>>) { let max_time = if overlapped { 70000 } else { 7000 }; let chunk1 = Arc::new( TestChunk::new("t") @@ -385,20 +385,20 @@ mod test { ); let schema = SchemaMerger::new() - .merge(&chunk1.schema()) + .merge(chunk1.schema()) .unwrap() - .merge(&chunk2.schema()) + .merge(chunk2.schema()) .unwrap() .build(); (schema, vec![chunk1, chunk2]) } - fn get_test_chunks() -> (Arc<Schema>, Vec<Arc<dyn QueryChunk>>) { + fn get_test_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { test_chunks(false) } - fn get_test_overlapped_chunks() -> (Arc<Schema>, Vec<Arc<dyn QueryChunk>>) { + fn get_test_overlapped_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { test_chunks(true) } } diff --git a/iox_query/src/frontend/common.rs b/iox_query/src/frontend/common.rs index 7074054228..f870b23304 100644 --- a/iox_query/src/frontend/common.rs +++ b/iox_query/src/frontend/common.rs @@ -63,7 +63,7 @@ impl std::fmt::Debug for ScanPlan { impl ScanPlan { /// Return the schema of the source (the merged schema across all tables) - pub fn schema(&self) -> Arc<Schema> { + pub fn schema(&self) -> &Schema { self.provider.iox_schema() } } @@ -90,7 +90,7 @@ pub struct ScanPlanBuilder<'a> { table_name: Arc<str>, /// The schema of the resulting table (any chunks that don't have /// all the necessary columns will be extended appropriately) - table_schema: Arc<Schema>, + table_schema: &'a Schema, chunks: Vec<Arc<dyn QueryChunk>>, /// The sort key that describes the desired output sort order output_sort_key: Option<SortKey>, @@ -100,7 +100,7 @@ pub struct ScanPlanBuilder<'a> { } impl<'a> ScanPlanBuilder<'a> { - pub fn new(table_name: Arc<str>, table_schema: 
Arc<Schema>, ctx: IOxSessionContext) -> Self { + pub fn new(table_name: Arc<str>, table_schema: &'a Schema, ctx: IOxSessionContext) -> Self { Self { ctx, table_name, @@ -158,7 +158,7 @@ impl<'a> ScanPlanBuilder<'a> { // Prepare the plan for the table let mut builder = ProviderBuilder::new( Arc::clone(&table_name), - table_schema, + table_schema.clone(), ctx.child_ctx("provider_builder"), ) .with_enable_deduplication(deduplication); @@ -193,7 +193,7 @@ impl<'a> ScanPlanBuilder<'a> { // Rewrite expression so it only refers to columns in this chunk let schema = provider.iox_schema(); trace!(%table_name, ?filter_expr, "Adding filter expr"); - let mut rewriter = MissingColumnsToNull::new(&schema); + let mut rewriter = MissingColumnsToNull::new(schema); let filter_expr = filter_expr .rewrite(&mut rewriter) diff --git a/iox_query/src/frontend/influxrpc.rs b/iox_query/src/frontend/influxrpc.rs index 33ad3d715b..35160b208c 100644 --- a/iox_query/src/frontend/influxrpc.rs +++ b/iox_query/src/frontend/influxrpc.rs @@ -309,7 +309,7 @@ impl InfluxRpcPlanner { let plan = Self::table_name_plan( ctx, Arc::clone(table_name), - schema, + &schema, predicate, chunks, )?; @@ -472,7 +472,7 @@ impl InfluxRpcPlanner { let mut ctx = ctx.child_ctx("tag_keys_plan"); ctx.set_metadata("table", table_name.to_string()); - let plan = self.tag_keys_plan(ctx, table_name, schema, predicate, chunks_full)?; + let plan = self.tag_keys_plan(ctx, table_name, &schema, predicate, chunks_full)?; if let Some(plan) = plan { builder = builder.append_other(plan) @@ -641,7 +641,7 @@ impl InfluxRpcPlanner { let mut ctx = ctx.child_ctx("scan_and_filter planning"); ctx.set_metadata("table", table_name.to_string()); - let scan_and_filter = ScanPlanBuilder::new(Arc::clone(table_name), schema, ctx) + let scan_and_filter = ScanPlanBuilder::new(Arc::clone(table_name), &schema, ctx) .with_chunks(chunks_full) .with_predicate(predicate) .build()?; @@ -884,7 +884,7 @@ impl InfluxRpcPlanner { Aggregate::None => Self::read_filter_plan( ctx.child_ctx("read_filter plan"), table_name, - Arc::clone(&schema), + schema, predicate, chunks, ), @@ -963,7 +963,7 @@ impl InfluxRpcPlanner { &self, ctx: IOxSessionContext, table_name: &str, - schema: Arc<Schema>, + schema: &Schema, predicate: &Predicate, chunks: Vec<Arc<dyn QueryChunk>>, ) -> Result<Option<StringSetPlan>> { @@ -1029,7 +1029,7 @@ impl InfluxRpcPlanner { fn field_columns_plan( ctx: IOxSessionContext, table_name: Arc<str>, - schema: Arc<Schema>, + schema: &Schema, predicate: &Predicate, chunks: Vec<Arc<dyn QueryChunk>>, ) -> Result<LogicalPlan> { @@ -1085,7 +1085,7 @@ impl InfluxRpcPlanner { fn table_name_plan( ctx: IOxSessionContext, table_name: Arc<str>, - schema: Arc<Schema>, + schema: &Schema, predicate: &Predicate, chunks: Vec<Arc<dyn QueryChunk>>, ) -> Result<LogicalPlan> { @@ -1100,7 +1100,7 @@ impl InfluxRpcPlanner { .build()?; // Select only fields requested - let select_exprs: Vec<_> = filtered_fields_iter(&scan_and_filter.schema(), predicate) + let select_exprs: Vec<_> = filtered_fields_iter(scan_and_filter.schema(), predicate) .map(|field| field.name.as_expr()) .collect(); @@ -1130,7 +1130,7 @@ impl InfluxRpcPlanner { fn read_filter_plan( ctx: IOxSessionContext, table_name: &str, - schema: Arc<Schema>, + schema: &Schema, predicate: &Predicate, chunks: Vec<Arc<dyn QueryChunk>>, ) -> Result<SeriesSetPlan> { @@ -1143,7 +1143,7 @@ impl InfluxRpcPlanner { .with_chunks(chunks) .build()?; - let schema = scan_and_filter.schema(); + let schema = scan_and_filter.provider.iox_schema(); let 
tags_and_timestamp: Vec<_> = scan_and_filter .schema() @@ -1164,7 +1164,7 @@ impl InfluxRpcPlanner { let tags_fields_and_timestamps: Vec<Expr> = schema .tags_iter() .map(|field| field.name().as_expr()) - .chain(filtered_fields_iter(&schema, predicate).map(|f| f.expr)) + .chain(filtered_fields_iter(schema, predicate).map(|f| f.expr)) .chain(schema.time_iter().map(|field| field.name().as_expr())) .collect(); @@ -1179,7 +1179,7 @@ impl InfluxRpcPlanner { .map(|field| Arc::from(field.name().as_str())) .collect(); - let field_columns = filtered_fields_iter(&schema, predicate) + let field_columns = filtered_fields_iter(schema, predicate) .map(|field| Arc::from(field.name)) .collect(); @@ -1238,7 +1238,7 @@ impl InfluxRpcPlanner { fn read_group_plan( ctx: IOxSessionContext, table_name: &str, - schema: Arc<Schema>, + schema: &Schema, predicate: &Predicate, agg: Aggregate, chunks: Vec<Arc<dyn QueryChunk>>, @@ -1255,7 +1255,7 @@ impl InfluxRpcPlanner { // order the tag columns so that the group keys come first (we // will group and // order in the same order) - let schema = scan_and_filter.schema(); + let schema = scan_and_filter.provider.iox_schema(); let tag_columns: Vec<_> = schema.tags_iter().map(|f| f.name() as &str).collect(); // Group by all tag columns @@ -1267,7 +1267,7 @@ impl InfluxRpcPlanner { let AggExprs { agg_exprs, field_columns, - } = AggExprs::try_new_for_read_group(agg, &schema, predicate)?; + } = AggExprs::try_new_for_read_group(agg, schema, predicate)?; let plan_builder = scan_and_filter .plan_builder @@ -1347,7 +1347,7 @@ impl InfluxRpcPlanner { fn read_window_aggregate_plan( ctx: IOxSessionContext, table_name: &str, - schema: Arc<Schema>, + schema: &Schema, predicate: &Predicate, agg: Aggregate, every: WindowDuration, @@ -1363,7 +1363,7 @@ impl InfluxRpcPlanner { .with_chunks(chunks) .build()?; - let schema = scan_and_filter.schema(); + let schema = scan_and_filter.provider.iox_schema(); // Group by all tag columns and the window bounds let window_bound = make_window_bound_expr(TIME_COLUMN_NAME.as_expr(), every, offset) @@ -1378,7 +1378,7 @@ impl InfluxRpcPlanner { let AggExprs { agg_exprs, field_columns, - } = AggExprs::try_new_for_read_window_aggregate(agg, &schema, predicate)?; + } = AggExprs::try_new_for_read_window_aggregate(agg, schema, predicate)?; // sort by the group by expressions as well let sort_exprs = group_exprs @@ -1436,7 +1436,7 @@ fn table_chunk_stream<'a>( let table_schema = namespace.table_schema(table_name); let projection = match table_schema { Some(table_schema) => { - columns_in_predicates(need_fields, table_schema, table_name, predicate) + columns_in_predicates(need_fields, &table_schema, table_name, predicate) } None => None, }; @@ -1470,7 +1470,7 @@ fn table_chunk_stream<'a>( // in the predicate. 
fn columns_in_predicates( need_fields: bool, - table_schema: Arc<Schema>, + table_schema: &Schema, table_name: &str, predicate: &Predicate, ) -> Option<Vec<usize>> { @@ -1560,7 +1560,7 @@ where &'a str, &'a Predicate, Vec<Arc<dyn QueryChunk>>, - Arc<Schema>, + &'a Schema, ) -> Result<P> + Clone + Send @@ -1592,7 +1592,7 @@ where table_name: table_name.as_ref(), })?; - f(&ctx, table_name, predicate, chunks, schema) + f(&ctx, table_name, predicate, chunks, &schema) } }) .try_collect() @@ -2004,25 +2004,24 @@ mod tests { // test 1: empty predicate without need_fields let predicate = Predicate::new(); let need_fields = false; - let projection = columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate); + let projection = columns_in_predicates(need_fields, &schema, table, &predicate); assert_eq!(projection, None); // test 2: empty predicate with need_fields let need_fields = true; - let projection = columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate); + let projection = columns_in_predicates(need_fields, &schema, table, &predicate); assert_eq!(projection, None); // test 3: predicate on tag without need_fields let predicate = Predicate::new().with_expr(col("foo").eq(lit("some_thing"))); let need_fields = false; - let projection = - columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate).unwrap(); + let projection = columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); // return index of foo assert_eq!(projection, vec![1]); // test 4: predicate on tag with need_fields let need_fields = true; - let projection = columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate); + let projection = columns_in_predicates(need_fields, &schema, table, &predicate); // return None means all fields assert_eq!(projection, None); @@ -2032,7 +2031,7 @@ mod tests { .with_field_columns(vec!["i64_field".to_string()]); let need_fields = false; let mut projection = - columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate).unwrap(); + columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); projection.sort(); // return indexes of i64_field and foo assert_eq!(projection, vec![1, 2]); @@ -2040,7 +2039,7 @@ mod tests { // test 6: predicate on tag with field_columns with need_fields let need_fields = true; let mut projection = - columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate).unwrap(); + columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); projection.sort(); // return indexes of foo and index of i64_field assert_eq!(projection, vec![1, 2]); @@ -2051,7 +2050,7 @@ mod tests { .with_field_columns(vec!["i64_field".to_string()]); let need_fields = false; let mut projection = - columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate).unwrap(); + columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); projection.sort(); // return indexes of bard and i64_field assert_eq!(projection, vec![0, 2]); @@ -2059,7 +2058,7 @@ mod tests { // test 7: predicate on tag and field with field_columns with need_fields let need_fields = true; let mut projection = - columns_in_predicates(need_fields, Arc::clone(&schema), table, &predicate).unwrap(); + columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); projection.sort(); // return indexes of bard and i64_field assert_eq!(projection, vec![0, 2]); diff --git a/iox_query/src/frontend/reorg.rs b/iox_query/src/frontend/reorg.rs index dcb3860140..b7686f0192 100644 --- 
a/iox_query/src/frontend/reorg.rs +++ b/iox_query/src/frontend/reorg.rs @@ -75,7 +75,7 @@ impl ReorgPlanner { pub fn compact_plan<I>( &self, table_name: Arc<str>, - schema: Arc<Schema>, + schema: &Schema, chunks: I, output_sort_key: SortKey, ) -> Result<LogicalPlan> @@ -150,7 +150,7 @@ impl ReorgPlanner { pub fn split_plan<I>( &self, table_name: Arc<str>, - schema: Arc<Schema>, + schema: &Schema, chunks: I, output_sort_key: SortKey, split_times: Vec<i64>, @@ -214,7 +214,7 @@ mod test { use super::*; - async fn get_test_chunks() -> (Arc<Schema>, Vec<Arc<dyn QueryChunk>>) { + async fn get_test_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { // Chunk 1 with 5 rows of data on 2 tags let chunk1 = Arc::new( TestChunk::new("t") @@ -261,16 +261,16 @@ mod test { assert_batches_eq!(&expected, &raw_data(&[Arc::clone(&chunk2)]).await); let schema = SchemaMerger::new() - .merge(&chunk1.schema()) + .merge(chunk1.schema()) .unwrap() - .merge(&chunk2.schema()) + .merge(chunk2.schema()) .unwrap() .build(); (schema, vec![chunk1, chunk2]) } - async fn get_sorted_test_chunks() -> (Arc<Schema>, Vec<Arc<dyn QueryChunk>>) { + async fn get_sorted_test_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { // Chunk 1 let chunk1 = Arc::new( TestChunk::new("t") @@ -307,7 +307,7 @@ mod test { ]; assert_batches_eq!(&expected, &raw_data(&[Arc::clone(&chunk2)]).await); - (chunk1.schema(), vec![chunk1, chunk2]) + (chunk1.schema().clone(), vec![chunk1, chunk2]) } #[tokio::test] @@ -333,7 +333,7 @@ mod test { .build(); let compact_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .compact_plan(Arc::from("t"), Arc::clone(&schema), chunks, sort_key) + .compact_plan(Arc::from("t"), &schema, chunks, sort_key) .expect("created compact plan"); let physical_plan = executor @@ -370,7 +370,7 @@ mod test { .build(); let compact_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .compact_plan(Arc::from("t"), schema, chunks, sort_key) + .compact_plan(Arc::from("t"), &schema, chunks, sort_key) .expect("created compact plan"); let executor = Executor::new_testing(); @@ -421,7 +421,7 @@ mod test { // split on 1000 should have timestamps 1000, 5000, and 7000 let split_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000]) + .split_plan(Arc::from("t"), &schema, chunks, sort_key, vec![1000]) .expect("created compact plan"); let executor = Executor::new_testing(); @@ -485,7 +485,7 @@ mod test { // split on 1000 and 7000 let split_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000, 7000]) + .split_plan(Arc::from("t"), &schema, chunks, sort_key, vec![1000, 7000]) .expect("created compact plan"); let executor = Executor::new_testing(); @@ -561,7 +561,7 @@ mod test { // split on 1000 and 7000 let _split_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .split_plan(Arc::from("t"), schema, chunks, sort_key, vec![]) // reason of panic: empty split_times + .split_plan(Arc::from("t"), &schema, chunks, sort_key, vec![]) // reason of panic: empty split_times .expect("created compact plan"); } @@ -580,7 +580,7 @@ mod test { // split on 1000 and 7000 let _split_plan = ReorgPlanner::new(IOxSessionContext::with_testing()) - .split_plan(Arc::from("t"), schema, chunks, sort_key, vec![1000, 500]) // reason of panic: split_times not in ascending order + .split_plan(Arc::from("t"), &schema, chunks, sort_key, vec![1000, 500]) // reason of panic: split_times not in ascending order 
.expect("created compact plan"); } } diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index e074015c48..e9ea45b914 100644 --- a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -44,7 +44,7 @@ pub trait QueryChunkMeta { fn summary(&self) -> Arc<TableSummary>; /// return a reference to the summary of the data held in this chunk - fn schema(&self) -> Arc<Schema>; + fn schema(&self) -> &Schema; /// Return a reference to the chunk's partition sort key if any. /// Only persisted chunk has its partition sort key @@ -191,7 +191,7 @@ impl QueryChunkData { /// Read data into [`RecordBatch`]es. This is mostly meant for testing! pub async fn read_to_batches( self, - schema: Arc<Schema>, + schema: &Schema, session_ctx: &SessionContext, ) -> Vec<RecordBatch> { match self { @@ -274,7 +274,7 @@ where self.as_ref().summary() } - fn schema(&self) -> Arc<Schema> { + fn schema(&self) -> &Schema { self.as_ref().schema() } @@ -303,7 +303,7 @@ impl QueryChunkMeta for Arc<dyn QueryChunk> { self.as_ref().summary() } - fn schema(&self) -> Arc<Schema> { + fn schema(&self) -> &Schema { self.as_ref().schema() } diff --git a/iox_query/src/plan/influxql/test_utils.rs b/iox_query/src/plan/influxql/test_utils.rs index d7ef50ac88..0d905ca696 100644 --- a/iox_query/src/plan/influxql/test_utils.rs +++ b/iox_query/src/plan/influxql/test_utils.rs @@ -8,7 +8,6 @@ use influxdb_influxql_parser::select::{Field, SelectStatement}; use influxdb_influxql_parser::statement::Statement; use predicate::rpc_predicate::QueryNamespaceMeta; use schema::Schema; -use std::sync::Arc; /// Returns the first `Field` of the `SELECT` statement. pub(crate) fn get_first_field(s: &str) -> Field { @@ -86,8 +85,8 @@ impl QueryNamespaceMeta for MockNamespace { .collect() } - fn table_schema(&self, table_name: &str) -> Option<Arc<Schema>> { + fn table_schema(&self, table_name: &str) -> Option<Schema> { let c = self.chunks.iter().find(|x| x.table_name() == table_name)?; - Some(c.schema()) + Some(c.schema().clone()) } } diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs index 5801099c59..9443fba6c5 100644 --- a/iox_query/src/provider.rs +++ b/iox_query/src/provider.rs @@ -100,7 +100,7 @@ pub trait ChunkPruner: Sync + Send + std::fmt::Debug { fn prune_chunks( &self, table_name: &str, - table_schema: Arc<Schema>, + table_schema: &Schema, chunks: Vec<Arc<dyn QueryChunk>>, predicate: &Predicate, ) -> Result<Vec<Arc<dyn QueryChunk>>>; @@ -112,7 +112,7 @@ pub trait ChunkPruner: Sync + Send + std::fmt::Debug { #[derive(Debug)] pub struct ProviderBuilder { table_name: Arc<str>, - schema: Arc<Schema>, + schema: Schema, chunks: Vec<Arc<dyn QueryChunk>>, output_sort_key: Option<SortKey>, deduplication: bool, @@ -122,7 +122,7 @@ pub struct ProviderBuilder { } impl ProviderBuilder { - pub fn new(table_name: Arc<str>, schema: Arc<Schema>, ctx: IOxSessionContext) -> Self { + pub fn new(table_name: Arc<str>, schema: Schema, ctx: IOxSessionContext) -> Self { Self { table_name, schema, @@ -173,7 +173,7 @@ impl ProviderBuilder { pub struct ChunkTableProvider { table_name: Arc<str>, /// The IOx schema (wrapper around Arrow Schemaref) for this table - iox_schema: Arc<Schema>, + iox_schema: Schema, /// The chunks chunks: Vec<Arc<dyn QueryChunk>>, /// The desired output sort key if any @@ -187,8 +187,8 @@ pub struct ChunkTableProvider { impl ChunkTableProvider { /// Return the IOx schema view for the data provided by this provider - pub fn iox_schema(&self) -> Arc<Schema> { - Arc::clone(&self.iox_schema) + pub fn iox_schema(&self) -> &Schema { + 
&self.iox_schema } /// Return the Arrow schema view for the data provided by this provider @@ -229,10 +229,7 @@ impl TableProvider for ChunkTableProvider { let chunks: Vec<Arc<dyn QueryChunk>> = self.chunks.to_vec(); // Figure out the schema of the requested output - let scan_schema = match projection { - Some(indices) => Arc::new(self.iox_schema.select_by_indices(indices)), - None => Arc::clone(&self.iox_schema), - }; + let schema = projection.map(|indices| self.iox_schema.select_by_indices(indices)); // This debug shows the self.arrow_schema() includes all columns in all chunks // which means the schema of all chunks are merged before invoking this scan @@ -247,7 +244,7 @@ impl TableProvider for ChunkTableProvider { let plan = deduplicate.build_scan_plan( Arc::clone(&self.table_name), - scan_schema, + schema.as_ref().unwrap_or(&self.iox_schema), chunks, predicate, self.output_sort_key.clone(), @@ -502,7 +499,7 @@ impl Deduplicater { pub(crate) fn build_scan_plan( mut self, table_name: Arc<str>, - output_schema: Arc<Schema>, + output_schema: &Schema, chunks: Vec<Arc<dyn QueryChunk>>, mut predicate: Predicate, output_sort_key: Option<SortKey>, @@ -532,7 +529,7 @@ impl Deduplicater { let mut non_duplicate_plans = Self::build_plans_for_non_duplicates_chunks( self.ctx.child_ctx("build_plans_for_non_duplicates_chunks"), - Arc::clone(&output_schema), + output_schema, chunks, predicate, output_sort_key.as_ref(), @@ -593,7 +590,7 @@ impl Deduplicater { plans.push(Self::build_deduplicate_plan_for_overlapped_chunks( self.ctx .child_ctx("build_deduplicate_plan_for_overlapped_chunks"), - Arc::clone(&output_schema), + output_schema, overlapped_chunks, predicate.clone(), &chunks_dedup_sort_key, @@ -622,7 +619,7 @@ impl Deduplicater { plans.push(Self::build_deduplicate_plan_for_chunk_with_duplicates( self.ctx .child_ctx("build_deduplicate_plan_for_chunk_with_duplicates"), - Arc::clone(&output_schema), + output_schema, chunk_with_duplicates, predicate.clone(), &chunk_dedup_sort_key, @@ -640,7 +637,7 @@ impl Deduplicater { ); let mut non_duplicate_plans = Self::build_plans_for_non_duplicates_chunks( self.ctx.child_ctx("build_plans_for_non_duplicates_chunks"), - Arc::clone(&output_schema), + output_schema, chunks, predicate, output_sort_key.as_ref(), @@ -808,7 +805,7 @@ impl Deduplicater { ///``` fn build_deduplicate_plan_for_overlapped_chunks( ctx: IOxSessionContext, - output_schema: Arc<Schema>, + output_schema: &Schema, chunks: Vec<Arc<dyn QueryChunk>>, // These chunks are identified overlapped predicate: Predicate, output_sort_key: &SortKey, @@ -829,7 +826,7 @@ impl Deduplicater { }; let pk_schema = Self::compute_pk_schema(&chunks, schema_interner); - let input_schema = Self::compute_input_schema(&output_schema, &pk_schema, schema_interner); + let input_schema = Self::compute_input_schema(output_schema, &pk_schema, schema_interner); debug!( ?output_schema, @@ -844,7 +841,7 @@ impl Deduplicater { .map(|chunk| { Self::build_sort_plan_for_read_filter( ctx.child_ctx("build_sort_plan_for_read_filter"), - Arc::clone(&input_schema), + &input_schema, Arc::clone(chunk), predicate.clone(), Some(output_sort_key), @@ -900,7 +897,7 @@ impl Deduplicater { ///``` fn build_deduplicate_plan_for_chunk_with_duplicates( ctx: IOxSessionContext, - output_schema: Arc<Schema>, + output_schema: &Schema, chunk: Arc<dyn QueryChunk>, // This chunk is identified having duplicates predicate: Predicate, output_sort_key: &SortKey, @@ -909,10 +906,10 @@ impl Deduplicater { // This will practically never matter because this can only 
happen for in-memory chunks which are currently // backed by RecordBatches and these don't do anything with the predicate at all. However to prevent weird // future issues, we still transform the predicate here. (@crepererum, 2022-11-16) - let predicate = predicate.push_through_dedup(&chunk.schema()); + let predicate = predicate.push_through_dedup(chunk.schema()); let pk_schema = Self::compute_pk_schema(&[Arc::clone(&chunk)], schema_interner); - let input_schema = Self::compute_input_schema(&output_schema, &pk_schema, schema_interner); + let input_schema = Self::compute_input_schema(output_schema, &pk_schema, schema_interner); debug!( ?output_schema, @@ -927,7 +924,7 @@ impl Deduplicater { // Create the 2 bottom nodes RecordBatchesExec and SortExec let plan = Self::build_sort_plan_for_read_filter( ctx.child_ctx("build_sort_plan_for_read_filter"), - Arc::clone(&input_schema), + &input_schema, Arc::clone(&chunks[0]), predicate, Some(output_sort_key), @@ -963,7 +960,7 @@ impl Deduplicater { ///``` fn add_projection_node_if_needed( - output_schema: Arc<Schema>, + output_schema: &Schema, input: Arc<dyn ExecutionPlan>, ) -> Result<Arc<dyn ExecutionPlan>> { let input_schema = input.schema(); @@ -1042,7 +1039,7 @@ impl Deduplicater { ///``` fn build_sort_plan_for_read_filter( ctx: IOxSessionContext, - output_schema: Arc<Schema>, + output_schema: &Schema, chunk: Arc<dyn QueryChunk>, predicate: Predicate, // This is the select predicate of the query output_sort_key: Option<&SortKey>, @@ -1066,7 +1063,7 @@ impl Deduplicater { trace!("Build sort plan for a single chunk. Sort node won't be added if the plan is already sorted"); let mut schema_merger = SchemaMerger::new() .with_interner(schema_interner) - .merge(&output_schema) + .merge(output_schema) .unwrap(); let chunk_schema = chunk.schema(); trace!(?chunk_schema, "chunk schema"); @@ -1097,7 +1094,7 @@ impl Deduplicater { // Create the bottom node RecordBatchesExec for this chunk let mut input = chunks_to_physical_nodes( - input_schema, + &input_schema, output_sort_key, vec![Arc::clone(&chunk)], predicate, @@ -1206,7 +1203,7 @@ impl Deduplicater { // And some optional operators on top such as applying delete predicates or sort the chunk fn build_plan_for_non_duplicates_chunk( ctx: IOxSessionContext, - output_schema: Arc<Schema>, + output_schema: &Schema, chunk: Arc<dyn QueryChunk>, // This chunk is identified having no duplicates predicate: Predicate, output_sort_key: Option<&SortKey>, @@ -1256,7 +1253,7 @@ impl Deduplicater { #[allow(clippy::too_many_arguments)] fn build_plans_for_non_duplicates_chunks( ctx: IOxSessionContext, - output_schema: Arc<Schema>, + output_schema: &Schema, chunks: Chunks, // These chunks is identified having no duplicates predicate: Predicate, output_sort_key: Option<&SortKey>, @@ -1291,7 +1288,7 @@ impl Deduplicater { .map(|chunk| { Self::build_plan_for_non_duplicates_chunk( ctx.child_ctx("build_plan_for_non_duplicates_chunk"), - Arc::clone(&output_schema), + output_schema, Arc::clone(chunk), predicate.clone(), output_sort_key, @@ -1307,7 +1304,7 @@ impl Deduplicater { fn compute_pk_schema<'a>( chunks: impl IntoIterator<Item = &'a Arc<dyn QueryChunk>>, schema_interner: &mut SchemaInterner, - ) -> Arc<Schema> { + ) -> Schema { let mut schema_merger = SchemaMerger::new().with_interner(schema_interner); for chunk in chunks { let chunk_schema = chunk.schema(); @@ -1330,10 +1327,10 @@ impl Deduplicater { fn compute_chunks_schema<'a>( chunks: impl IntoIterator<Item = &'a Arc<dyn QueryChunk>>, schema_interner: &mut 
SchemaInterner, - ) -> Arc<Schema> { + ) -> Schema { let mut schema_merger = SchemaMerger::new().with_interner(schema_interner); for chunk in chunks { - schema_merger = schema_merger.merge(&chunk.schema()).unwrap(); + schema_merger = schema_merger.merge(chunk.schema()).unwrap(); } schema_merger.build() @@ -1345,7 +1342,7 @@ impl Deduplicater { output_schema: &Schema, pk_schema: &Schema, schema_interner: &mut SchemaInterner, - ) -> Arc<Schema> { + ) -> Schema { SchemaMerger::new() .with_interner(schema_interner) .merge(output_schema) @@ -1621,7 +1618,7 @@ mod test { let sort_plan = Deduplicater::build_sort_plan_for_read_filter( IOxSessionContext::with_testing(), - Arc::clone(&schema), + schema, Arc::clone(&chunk), Predicate::default(), Some(&sort_key.clone()), @@ -1775,7 +1772,7 @@ mod test { // All chunks in one single scan let plans = Deduplicater::build_plans_for_non_duplicates_chunks( IOxSessionContext::with_testing(), - Arc::clone(&schema), + schema, Chunks::split_overlapped_chunks(vec![Arc::clone(&chunk1), Arc::clone(&chunk2)], false) .unwrap(), Predicate::default(), @@ -1939,7 +1936,7 @@ mod test { ) as Arc<dyn QueryChunk>; // Datafusion schema of the chunk // the same for 2 chunks - let schema = chunk1.schema(); + let schema = chunk1.schema().clone(); let chunks = vec![chunk1, chunk2]; // data in its original form @@ -1964,7 +1961,7 @@ mod test { let output_sort_key = SortKey::from_columns(vec!["tag1", "tag2", "time"]); let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks( IOxSessionContext::with_testing(), - Arc::clone(&schema), + &schema, chunks, Predicate::default(), &output_sort_key, @@ -2024,7 +2021,7 @@ mod test { let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks( IOxSessionContext::with_testing(), - schema, + &schema, vec![chunk1, chunk2], Predicate::default(), &output_sort_key, @@ -2100,7 +2097,7 @@ mod test { let output_sort_key = SortKey::from_columns(vec!["tag1", "tag2", "time"]); let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks( IOxSessionContext::with_testing(), - Arc::new(schema), + &schema, chunks, Predicate::default(), &output_sort_key, @@ -2199,7 +2196,7 @@ mod test { let output_sort_key = SortKey::from_columns(vec!["tag2", "tag1", "time"]); let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks( IOxSessionContext::with_testing(), - Arc::new(schema), + &schema, chunks, Predicate::default(), &output_sort_key, @@ -2267,11 +2264,11 @@ mod test { // With provided stats, the computed key will be (tag2, tag1, tag3, time) // Requested output schema == the schema for all three let schema = SchemaMerger::new() - .merge(chunk1.schema().as_ref()) + .merge(chunk1.schema()) .unwrap() - .merge(chunk2.schema().as_ref()) + .merge(chunk2.schema()) .unwrap() - .merge(chunk3.schema().as_ref()) + .merge(chunk3.schema()) .unwrap() .build(); @@ -2306,7 +2303,7 @@ mod test { let output_sort_key = SortKey::from_columns(vec!["tag2", "tag1", "time"]); let sort_plan = Deduplicater::build_deduplicate_plan_for_overlapped_chunks( IOxSessionContext::with_testing(), - schema, + &schema, chunks, Predicate::default(), &output_sort_key, @@ -2364,7 +2361,7 @@ mod test { ) as Arc<dyn QueryChunk>; // Datafusion schema of the chunk - let schema = chunk.schema(); + let schema = chunk.schema().clone(); let chunks = vec![chunk]; // data in its original form @@ -2385,7 +2382,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), 
Predicate::default(), None, @@ -2400,7 +2397,7 @@ mod test { let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()).enable_deduplication(false); let plan = deduplicator - .build_scan_plan(Arc::from("t"), schema, chunks, Predicate::default(), None) + .build_scan_plan(Arc::from("t"), &schema, chunks, Predicate::default(), None) .unwrap(); let batch = test_collect(plan).await; // The data will stay in their original order @@ -2433,7 +2430,7 @@ mod test { ) as Arc<dyn QueryChunk>; // Datafusion schema of the chunk - let schema = chunk.schema(); + let schema = chunk.schema().clone(); let chunks = vec![chunk]; // data in its original form @@ -2459,7 +2456,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), Predicate::default(), None, @@ -2487,7 +2484,7 @@ mod test { let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()).enable_deduplication(false); let plan = deduplicator - .build_scan_plan(Arc::from("t"), schema, chunks, Predicate::default(), None) + .build_scan_plan(Arc::from("t"), &schema, chunks, Predicate::default(), None) .unwrap(); let batch = test_collect(plan).await; // Deduplication is disabled, the output shoudl be the same as the original data @@ -2546,13 +2543,12 @@ mod test { .timestamp() .build() .unwrap(); - let schema = Arc::new(schema); let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()); let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&Arc::clone(&schema)), + &schema, chunks.clone(), Predicate::default(), None, @@ -2581,13 +2577,7 @@ mod test { let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()).enable_deduplication(false); let plan = deduplicator - .build_scan_plan( - Arc::from("t"), - Arc::clone(&schema), - chunks, - Predicate::default(), - None, - ) + .build_scan_plan(Arc::from("t"), &schema, chunks, Predicate::default(), None) .unwrap(); let batch = test_collect(plan).await; // Deduplication is disabled, the output should include all rows but only 2 selected columns @@ -2655,7 +2645,7 @@ mod test { ) as Arc<dyn QueryChunk>; // Datafusion schema of the chunk - let schema = chunk1.schema(); + let schema = chunk1.schema().clone(); let chunks = vec![chunk1, chunk2]; // data in its original form @@ -2686,7 +2676,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), Predicate::default(), None, @@ -2715,7 +2705,7 @@ mod test { let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()).enable_deduplication(false); let plan = deduplicator - .build_scan_plan(Arc::from("t"), schema, chunks, Predicate::default(), None) + .build_scan_plan(Arc::from("t"), &schema, chunks, Predicate::default(), None) .unwrap(); let batch = test_collect(plan).await; // Deduplication is disabled, the output shoudl be the same as the original data @@ -2816,7 +2806,7 @@ mod test { ) as Arc<dyn QueryChunk>; // Datafusion schema of the chunk - let schema = chunk1.schema(); + let schema = chunk1.schema().clone(); let chunks = vec![chunk1, chunk2, chunk3, chunk4]; // data in its original form @@ -2855,7 +2845,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), Predicate::default(), None, @@ -2935,7 +2925,7 @@ mod test { let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()).enable_deduplication(false); let plan = deduplicator - .build_scan_plan(Arc::from("t"), schema, chunks, 
Predicate::default(), None) + .build_scan_plan(Arc::from("t"), &schema, chunks, Predicate::default(), None) .unwrap(); // Plan is very simple with one single RecordBatchesExec that includes 4 chunks @@ -3042,7 +3032,7 @@ mod test { ) as Arc<dyn QueryChunk>; // Datafusion schema of the chunk - let schema = chunk1.schema(); + let schema = chunk1.schema().clone(); let chunks = vec![chunk1, chunk2, chunk3, chunk4]; // data in its original form @@ -3081,7 +3071,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), Predicate::default(), Some(sort_key.clone()), // Ask to sort the plan output @@ -3158,7 +3148,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - schema, + &schema, chunks, Predicate::default(), Some(sort_key.clone()), @@ -3260,13 +3250,13 @@ mod test { .with_sort_key(sort_key.clone()), // signal the chunk is sorted ) as Arc<dyn QueryChunk>; - let schema = chunk1.schema(); + let schema = chunk1.schema().clone(); let chunks = vec![chunk1, chunk2, chunk3, chunk4]; let deduplicator = Deduplicater::new(IOxSessionContext::with_testing()); let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), Predicate::default(), Some(sort_key.clone()), @@ -3325,7 +3315,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - schema, + &schema, chunks, Predicate::default(), Some(sort_key), @@ -3488,7 +3478,7 @@ mod test { .with_sort_key(sort_key.clone()), // signal the chunk is sorted ) as Arc<dyn QueryChunk>; - let schema = chunk1_1.schema(); + let schema = chunk1_1.schema().clone(); let chunks = vec![ chunk1_1, chunk1_2, chunk1_3, chunk1_4, chunk2_1, chunk2_2, chunk2_3, chunk2_4, chunk2_5, chunk2_6, @@ -3497,7 +3487,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - Arc::clone(&schema), + &schema, chunks.clone(), Predicate::default(), Some(sort_key.clone()), @@ -3557,7 +3547,7 @@ mod test { let plan = deduplicator .build_scan_plan( Arc::from("t"), - schema, + &schema, chunks, Predicate::default(), Some(sort_key), diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs index 36b89e6feb..e59af96b55 100644 --- a/iox_query/src/provider/physical.rs +++ b/iox_query/src/provider/physical.rs @@ -119,7 +119,7 @@ fn combine_sort_key( /// pushdown ([`RecordBatchesExec`] has NO builtin filter function). Delete predicates are NOT applied at all. The /// caller is responsible for wrapping the output node into appropriate filter nodes. pub fn chunks_to_physical_nodes( - iox_schema: Arc<Schema>, + iox_schema: &Schema, output_sort_key: Option<&SortKey>, chunks: Vec<Arc<dyn QueryChunk>>, predicate: Predicate, diff --git a/iox_query/src/pruning.rs b/iox_query/src/pruning.rs index e13d9b87bc..a079c634eb 100644 --- a/iox_query/src/pruning.rs +++ b/iox_query/src/pruning.rs @@ -72,8 +72,8 @@ pub trait PruningObserver { /// filtering those where the predicate can be proven to evaluate to /// `false` for every single row. pub fn prune_chunks( - table_schema: Arc<Schema>, - chunks: &Vec<Arc<dyn QueryChunk>>, + table_schema: &Schema, + chunks: &[Arc<dyn QueryChunk>], predicate: &Predicate, ) -> Result<Vec<bool>, NotPrunedReason> { let num_chunks = chunks.len(); @@ -85,8 +85,8 @@ pub fn prune_chunks( /// Given a `Vec` of pruning summaries, return a `Vec<bool>` where `false` indicates that the /// predicate can be proven to evaluate to `false` for every single row. 
pub fn prune_summaries( - table_schema: Arc<Schema>, - summaries: &Vec<Arc<TableSummary>>, + table_schema: &Schema, + summaries: &[Arc<TableSummary>], predicate: &Predicate, ) -> Result<Vec<bool>, NotPrunedReason> { let filter_expr = match predicate.filter_expr() { @@ -108,7 +108,7 @@ pub fn prune_summaries( }; let statistics = ChunkPruningStatistics { - table_schema: table_schema.as_ref(), + table_schema, summaries, }; @@ -126,7 +126,7 @@ pub fn prune_summaries( /// interface required by [`PruningPredicate`] struct ChunkPruningStatistics<'a> { table_schema: &'a Schema, - summaries: &'a Vec<Arc<TableSummary>>, + summaries: &'a [Arc<TableSummary>], } impl<'a> ChunkPruningStatistics<'a> { @@ -263,7 +263,7 @@ mod test { let c1 = Arc::new(TestChunk::new("chunk1")); let predicate = Predicate::new(); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result, Err(NotPrunedReason::NoExpressionOnPredicate)); } @@ -281,7 +281,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").gt(lit(100.0f64))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -299,7 +299,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").gt(lit(100i64))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -318,7 +318,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").gt(lit(100u64))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -335,7 +335,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1")); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![false; 1]); } @@ -355,7 +355,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").gt(lit("z"))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -372,7 +372,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").lt(lit(100.0f64))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -390,7 +390,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").lt(lit(100i64))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -408,7 +408,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").lt(lit(100u64))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -426,7 +426,7 @@ mod test { let predicate = Predicate::new().with_expr(col("column1")); - let result = 
prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -446,14 +446,14 @@ mod test { let predicate = Predicate::new().with_expr(col("column1").lt(lit("z"))); - let result = prune_chunks(c1.schema(), &vec![c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); assert_eq!(result.expect("pruning succeeds"), vec![true]); } - fn merge_schema(chunks: &[Arc<dyn QueryChunk>]) -> Arc<Schema> { + fn merge_schema(chunks: &[Arc<dyn QueryChunk>]) -> Schema { let mut merger = SchemaMerger::new(); for chunk in chunks { - merger = merger.merge(chunk.schema().as_ref()).unwrap(); + merger = merger.merge(chunk.schema()).unwrap(); } merger.build() } @@ -491,7 +491,7 @@ mod test { let chunks = vec![c1, c2, c3, c4]; let schema = merge_schema(&chunks); - let result = prune_chunks(schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &predicate); assert_eq!( result.expect("pruning succeeds"), @@ -549,7 +549,7 @@ mod test { let chunks = vec![c1, c2, c3, c4, c5, c6]; let schema = merge_schema(&chunks); - let result = prune_chunks(schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &predicate); assert_eq!( result.expect("pruning succeeds"), @@ -587,7 +587,7 @@ mod test { let chunks = vec![c1, c2, c3]; let schema = merge_schema(&chunks); - let result = prune_chunks(schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &predicate); assert_eq!(result.expect("pruning succeeds"), vec![false, true, true]); } @@ -645,7 +645,7 @@ mod test { let chunks = vec![c1, c2, c3]; let schema = merge_schema(&chunks); - let result = prune_chunks(schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &predicate); assert_eq!(result.expect("pruning succeeds"), vec![true, false, false]); } @@ -706,7 +706,7 @@ mod test { let chunks = vec![c1, c2, c3, c4, c5, c6]; let schema = merge_schema(&chunks); - let result = prune_chunks(schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &predicate); assert_eq!( result.expect("Pruning succeeds"), diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index 7ae4e56ad1..a0c6a54fe7 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -142,7 +142,7 @@ impl QueryNamespace for TestDatabase { } impl QueryNamespaceMeta for TestDatabase { - fn table_schema(&self, table_name: &str) -> Option<Arc<Schema>> { + fn table_schema(&self, table_name: &str) -> Option<Schema> { let mut merger = SchemaMerger::new(); let mut found_one = false; @@ -150,7 +150,7 @@ impl QueryNamespaceMeta for TestDatabase { for partition in partitions.values() { for chunk in partition.values() { if chunk.table_name() == table_name { - merger = merger.merge(&chunk.schema()).expect("consistent schemas"); + merger = merger.merge(chunk.schema()).expect("consistent schemas"); found_one = true; } } @@ -188,7 +188,7 @@ pub struct TestChunk { table_name: String, /// Schema of the table - schema: Arc<Schema>, + schema: Schema, /// Return value for summary() table_summary: TableSummary, @@ -294,7 +294,7 @@ impl TestChunk { let table_name = table_name.into(); Self { table_name, - schema: Arc::new(SchemaBuilder::new().build().unwrap()), + schema: SchemaBuilder::new().build().unwrap(), table_summary: TableSummary::default(), id: ChunkId::new_test(0), may_contain_pk_duplicates: Default::default(), @@ -552,9 +552,7 @@ impl TestChunk { ) -> Self { let mut merger = 
SchemaMerger::new(); merger = merger.merge(&new_column_schema).unwrap(); - merger = merger - .merge(self.schema.as_ref()) - .expect("merging was successful"); + merger = merger.merge(&self.schema).expect("merging was successful"); self.schema = merger.build(); for i in 0..new_column_schema.len() { @@ -627,7 +625,7 @@ impl TestChunk { .collect::<Vec<_>>(); let batch = - RecordBatch::try_new(self.schema.as_ref().into(), columns).expect("made record batch"); + RecordBatch::try_new(self.schema.as_arrow(), columns).expect("made record batch"); if !self.quiet { println!("TestChunk batch data: {:#?}", batch); } @@ -667,7 +665,7 @@ impl TestChunk { .collect::<Vec<_>>(); let batch = - RecordBatch::try_new(self.schema.as_ref().into(), columns).expect("made record batch"); + RecordBatch::try_new(self.schema.as_arrow(), columns).expect("made record batch"); if !self.quiet { println!("TestChunk batch data: {:#?}", batch); } @@ -731,7 +729,7 @@ impl TestChunk { .collect::<Vec<_>>(); let batch = - RecordBatch::try_new(self.schema.as_ref().into(), columns).expect("made record batch"); + RecordBatch::try_new(self.schema.as_arrow(), columns).expect("made record batch"); self.table_data.push(Arc::new(batch)); self @@ -794,7 +792,7 @@ impl TestChunk { .collect::<Vec<_>>(); let batch = - RecordBatch::try_new(self.schema.as_ref().into(), columns).expect("made record batch"); + RecordBatch::try_new(self.schema.as_arrow(), columns).expect("made record batch"); self.table_data.push(Arc::new(batch)); self @@ -864,7 +862,7 @@ impl TestChunk { .collect::<Vec<_>>(); let batch = - RecordBatch::try_new(self.schema.as_ref().into(), columns).expect("made record batch"); + RecordBatch::try_new(self.schema.as_arrow(), columns).expect("made record batch"); self.table_data.push(Arc::new(batch)); self @@ -941,7 +939,7 @@ impl TestChunk { .collect::<Vec<_>>(); let batch = - RecordBatch::try_new(self.schema.as_ref().into(), columns).expect("made record batch"); + RecordBatch::try_new(self.schema.as_arrow(), columns).expect("made record batch"); self.table_data.push(Arc::new(batch)); self @@ -1069,8 +1067,8 @@ impl QueryChunkMeta for TestChunk { Arc::new(self.table_summary.clone()) } - fn schema(&self) -> Arc<Schema> { - Arc::clone(&self.schema) + fn schema(&self) -> &Schema { + &self.schema } fn partition_sort_key(&self) -> Option<&SortKey> { diff --git a/iox_tests/src/util.rs b/iox_tests/src/util.rs index 219552c4a4..7983f037df 100644 --- a/iox_tests/src/util.rs +++ b/iox_tests/src/util.rs @@ -424,11 +424,7 @@ impl TestTable { .collect(); let schema = table_schema.select_by_names(&selection).unwrap(); - let chunk = ParquetChunk::new( - Arc::new(file), - Arc::new(schema), - self.catalog.parquet_store.clone(), - ); + let chunk = ParquetChunk::new(Arc::new(file), schema, self.catalog.parquet_store.clone()); chunk .parquet_exec_input() .read_to_batches( @@ -624,7 +620,7 @@ impl TestPartition { ); let row_count = record_batch.num_rows(); assert!(row_count > 0, "Parquet file must have at least 1 row"); - let (record_batch, sort_key) = sort_batch(record_batch, schema.clone()); + let (record_batch, sort_key) = sort_batch(record_batch, &schema); let record_batch = dedup_batch(record_batch, &sort_key); let object_store_id = object_store_id.unwrap_or_else(Uuid::new_v4); @@ -974,7 +970,7 @@ impl TestParquetFile { } /// Get Parquet file schema. 
- pub async fn schema(&self) -> Arc<Schema> { + pub async fn schema(&self) -> Schema { let table_schema = self.table.catalog_schema().await; let column_id_lookup = table_schema.column_id_map(); let selection: Vec<_> = self @@ -984,7 +980,7 @@ impl TestParquetFile { .map(|id| *column_id_lookup.get(id).unwrap()) .collect(); let table_schema: Schema = table_schema.clone().try_into().unwrap(); - Arc::new(table_schema.select_by_names(&selection).unwrap()) + table_schema.select_by_names(&selection).unwrap() } } @@ -1018,9 +1014,9 @@ pub fn now() -> Time { } /// Sort arrow record batch into arrow record batch and sort key. -fn sort_batch(record_batch: RecordBatch, schema: Schema) -> (RecordBatch, SortKey) { +fn sort_batch(record_batch: RecordBatch, schema: &Schema) -> (RecordBatch, SortKey) { // calculate realistic sort key - let sort_key = compute_sort_key(&schema, std::iter::once(&record_batch)); + let sort_key = compute_sort_key(schema, std::iter::once(&record_batch)); // set up sorting let mut sort_columns = Vec::with_capacity(record_batch.num_columns()); diff --git a/parquet_file/src/chunk.rs b/parquet_file/src/chunk.rs index d125f09ef2..5aa9084e67 100644 --- a/parquet_file/src/chunk.rs +++ b/parquet_file/src/chunk.rs @@ -18,7 +18,7 @@ pub struct ParquetChunk { parquet_file: Arc<ParquetFile>, /// Schema that goes with this table's parquet file - schema: Arc<Schema>, + schema: Schema, /// Persists the parquet file within a namespace's relative path store: ParquetStorage, @@ -26,7 +26,7 @@ pub struct ParquetChunk { impl ParquetChunk { /// Create parquet chunk. - pub fn new(parquet_file: Arc<ParquetFile>, schema: Arc<Schema>, store: ParquetStorage) -> Self { + pub fn new(parquet_file: Arc<ParquetFile>, schema: Schema, store: ParquetStorage) -> Self { Self { parquet_file, schema, @@ -55,9 +55,9 @@ impl ParquetChunk { mem::size_of_val(self) + self.parquet_file.size() - mem::size_of_val(&self.parquet_file) } - /// Infallably return the full schema (for all columns) for this chunk - pub fn schema(&self) -> Arc<Schema> { - Arc::clone(&self.schema) + /// Infallibly return the full schema (for all columns) for this chunk + pub fn schema(&self) -> &Schema { + &self.schema } /// Return the columns names that belong to the given column selection diff --git a/parquet_file/src/metadata.rs b/parquet_file/src/metadata.rs index a2875f697d..470e2afc45 100644 --- a/parquet_file/src/metadata.rs +++ b/parquet_file/src/metadata.rs @@ -760,7 +760,7 @@ impl DecodedIoxParquetMetaData { } /// Read IOx schema from parquet metadata. - pub fn read_schema(&self) -> Result<Arc<Schema>> { + pub fn read_schema(&self) -> Result<Schema> { let file_metadata = self.md.file_metadata(); let arrow_schema = parquet_to_arrow_schema( @@ -776,10 +776,9 @@ impl DecodedIoxParquetMetaData { // as this metadata will vary from file to file let arrow_schema_ref = Arc::new(arrow_schema.with_metadata(Default::default())); - let schema: Schema = arrow_schema_ref + arrow_schema_ref .try_into() - .context(IoxFromArrowFailureSnafu {})?; - Ok(Arc::new(schema)) + .context(IoxFromArrowFailureSnafu {}) } /// Read IOx statistics (including timestamp range) from parquet metadata. diff --git a/predicate/src/rpc_predicate.rs b/predicate/src/rpc_predicate.rs index cf47cbd01e..e2b124ca0f 100644 --- a/predicate/src/rpc_predicate.rs +++ b/predicate/src/rpc_predicate.rs @@ -169,7 +169,9 @@ pub trait QueryNamespaceMeta { fn table_names(&self) -> Vec<String>; /// Schema for a specific table if the table exists. 
- fn table_schema(&self, table_name: &str) -> Option<Arc<Schema>>; + /// + /// TODO: Make this return Option<&Schema> + fn table_schema(&self, table_name: &str) -> Option<Schema>; } /// Predicate that has been "specialized" / normalized for a @@ -204,13 +206,13 @@ pub trait QueryNamespaceMeta { /// ``` fn normalize_predicate( table_name: &str, - schema: Arc<Schema>, + schema: Schema, predicate: &Predicate, ) -> DataFusionResult<Predicate> { let mut predicate = predicate.clone(); - let mut field_projections = FieldProjectionRewriter::new(Arc::clone(&schema)); - let mut missing_tag_columns = MissingTagColumnRewriter::new(Arc::clone(&schema)); + let mut field_projections = FieldProjectionRewriter::new(schema.clone()); + let mut missing_tag_columns = MissingTagColumnRewriter::new(schema.clone()); let mut field_value_exprs = vec![]; @@ -221,7 +223,7 @@ fn normalize_predicate( .exprs .into_iter() .map(|e| { - let simplifier = ExprSimplifier::new(SimplifyAdapter::new(schema.as_ref())); + let simplifier = ExprSimplifier::new(SimplifyAdapter::new(&schema)); debug!(?e, "rewriting expr"); @@ -349,10 +351,9 @@ mod tests { #[test] fn test_normalize_predicate_coerced() { - let schema = schema(); let predicate = normalize_predicate( "table", - Arc::clone(&schema), + schema(), &Predicate::new().with_expr(col("t1").eq(lit("f1"))), ) .unwrap(); @@ -470,8 +471,8 @@ mod tests { assert_eq!(predicate, expected); } - fn schema() -> Arc<Schema> { - let schema = schema::builder::SchemaBuilder::new() + fn schema() -> Schema { + schema::builder::SchemaBuilder::new() .tag("t1") .tag("t2") .field("f1", DataType::Int64) @@ -479,9 +480,7 @@ mod tests { .field("f2", DataType::Int64) .unwrap() .build() - .unwrap(); - - Arc::new(schema) + .unwrap() } #[allow(dead_code)] diff --git a/predicate/src/rpc_predicate/column_rewrite.rs b/predicate/src/rpc_predicate/column_rewrite.rs index 2c24c1bbc4..eeed0202bc 100644 --- a/predicate/src/rpc_predicate/column_rewrite.rs +++ b/predicate/src/rpc_predicate/column_rewrite.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use datafusion::{ error::Result as DataFusionResult, logical_expr::expr_rewriter::ExprRewriter, prelude::*, scalar::ScalarValue, @@ -11,12 +9,12 @@ use schema::{InfluxColumnType, Schema}; #[derive(Debug)] pub(crate) struct MissingTagColumnRewriter { /// The input schema - schema: Arc<Schema>, + schema: Schema, } impl MissingTagColumnRewriter { /// Create a new [`MissingTagColumnRewriter`] targeting the given schema - pub(crate) fn new(schema: Arc<Schema>) -> Self { + pub(crate) fn new(schema: Schema) -> Self { Self { schema } } @@ -105,7 +103,7 @@ mod tests { .build() .unwrap(); - let mut rewriter = MissingTagColumnRewriter::new(Arc::new(schema)); + let mut rewriter = MissingTagColumnRewriter::new(schema); expr.rewrite(&mut rewriter).unwrap() } } diff --git a/predicate/src/rpc_predicate/field_rewrite.rs b/predicate/src/rpc_predicate/field_rewrite.rs index af3baf2f87..93146955d4 100644 --- a/predicate/src/rpc_predicate/field_rewrite.rs +++ b/predicate/src/rpc_predicate/field_rewrite.rs @@ -45,12 +45,12 @@ pub(crate) struct FieldProjectionRewriter { /// output field_predicates: Vec<Expr>, /// The input schema (from where we know the field) - schema: Arc<Schema>, + schema: Schema, } impl FieldProjectionRewriter { /// Create a new [`FieldProjectionRewriter`] targeting the given schema - pub(crate) fn new(schema: Arc<Schema>) -> Self { + pub(crate) fn new(schema: Schema) -> Self { Self { field_predicates: vec![], schema, @@ -429,7 +429,7 @@ mod tests { "Running test\ninput: 
{:?}\nexpected_expr: {:?}\nexpected_field_columns: {:?}\n", input, exp_expr, exp_field_columns ); - let mut rewriter = FieldProjectionRewriter::new(Arc::clone(&schema)); + let mut rewriter = FieldProjectionRewriter::new(schema.clone()); let rewritten = rewriter.rewrite_field_exprs(input).unwrap(); assert_eq!(rewritten, exp_expr); @@ -471,9 +471,8 @@ mod tests { input, exp_error ); - let schema = Arc::clone(&schema); - let run_case = move || { - let mut rewriter = FieldProjectionRewriter::new(schema); + let run_case = || { + let mut rewriter = FieldProjectionRewriter::new(schema.clone()); // check for error in rewrite_field_exprs rewriter.rewrite_field_exprs(input)?; // check for error adding to predicate @@ -500,7 +499,7 @@ mod tests { query_functions::regex_not_match_expr(arg, pattern.into()) } - fn make_schema() -> Arc<Schema> { + fn make_schema() -> Schema { SchemaBuilder::new() .tag("foo") .tag("bar") @@ -513,7 +512,6 @@ mod tests { .field("f4", DataType::Float64) .unwrap() .build() - .map(Arc::new) .unwrap() } } diff --git a/querier/src/cache/namespace.rs b/querier/src/cache/namespace.rs index 8032be7426..7d862c97ed 100644 --- a/querier/src/cache/namespace.rs +++ b/querier/src/cache/namespace.rs @@ -206,7 +206,7 @@ impl NamespaceCache { #[derive(Debug, Clone, PartialEq, Eq)] pub struct CachedTable { pub id: TableId, - pub schema: Arc<Schema>, + pub schema: Schema, pub column_id_map: HashMap<ColumnId, Arc<str>>, pub column_id_map_rev: HashMap<Arc<str>, ColumnId>, pub primary_key_column_ids: Vec<ColumnId>, @@ -242,7 +242,7 @@ impl From<TableSchema> for CachedTable { column_id_map.shrink_to_fit(); let id = table.id; - let schema: Arc<Schema> = Arc::new(table.try_into().expect("Catalog table schema broken")); + let schema: Schema = table.try_into().expect("Catalog table schema broken"); let mut column_id_map_rev: HashMap<Arc<str>, ColumnId> = column_id_map .iter() @@ -368,15 +368,13 @@ mod tests { Arc::from("table1"), Arc::new(CachedTable { id: table11.table.id, - schema: Arc::new( - SchemaBuilder::new() - .field("col1", DataType::Int64) - .unwrap() - .tag("col2") - .timestamp() - .build() - .unwrap(), - ), + schema: SchemaBuilder::new() + .field("col1", DataType::Int64) + .unwrap() + .tag("col2") + .timestamp() + .build() + .unwrap(), column_id_map: HashMap::from([ (col111.column.id, Arc::from(col111.column.name.clone())), (col112.column.id, Arc::from(col112.column.name.clone())), @@ -394,14 +392,12 @@ mod tests { Arc::from("table2"), Arc::new(CachedTable { id: table12.table.id, - schema: Arc::new( - SchemaBuilder::new() - .field("col1", DataType::Float64) - .unwrap() - .timestamp() - .build() - .unwrap(), - ), + schema: SchemaBuilder::new() + .field("col1", DataType::Float64) + .unwrap() + .timestamp() + .build() + .unwrap(), column_id_map: HashMap::from([ (col121.column.id, Arc::from(col121.column.name.clone())), (col122.column.id, Arc::from(col122.column.name.clone())), @@ -433,7 +429,7 @@ mod tests { Arc::from("table1"), Arc::new(CachedTable { id: table21.table.id, - schema: Arc::new(SchemaBuilder::new().timestamp().build().unwrap()), + schema: SchemaBuilder::new().timestamp().build().unwrap(), column_id_map: HashMap::from([( col211.column.id, Arc::from(col211.column.name.clone()), diff --git a/querier/src/cache/partition.rs b/querier/src/cache/partition.rs index 5640db4fbe..c62f92da06 100644 --- a/querier/src/cache/partition.rs +++ b/querier/src/cache/partition.rs @@ -532,7 +532,7 @@ mod tests { assert_histogram_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); } - 
fn schema() -> Arc<Schema> { - Arc::new(SchemaBuilder::new().build().unwrap()) + fn schema() -> Schema { + SchemaBuilder::new().build().unwrap() } } diff --git a/querier/src/cache/projected_schema.rs b/querier/src/cache/projected_schema.rs index 1408d609db..4213324293 100644 --- a/querier/src/cache/projected_schema.rs +++ b/querier/src/cache/projected_schema.rs @@ -58,7 +58,7 @@ impl CacheKey { type CacheT = Box< dyn Cache< K = CacheKey, - V = Arc<Schema>, + V = Schema, GetExtra = (Arc<CachedTable>, Option<Span>), PeekExtra = ((), Option<Span>), >, @@ -97,12 +97,10 @@ impl ProjectedSchemaCache { // order by name since IDs are rather arbitrary projection.sort(); - Arc::new( - table - .schema - .select_by_names(&projection) - .expect("Bug in schema projection"), - ) + table + .schema + .select_by_names(&projection) + .expect("Bug in schema projection") }); let loader = Arc::new(MetricsLoader::new( loader, @@ -117,7 +115,7 @@ impl ProjectedSchemaCache { backend.add_policy(LruPolicy::new( Arc::clone(&ram_pool), CACHE_ID, - Arc::new(FunctionEstimator::new(|k: &CacheKey, v: &Arc<Schema>| { + Arc::new(FunctionEstimator::new(|k: &CacheKey, v: &Schema| { RamSize(k.size() + size_of_val(v) + v.estimate_size()) })), )); @@ -148,7 +146,7 @@ impl ProjectedSchemaCache { table: Arc<CachedTable>, projection: Vec<ColumnId>, span: Option<Span>, - ) -> Arc<Schema> { + ) -> Schema { let key = CacheKey::new(table.id, projection); self.cache.get(key, (table, span)).await @@ -176,25 +174,21 @@ mod tests { let table_id_1 = TableId::new(1); let table_id_2 = TableId::new(2); - let table_schema_a = Arc::new( - SchemaBuilder::new() - .tag("t1") - .tag("t2") - .tag("t3") - .timestamp() - .build() - .unwrap(), - ); - let table_schema_b = Arc::new( - SchemaBuilder::new() - .tag("t1") - .tag("t2") - .tag("t3") - .tag("t4") - .timestamp() - .build() - .unwrap(), - ); + let table_schema_a = SchemaBuilder::new() + .tag("t1") + .tag("t2") + .tag("t3") + .timestamp() + .build() + .unwrap(); + let table_schema_b = SchemaBuilder::new() + .tag("t1") + .tag("t2") + .tag("t3") + .tag("t4") + .timestamp() + .build() + .unwrap(); let column_id_map_a = HashMap::from([ (ColumnId::new(1), Arc::from("t1")), (ColumnId::new(2), Arc::from("t2")), @@ -210,7 +204,7 @@ mod tests { ]); let table_1a = Arc::new(CachedTable { id: table_id_1, - schema: Arc::clone(&table_schema_a), + schema: table_schema_a.clone(), column_id_map: column_id_map_a.clone(), column_id_map_rev: reverse_map(&column_id_map_a), primary_key_column_ids: vec![ @@ -222,7 +216,7 @@ mod tests { }); let table_1b = Arc::new(CachedTable { id: table_id_1, - schema: Arc::clone(&table_schema_b), + schema: table_schema_b.clone(), column_id_map: column_id_map_b.clone(), column_id_map_rev: reverse_map(&column_id_map_b), primary_key_column_ids: vec![ @@ -234,7 +228,7 @@ mod tests { }); let table_2a = Arc::new(CachedTable { id: table_id_2, - schema: Arc::clone(&table_schema_a), + schema: table_schema_a.clone(), column_id_map: column_id_map_a.clone(), column_id_map_rev: reverse_map(&column_id_map_a), primary_key_column_ids: vec![ @@ -247,7 +241,7 @@ mod tests { }); // initial request - let expected = Arc::new(SchemaBuilder::new().tag("t1").tag("t2").build().unwrap()); + let expected = SchemaBuilder::new().tag("t1").tag("t2").build().unwrap(); let projection_1 = cache .get( Arc::clone(&table_1a), @@ -265,7 +259,7 @@ mod tests { None, ) .await; - assert!(Arc::ptr_eq(&projection_1, &projection_2)); + assert!(Arc::ptr_eq(projection_1.inner(), projection_2.inner())); // updated table schema let 
projection_3 = cache @@ -275,7 +269,7 @@ mod tests { None, ) .await; - assert!(Arc::ptr_eq(&projection_1, &projection_3)); + assert!(Arc::ptr_eq(projection_1.inner(), projection_3.inner())); // different column order let projection_4 = cache @@ -285,10 +279,10 @@ mod tests { None, ) .await; - assert!(Arc::ptr_eq(&projection_1, &projection_4)); + assert!(Arc::ptr_eq(projection_1.inner(), projection_4.inner())); // different columns set - let expected = Arc::new(SchemaBuilder::new().tag("t1").tag("t3").build().unwrap()); + let expected = SchemaBuilder::new().tag("t1").tag("t3").build().unwrap(); let projection_5 = cache .get( Arc::clone(&table_1a), @@ -307,7 +301,7 @@ mod tests { ) .await; assert_eq!(projection_6, projection_1); - assert!(!Arc::ptr_eq(&projection_1, &projection_6)); + assert!(!Arc::ptr_eq(projection_1.inner(), projection_6.inner())); // original data still present let projection_7 = cache @@ -317,7 +311,7 @@ mod tests { None, ) .await; - assert!(Arc::ptr_eq(&projection_1, &projection_7)); + assert!(Arc::ptr_eq(projection_1.inner(), projection_7.inner())); } fn reverse_map<K, V>(map: &HashMap<K, V>) -> HashMap<V, K> diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index 8e5823f826..992cbb30f7 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -617,7 +617,7 @@ impl IngesterStreamDecoder { .take() .expect("Partition should have been checked before chunk creation"); self.current_partition = - Some(current_partition.try_add_chunk(ChunkId::new(), Arc::new(schema), batches)?); + Some(current_partition.try_add_chunk(ChunkId::new(), schema, batches)?); } Ok(()) @@ -1043,7 +1043,7 @@ impl IngesterPartition { pub(crate) fn try_add_chunk( mut self, chunk_id: ChunkId, - expected_schema: Arc<Schema>, + expected_schema: Schema, batches: Vec<RecordBatch>, ) -> Result<Self> { // ignore chunk if there are no batches @@ -1059,7 +1059,7 @@ impl IngesterPartition { // details) let batches = batches .into_iter() - .map(|batch| ensure_schema(batch, expected_schema.as_ref())) + .map(|batch| ensure_schema(batch, &expected_schema)) .collect::<Result<Vec<RecordBatch>>>()?; // TODO: may want to ask the Ingester to send this value instead of computing it here. @@ -1141,7 +1141,7 @@ impl IngesterPartition { pub struct IngesterChunk { chunk_id: ChunkId, partition_id: PartitionId, - schema: Arc<Schema>, + schema: Schema, /// Partition-wide sort key. 
partition_sort_key: Option<Arc<SortKey>>, @@ -1198,9 +1198,9 @@ impl QueryChunkMeta for IngesterChunk { Arc::clone(&self.summary) } - fn schema(&self) -> Arc<Schema> { + fn schema(&self) -> &Schema { trace!(schema=?self.schema, "IngesterChunk schema"); - Arc::clone(&self.schema) + &self.schema } fn partition_sort_key(&self) -> Option<&SortKey> { @@ -2011,16 +2011,14 @@ mod tests { .await } - fn schema() -> Arc<Schema> { - Arc::new( - SchemaBuilder::new() - .influx_field("bar", InfluxFieldType::Float) - .influx_field("baz", InfluxFieldType::Float) - .influx_field("foo", InfluxFieldType::Float) - .timestamp() - .build() - .unwrap(), - ) + fn schema() -> Schema { + SchemaBuilder::new() + .influx_field("bar", InfluxFieldType::Float) + .influx_field("baz", InfluxFieldType::Float) + .influx_field("foo", InfluxFieldType::Float) + .timestamp() + .build() + .unwrap() } fn lp_to_record_batch(lp: &str) -> RecordBatch { @@ -2165,7 +2163,7 @@ mod tests { #[test] fn test_ingester_partition_type_cast() { - let expected_schema = Arc::new(SchemaBuilder::new().tag("t").timestamp().build().unwrap()); + let expected_schema = SchemaBuilder::new().tag("t").timestamp().build().unwrap(); let cases = vec![ // send a batch that matches the schema exactly @@ -2188,7 +2186,7 @@ mod tests { tombstone_max_sequence_number, None, ) - .try_add_chunk(ChunkId::new(), Arc::clone(&expected_schema), vec![case]) + .try_add_chunk(ChunkId::new(), expected_schema.clone(), vec![case]) .unwrap(); for batch in &ingester_partition.chunks[0].batches { @@ -2199,15 +2197,12 @@ mod tests { #[test] fn test_ingester_partition_fail_type_cast() { - let expected_schema = Arc::new( - SchemaBuilder::new() - .field("b", DataType::Boolean) - .unwrap() - .timestamp() - .build() - .unwrap(), - ); - + let expected_schema = SchemaBuilder::new() + .field("b", DataType::Boolean) + .unwrap() + .timestamp() + .build() + .unwrap(); let batch = RecordBatch::try_from_iter(vec![("b", int64_array()), ("time", ts_array())]).unwrap(); @@ -2223,7 +2218,7 @@ mod tests { tombstone_max_sequence_number, None, ) - .try_add_chunk(ChunkId::new(), Arc::clone(&expected_schema), vec![batch]) + .try_add_chunk(ChunkId::new(), expected_schema, vec![batch]) .unwrap_err(); assert_matches!(err, Error::RecordBatchType { .. 
}); diff --git a/querier/src/ingester/test_util.rs b/querier/src/ingester/test_util.rs index 3320b46958..7a0c5a7149 100644 --- a/querier/src/ingester/test_util.rs +++ b/querier/src/ingester/test_util.rs @@ -91,18 +91,17 @@ impl IngesterConnection for MockIngesterConnection { let total_row_count = batches.iter().map(|b| b.num_rows()).sum::<usize>() as u64; + let summary = + create_basic_summary(total_row_count, &new_schema, ic.ts_min_max); + super::IngesterChunk { chunk_id: ic.chunk_id, partition_id: ic.partition_id, - schema: Arc::new(new_schema.clone()), + schema: new_schema, partition_sort_key: ic.partition_sort_key, batches, ts_min_max: ic.ts_min_max, - summary: Arc::new(create_basic_summary( - total_row_count, - &new_schema, - ic.ts_min_max, - )), + summary: Arc::new(summary), } }) .collect::<Vec<_>>(); diff --git a/querier/src/namespace/mod.rs b/querier/src/namespace/mod.rs index 9f85b1d57a..c595570a8f 100644 --- a/querier/src/namespace/mod.rs +++ b/querier/src/namespace/mod.rs @@ -72,7 +72,7 @@ impl QuerierNamespace { namespace_retention_period: ns.retention_period, table_id: cached_table.id, table_name: Arc::clone(table_name), - schema: Arc::clone(&cached_table.schema), + schema: cached_table.schema.clone(), ingester_connection: ingester_connection.clone(), chunk_adapter: Arc::clone(&chunk_adapter), exec: Arc::clone(&exec), @@ -184,7 +184,7 @@ mod tests { let qns = querier_namespace(&ns).await; let expected_schema = SchemaBuilder::new().build().unwrap(); let actual_schema = schema(&qns, "table"); - assert_eq!(actual_schema.as_ref(), &expected_schema,); + assert_eq!(actual_schema, &expected_schema,); table.create_column("col1", ColumnType::I64).await; table.create_column("col2", ColumnType::Bool).await; @@ -197,7 +197,7 @@ mod tests { .build() .unwrap(); let actual_schema = schema(&qns, "table"); - assert_eq!(actual_schema.as_ref(), &expected_schema,); + assert_eq!(actual_schema, &expected_schema); table.create_column("col4", ColumnType::Tag).await; table @@ -213,7 +213,7 @@ mod tests { .build() .unwrap(); let actual_schema = schema(&qns, "table"); - assert_eq!(actual_schema.as_ref(), &expected_schema,); + assert_eq!(actual_schema, &expected_schema); } fn sorted<T>(mut v: Vec<T>) -> Vec<T> @@ -234,7 +234,7 @@ mod tests { ) } - fn schema(querier_namespace: &QuerierNamespace, table: &str) -> Arc<Schema> { - Arc::clone(querier_namespace.tables.get(table).unwrap().schema()) + fn schema<'a>(querier_namespace: &'a QuerierNamespace, table: &str) -> &'a Schema { + querier_namespace.tables.get(table).unwrap().schema() } } diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs index 0cb71ce49a..da8b76aed1 100644 --- a/querier/src/namespace/query_access.rs +++ b/querier/src/namespace/query_access.rs @@ -31,8 +31,8 @@ impl QueryNamespaceMeta for QuerierNamespace { names } - fn table_schema(&self, table_name: &str) -> Option<Arc<Schema>> { - self.tables.get(table_name).map(|t| Arc::clone(t.schema())) + fn table_schema(&self, table_name: &str) -> Option<Schema> { + self.tables.get(table_name).map(|t| t.schema().clone()) } } diff --git a/querier/src/parquet/creation.rs b/querier/src/parquet/creation.rs index f3ed8cbdb5..3bbe8d3248 100644 --- a/querier/src/parquet/creation.rs +++ b/querier/src/parquet/creation.rs @@ -84,11 +84,7 @@ impl ChunkAdapter { .collect(); // Prune on the most basic summary data (timestamps and column names) before trying to fully load the chunks - let keeps = match prune_summaries( - Arc::clone(&cached_table.schema), - &basic_summaries, - 
predicate, - ) { + let keeps = match prune_summaries(&cached_table.schema, &basic_summaries, predicate) { Ok(keeps) => keeps, Err(reason) => { // Ignore pruning failures here - the chunk pruner should have already logged them. diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs index 3544f24055..585bb4f5f8 100644 --- a/querier/src/parquet/mod.rs +++ b/querier/src/parquet/mod.rs @@ -6,7 +6,7 @@ use data_types::{ }; use iox_query::util::create_basic_summary; use parquet_file::chunk::ParquetChunk; -use schema::{sort::SortKey, Schema}; +use schema::sort::SortKey; use std::sync::Arc; mod creation; @@ -84,9 +84,6 @@ pub struct QuerierParquetChunk { /// Immutable chunk metadata meta: Arc<QuerierParquetChunkMeta>, - /// Schema of the chunk - schema: Arc<Schema>, - /// Delete predicates to be combined with the chunk delete_predicates: Vec<Arc<DeletePredicate>>, @@ -107,11 +104,9 @@ impl QuerierParquetChunk { meta: Arc<QuerierParquetChunkMeta>, partition_sort_key: Option<Arc<SortKey>>, ) -> Self { - let schema = parquet_chunk.schema(); - let table_summary = Arc::new(create_basic_summary( parquet_chunk.rows() as u64, - &parquet_chunk.schema(), + parquet_chunk.schema(), parquet_chunk.timestamp_min_max(), )); @@ -119,7 +114,6 @@ impl QuerierParquetChunk { meta, delete_predicates: Vec::new(), partition_sort_key, - schema, parquet_chunk, table_summary, } @@ -341,7 +335,7 @@ pub mod tests { .build() .unwrap(); let actual_schema = chunk.schema(); - assert_eq!(actual_schema.as_ref(), &expected_schema); + assert_eq!(actual_schema, &expected_schema); } fn assert_sort_key(chunk: &QuerierParquetChunk) { diff --git a/querier/src/parquet/query_access.rs b/querier/src/parquet/query_access.rs index 4a2b89c3fa..758c7dc41c 100644 --- a/querier/src/parquet/query_access.rs +++ b/querier/src/parquet/query_access.rs @@ -14,8 +14,8 @@ impl QueryChunkMeta for QuerierParquetChunk { Arc::clone(&self.table_summary) } - fn schema(&self) -> Arc<Schema> { - Arc::clone(&self.schema) + fn schema(&self) -> &Schema { + self.parquet_chunk.schema() } fn partition_sort_key(&self) -> Option<&SortKey> { diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs index bd923c28ed..e40cd5285b 100644 --- a/querier/src/table/mod.rs +++ b/querier/src/table/mod.rs @@ -76,7 +76,7 @@ pub struct QuerierTableArgs { pub namespace_retention_period: Option<Duration>, pub table_id: TableId, pub table_name: Arc<str>, - pub schema: Arc<Schema>, + pub schema: Schema, pub ingester_connection: Option<Arc<dyn IngesterConnection>>, pub chunk_adapter: Arc<ChunkAdapter>, pub exec: Arc<Executor>, @@ -106,7 +106,7 @@ pub struct QuerierTable { table_id: TableId, /// Table schema. - schema: Arc<Schema>, + schema: Schema, /// Connection to ingester ingester_connection: Option<Arc<dyn IngesterConnection>>, @@ -165,7 +165,7 @@ impl QuerierTable { } /// Schema. 
- pub fn schema(&self) -> &Arc<Schema> { + pub fn schema(&self) -> &Schema { &self.schema } @@ -372,7 +372,7 @@ impl QuerierTable { .prune_chunks( self.table_name(), // use up-to-date schema - Arc::clone(&cached_table.schema), + &cached_table.schema, chunks, &predicate, ) @@ -848,7 +848,7 @@ mod tests { let schema = make_schema_two_fields_two_tags(&table).await; // let add a partion from the ingester - let builder = IngesterPartitionBuilder::new(&schema, &shard, &partition) + let builder = IngesterPartitionBuilder::new(schema, &shard, &partition) .with_lp(["table,tag1=val1,tag2=val2 foo=3,bar=4 11"]); let ingester_partition = @@ -945,7 +945,7 @@ mod tests { .with_compaction_level(CompactionLevel::FileNonOverlapped); partition.create_parquet_file(builder).await; - let builder = IngesterPartitionBuilder::new(&schema, &shard, &partition); + let builder = IngesterPartitionBuilder::new(schema, &shard, &partition); let ingester_partition = builder.build_with_max_parquet_sequence_number(Some(SequenceNumber::new(1))); @@ -1015,18 +1015,16 @@ mod tests { .create_tombstone(12, 1, 100, "foo=3") .await; - let schema = Arc::new( - SchemaBuilder::new() - .influx_field("foo", InfluxFieldType::Integer) - .timestamp() - .build() - .unwrap(), - ); + let schema = SchemaBuilder::new() + .influx_field("foo", InfluxFieldType::Integer) + .timestamp() + .build() + .unwrap(); let ingester_chunk_id1 = u128::MAX - 1; - let builder1 = IngesterPartitionBuilder::new(&schema, &shard, &partition1); - let builder2 = IngesterPartitionBuilder::new(&schema, &shard, &partition2); + let builder1 = IngesterPartitionBuilder::new(schema.clone(), &shard, &partition1); + let builder2 = IngesterPartitionBuilder::new(schema, &shard, &partition2); let querier_table = TestQuerierTable::new(&catalog, &table) .await .with_ingester_partition( @@ -1111,16 +1109,14 @@ mod tests { let partition1 = table.with_shard(&shard).create_partition("k1").await; let partition2 = table.with_shard(&shard).create_partition("k2").await; - let schema = Arc::new( - SchemaBuilder::new() - .influx_field("foo", InfluxFieldType::Integer) - .timestamp() - .build() - .unwrap(), - ); + let schema = SchemaBuilder::new() + .influx_field("foo", InfluxFieldType::Integer) + .timestamp() + .build() + .unwrap(); - let builder1 = IngesterPartitionBuilder::new(&schema, &shard, &partition1); - let builder2 = IngesterPartitionBuilder::new(&schema, &shard, &partition2); + let builder1 = IngesterPartitionBuilder::new(schema.clone(), &shard, &partition1); + let builder2 = IngesterPartitionBuilder::new(schema, &shard, &partition2); let querier_table = TestQuerierTable::new(&catalog, &table) .await @@ -1172,7 +1168,7 @@ mod tests { let schema = make_schema(&table).await; let builder = - IngesterPartitionBuilder::new(&schema, &shard, &partition).with_lp(["table foo=1 1"]); + IngesterPartitionBuilder::new(schema, &shard, &partition).with_lp(["table foo=1 1"]); // Parquet file between with max sequence number 2 let pf_builder = TestParquetFileBuilder::default() @@ -1230,7 +1226,7 @@ mod tests { let querier_table = TestQuerierTable::new(&catalog, &table).await; let builder = - IngesterPartitionBuilder::new(&schema, &shard, &partition).with_lp(["table foo=1 1"]); + IngesterPartitionBuilder::new(schema, &shard, &partition).with_lp(["table foo=1 1"]); // parquet file with max sequence number 1 let pf_builder = TestParquetFileBuilder::default() @@ -1292,7 +1288,7 @@ mod tests { let querier_table = TestQuerierTable::new(&catalog, &table).await; let builder = - 
IngesterPartitionBuilder::new(&schema, &shard, &partition).with_lp(["table foo=1 1"]); + IngesterPartitionBuilder::new(schema, &shard, &partition).with_lp(["table foo=1 1"]); // parquet file with max sequence number 1 let pf_builder = TestParquetFileBuilder::default() @@ -1319,20 +1315,18 @@ mod tests { } /// Adds a "foo" column to the table and returns the created schema - async fn make_schema(table: &Arc<TestTable>) -> Arc<Schema> { + async fn make_schema(table: &Arc<TestTable>) -> Schema { table.create_column("foo", ColumnType::F64).await; table.create_column("time", ColumnType::Time).await; // create corresponding schema - Arc::new( - SchemaBuilder::new() - .influx_field("foo", InfluxFieldType::Float) - .timestamp() - .build() - .unwrap(), - ) + SchemaBuilder::new() + .influx_field("foo", InfluxFieldType::Float) + .timestamp() + .build() + .unwrap() } - async fn make_schema_two_fields_two_tags(table: &Arc<TestTable>) -> Arc<Schema> { + async fn make_schema_two_fields_two_tags(table: &Arc<TestTable>) -> Schema { table.create_column("time", ColumnType::Time).await; table.create_column("foo", ColumnType::F64).await; table.create_column("bar", ColumnType::F64).await; @@ -1340,16 +1334,14 @@ mod tests { table.create_column("tag2", ColumnType::Tag).await; // create corresponding schema - Arc::new( - SchemaBuilder::new() - .influx_field("foo", InfluxFieldType::Float) - .influx_field("bar", InfluxFieldType::Float) - .tag("tag1") - .tag("tag2") - .timestamp() - .build() - .unwrap(), - ) + SchemaBuilder::new() + .influx_field("foo", InfluxFieldType::Float) + .influx_field("bar", InfluxFieldType::Float) + .tag("tag1") + .tag("tag2") + .timestamp() + .build() + .unwrap() } /// A `QuerierTable` and some number of `IngesterPartitions` that diff --git a/querier/src/table/query_access/mod.rs b/querier/src/table/query_access/mod.rs index 80933e7a37..4ca1f2d0d4 100644 --- a/querier/src/table/query_access/mod.rs +++ b/querier/src/table/query_access/mod.rs @@ -54,7 +54,7 @@ impl TableProvider for QuerierTable { let mut builder = ProviderBuilder::new( Arc::clone(self.table_name()), - Arc::clone(self.schema()), + self.schema().clone(), iox_ctx, ); @@ -108,7 +108,7 @@ impl ChunkPruner for QuerierTableChunkPruner { fn prune_chunks( &self, _table_name: &str, - table_schema: Arc<Schema>, + table_schema: &Schema, chunks: Vec<Arc<dyn QueryChunk>>, predicate: &Predicate, ) -> Result<Vec<Arc<dyn QueryChunk>>, ProviderError> { diff --git a/querier/src/table/test_util.rs b/querier/src/table/test_util.rs index e5cf1daedf..a20f5217a5 100644 --- a/querier/src/table/test_util.rs +++ b/querier/src/table/test_util.rs @@ -29,7 +29,7 @@ pub async fn querier_table(catalog: &Arc<TestCatalog>, table: &Arc<TestTable>) - .await .unwrap(); let schema = catalog_schema.tables.remove(&table.table.name).unwrap(); - let schema = Arc::new(Schema::try_from(schema).unwrap()); + let schema = Schema::try_from(schema).unwrap(); let namespace_name = Arc::from(table.namespace.namespace.name.as_str()); @@ -63,7 +63,7 @@ pub(crate) fn lp_to_record_batch(lp: &str) -> RecordBatch { /// Helper for creating IngesterPartitions #[derive(Debug, Clone)] pub(crate) struct IngesterPartitionBuilder { - schema: Arc<Schema>, + schema: Schema, shard: Arc<TestShard>, partition: Arc<TestPartition>, ingester_name: Arc<str>, @@ -77,12 +77,12 @@ pub(crate) struct IngesterPartitionBuilder { impl IngesterPartitionBuilder { pub(crate) fn new( - schema: &Arc<Schema>, + schema: Schema, shard: &Arc<TestShard>, partition: &Arc<TestPartition>, ) -> Self { Self { - schema: 
Arc::clone(schema), + schema, shard: Arc::clone(shard), partition: Arc::clone(partition), ingester_name: Arc::from("ingester1"), @@ -135,7 +135,7 @@ impl IngesterPartitionBuilder { ) .try_add_chunk( ChunkId::new_test(self.ingester_chunk_id), - Arc::clone(&self.schema), + self.schema.clone(), data, ) .unwrap() diff --git a/schema/src/interner.rs b/schema/src/interner.rs index c78f2604a3..c1895e6ea6 100644 --- a/schema/src/interner.rs +++ b/schema/src/interner.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use crate::Schema; @@ -10,7 +10,7 @@ use crate::Schema; /// [Interning]: https://en.wikipedia.org/wiki/Interning_(computer_science) #[derive(Debug, Default)] pub struct SchemaInterner { - schemas: HashSet<Arc<Schema>>, + schemas: HashSet<Schema>, } impl SchemaInterner { @@ -20,12 +20,11 @@ impl SchemaInterner { } /// Intern schema. - pub fn intern(&mut self, schema: Schema) -> Arc<Schema> { + pub fn intern(&mut self, schema: Schema) -> Schema { if let Some(schema) = self.schemas.get(&schema) { - Arc::clone(schema) + schema.clone() } else { - let schema = Arc::new(schema); - self.schemas.insert(Arc::clone(&schema)); + self.schemas.insert(schema.clone()); schema } } @@ -34,6 +33,7 @@ impl SchemaInterner { #[cfg(test)] mod tests { use crate::builder::SchemaBuilder; + use std::sync::Arc; use super::*; @@ -46,12 +46,12 @@ mod tests { let schema_2 = SchemaBuilder::new().tag("t1").tag("t3").build().unwrap(); let interned_1a = interner.intern(schema_1a.clone()); - assert_eq!(interned_1a.as_ref(), &schema_1a); + assert_eq!(interned_1a, schema_1a); let interned_1b = interner.intern(schema_1b); - assert!(Arc::ptr_eq(&interned_1a, &interned_1b)); + assert!(Arc::ptr_eq(interned_1a.inner(), interned_1b.inner())); let interned_2 = interner.intern(schema_2.clone()); - assert_eq!(interned_2.as_ref(), &schema_2); + assert_eq!(interned_2, schema_2); } } diff --git a/schema/src/merge.rs b/schema/src/merge.rs index 8b718da069..139258e941 100644 --- a/schema/src/merge.rs +++ b/schema/src/merge.rs @@ -44,7 +44,7 @@ pub type Result<T, E = Error> = std::result::Result<T, E>; /// This is infallable because the schemas of chunks within a /// partition are assumed to be compatible because that schema was /// enforced as part of writing into the partition -pub fn merge_record_batch_schemas(batches: &[Arc<RecordBatch>]) -> Arc<Schema> { +pub fn merge_record_batch_schemas(batches: &[Arc<RecordBatch>]) -> Schema { let mut merger = SchemaMerger::new(); for batch in batches { let schema = Schema::try_from(batch.schema()).expect("Schema conversion error"); @@ -154,7 +154,7 @@ impl<'a> SchemaMerger<'a> { } /// Returns the schema that was built, the columns are always sorted in lexicographic order - pub fn build(mut self) -> Arc<Schema> { + pub fn build(mut self) -> Schema { let schema = Schema::new_from_parts( self.measurement.take(), self.fields.drain().map(|x| x.1), @@ -165,7 +165,7 @@ impl<'a> SchemaMerger<'a> { if let Some(interner) = self.interner.as_mut() { interner.intern(schema) } else { - Arc::new(schema) + schema } } } @@ -198,8 +198,8 @@ mod tests { .unwrap() .build(); - assert_eq!(merged_schema.as_ref(), &schema1); - assert_eq!(merged_schema.as_ref(), &schema2); + assert_eq!(merged_schema, schema1); + assert_eq!(merged_schema, schema2); } #[test] @@ -239,11 +239,9 @@ mod tests { .sort_fields_by_name(); assert_eq!( - &expected_schema, - merged_schema.as_ref(), + expected_schema, merged_schema, "\nExpected:\n{:#?}\nActual:\n{:#?}", - expected_schema, - merged_schema 
+ expected_schema, merged_schema ); } @@ -269,11 +267,9 @@ mod tests { .unwrap(); assert_eq!( - &expected_schema, - merged_schema.as_ref(), + expected_schema, merged_schema, "\nExpected:\n{:#?}\nActual:\n{:#?}", - expected_schema, - merged_schema + expected_schema, merged_schema ); } @@ -303,11 +299,9 @@ mod tests { .unwrap(); assert_eq!( - &expected_schema, - merged_schema.as_ref(), + expected_schema, merged_schema, "\nExpected:\n{:#?}\nActual:\n{:#?}", - expected_schema, - merged_schema + expected_schema, merged_schema ); } @@ -335,11 +329,9 @@ mod tests { .unwrap(); assert_eq!( - &expected_schema, - merged_schema.as_ref(), + expected_schema, merged_schema, "\nExpected:\n{:#?}\nActual:\n{:#?}", - expected_schema, - merged_schema + expected_schema, merged_schema ); } @@ -428,6 +420,9 @@ mod tests { .build(); assert_eq!(merged_schema_a, merged_schema_b); - assert!(Arc::ptr_eq(&merged_schema_a, &merged_schema_b)); + assert!(Arc::ptr_eq( + merged_schema_a.inner(), + merged_schema_b.inner() + )); } } diff --git a/service_grpc_influxrpc/src/expr.rs b/service_grpc_influxrpc/src/expr.rs index 7bab82bb08..e343e77f79 100644 --- a/service_grpc_influxrpc/src/expr.rs +++ b/service_grpc_influxrpc/src/expr.rs @@ -894,7 +894,7 @@ mod tests { use generated_types::node::Type as RPCNodeType; use predicate::{rpc_predicate::QueryNamespaceMeta, Predicate}; use schema::{Schema, SchemaBuilder}; - use std::{collections::BTreeSet, sync::Arc}; + use std::collections::BTreeSet; use test_helpers::assert_contains; use super::*; @@ -915,7 +915,7 @@ mod tests { self.table_names.clone() } - fn table_schema(&self, table_name: &str) -> Option<Arc<Schema>> { + fn table_schema(&self, table_name: &str) -> Option<Schema> { match table_name { "foo" => { let schema = SchemaBuilder::new() @@ -929,7 +929,7 @@ mod tests { .build() .unwrap(); - Some(Arc::new(schema)) + Some(schema) } "bar" => { let schema = SchemaBuilder::new() @@ -939,7 +939,7 @@ mod tests { .build() .unwrap(); - Some(Arc::new(schema)) + Some(schema) } _ => None, }
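The sweep above replaces `Arc<Schema>` with plain `Schema` throughout the predicate, querier, and schema crates. Below is a minimal sketch of why that is cheap, assuming (as the updated tests' use of `Arc::ptr_eq(a.inner(), b.inner())` suggests) that `Schema` already keeps its contents behind an `Arc`, so cloning it by value only bumps a reference count. The types here are illustrative stand-ins, not the real `schema` crate API.

```rust
use std::sync::Arc;

// Stand-in for the IOx `schema::Schema` type, assumed to hold its contents
// behind an `Arc` so that `clone()` is a refcount bump rather than a deep copy.
#[derive(Debug, Clone)]
struct Schema {
    inner: Arc<Vec<String>>, // column names, as a placeholder for the real payload
}

impl Schema {
    // Mirrors the `inner()` accessor the updated tests use to check that two
    // clones still share the same underlying allocation.
    fn inner(&self) -> &Arc<Vec<String>> {
        &self.inner
    }
}

fn main() {
    let a = Schema {
        inner: Arc::new(vec!["t1".to_string(), "time".to_string()]),
    };
    let b = a.clone(); // cheap clone: no new allocation of the column data
    assert!(Arc::ptr_eq(a.inner(), b.inner()));
}
```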
50ab9afd5e7e3937a91432e807f4d2d6fab56f12
Marco Neumann
2023-08-22 15:30:47
add schema+batch serde for ingester->querier V2 (#8498)
* feat: `PartitionIdentifier` serde
* fix: typo
* refactor: use `Bytes` for V2 protocols
* feat: add schema+batch serde for i->q V2

----------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: add schema+batch serde for ingester->querier V2 (#8498)

* feat: `PartitionIdentifier` serde
* fix: typo
* refactor: use `Bytes` for V2 protocols
* feat: add schema+batch serde for i->q V2

---------

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 0317f355ca..7a0e57ea9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2713,10 +2713,13 @@ dependencies = [ name = "ingester_query_grpc" version = "0.1.0" dependencies = [ + "arrow", "base64 0.21.2", + "bytes", "data_types", "datafusion", "datafusion-proto", + "flatbuffers", "pbjson", "pbjson-build", "predicate", diff --git a/ingester_query_grpc/Cargo.toml b/ingester_query_grpc/Cargo.toml index 5b097924ba..021c6b34a3 100644 --- a/ingester_query_grpc/Cargo.toml +++ b/ingester_query_grpc/Cargo.toml @@ -6,10 +6,13 @@ edition.workspace = true license.workspace = true [dependencies] # In alphabetical order +arrow = { workspace = true, features = ["prettyprint", "dyn_cmp_dict"] } +bytes = "1.4" base64 = "0.21" data_types = { path = "../data_types" } datafusion = { workspace = true } datafusion-proto = { workspace = true } +flatbuffers = "23.5.26" pbjson = "0.5" predicate = { path = "../predicate" } prost = "0.11" @@ -23,4 +26,3 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" } tonic-build = { workspace = true } prost-build = "0.11" pbjson-build = "0.5" - diff --git a/ingester_query_grpc/build.rs b/ingester_query_grpc/build.rs index 729d21a4bc..34e925fda1 100644 --- a/ingester_query_grpc/build.rs +++ b/ingester_query_grpc/build.rs @@ -41,7 +41,8 @@ fn generate_grpc_types(root: &Path) -> Result<()> { .extern_path(".google.protobuf", "::pbjson_types") .btree_map([ ".influxdata.iox.ingester.v1.IngesterQueryResponseMetadata.unpersisted_partitions", - ]); + ]) + .bytes([".influxdata.iox.ingester.v2"]); let descriptor_path = PathBuf::from(env::var("OUT_DIR").unwrap()).join("proto_descriptor.bin"); tonic_build::configure() diff --git a/ingester_query_grpc/protos/influxdata/iox/ingester/v2/query.proto b/ingester_query_grpc/protos/influxdata/iox/ingester/v2/query.proto index 8671c3a853..01babb8c8e 100644 --- a/ingester_query_grpc/protos/influxdata/iox/ingester/v2/query.proto +++ b/ingester_query_grpc/protos/influxdata/iox/ingester/v2/query.proto @@ -36,6 +36,24 @@ message Filters { repeated bytes exprs = 1; } +// Arrow encoded data. +message EncodedData { + // Data that describes the arrow payload. + bytes ipc_message = 1; + + // The actual arrow payload itself. + bytes arrow_data = 2; +} + +// An encoded Arrow RecordBatch w/o schema information. +message RecordBatch { + // Dictionary data. + repeated EncodedData dictionaries = 1; + + // Record batch itself. + EncodedData batch = 2; +} + message QueryRequest { // Namespace to search int64 namespace_id = 1; @@ -50,7 +68,7 @@ message QueryRequest { Filters filters = 4; } -message IngesterQueryReponseMetadata { +message IngesterQueryResponseMetadata { message Partition { // Partition ID. PartitionIdentifier id = 1; @@ -81,7 +99,7 @@ message IngesterQueryReponseMetadata { repeated Partition partitions = 4; } -message IngesterQueryReponsePayload { +message IngesterQueryResponsePayload { // Partition ID. PartitionIdentifier partition_id = 1; @@ -94,16 +112,16 @@ message IngesterQueryReponsePayload { repeated int64 projection = 2; // Serialized RecordBatch (w/o schema) - bytes record_batch = 3; + RecordBatch record_batch = 3; } message QueryResponse { oneof msg { // Metadata, this is ALWAYS the first message (even when there are no further messages) and MUST NOT be repeated. - IngesterQueryReponseMetadata metadata = 1; + IngesterQueryResponseMetadata metadata = 1; // Payload, following the first message. 
- IngesterQueryReponsePayload payload = 2; + IngesterQueryResponsePayload payload = 2; } } diff --git a/ingester_query_grpc/src/arrow_serde.rs b/ingester_query_grpc/src/arrow_serde.rs new file mode 100644 index 0000000000..8a6efcd9fa --- /dev/null +++ b/ingester_query_grpc/src/arrow_serde.rs @@ -0,0 +1,450 @@ +//! (De-)Serialization of Apache Arrow [`Schema`] and [`RecordBatch`] data. +//! +//! **⚠️ These routines are IOx-specific and MUST NOT be used as a public interface!** +//! +//! Specifically this is a custom protocol, similar to but not derived from Arrow Flight. +//! See <https://github.com/influxdata/influxdb_iox/issues/8169>. +use std::{collections::HashMap, sync::Arc}; + +use arrow::{ + buffer::Buffer, + datatypes::{DataType, Field, FieldRef, Schema, SchemaRef}, + error::ArrowError, + ipc::{ + convert::fb_to_schema, + reader::{read_dictionary, read_record_batch}, + root_as_message, + writer::{DictionaryTracker, EncodedData, IpcDataGenerator, IpcWriteOptions}, + }, + record_batch::{RecordBatch, RecordBatchOptions}, +}; +use bytes::Bytes; +use flatbuffers::InvalidFlatbuffer; +use snafu::{ensure, OptionExt, ResultExt, Snafu}; + +use crate::influxdata::iox::ingester::v2 as proto2; + +/// Serialize [`Schema`] to [`Bytes`]. +pub fn schema_to_bytes(schema: &Schema) -> Bytes { + let EncodedData { + ipc_message, + arrow_data, + } = IpcDataGenerator::default().schema_to_bytes(schema, &write_options()); + assert!( + arrow_data.is_empty(), + "arrow_data should always be empty for schema messages" + ); + ipc_message.into() +} + +#[derive(Debug, Snafu)] +#[snafu(module)] +pub enum BytesToSchemaError { + #[snafu(display("Unable to get root as message: {source}"))] + RootAsMessage { source: InvalidFlatbuffer }, + + #[snafu(display("Unable to read IPC message as schema, is: {variant}"))] + WrongMessageType { variant: &'static str }, +} + +/// Read [`Schema`] from bytes. +pub fn bytes_to_schema(data: &[u8]) -> Result<Schema, BytesToSchemaError> { + let message = root_as_message(data).context(bytes_to_schema_error::RootAsMessageSnafu)?; + let ipc_schema = + message + .header_as_schema() + .context(bytes_to_schema_error::WrongMessageTypeSnafu { + variant: message.header_type().variant_name().unwrap_or("<UNKNOWN>"), + })?; + let schema = fb_to_schema(ipc_schema); + Ok(schema) +} + +/// Encoder to read/write Arrow [`RecordBatch`]es from/to [`proto2::RecordBatch`]. +#[derive(Debug)] +pub struct BatchEncoder { + /// The original batch schema. + batch_schema: SchemaRef, + + /// Schema with unique dictionary IDs. 
+ dict_schema: SchemaRef, +} + +#[derive(Debug, Snafu)] +#[snafu(module)] +pub enum ProjectError { + #[snafu(display("Cannot project: {source}"))] + CannotProject { source: ArrowError }, +} + +#[derive(Debug, Snafu)] +#[snafu(module)] +pub enum WriteError { + #[snafu(display("Invalid batch schema\n\nActual:\n{actual}\n\nExpected:\n{expected}"))] + InvalidSchema { + actual: SchemaRef, + expected: SchemaRef, + }, +} + +#[derive(Debug, Snafu)] +#[snafu(module)] +pub enum ReadError { + #[snafu(display("Unable to get root as dictionary message #{idx} (0-based): {source}"))] + DictionaryRootAsMessage { + source: InvalidFlatbuffer, + idx: usize, + }, + + #[snafu(display("Unable to read IPC message #{idx} (0-based) as dictionary, is: {variant}"))] + DictionaryWrongMessageType { variant: &'static str, idx: usize }, + + #[snafu(display("Cannot read dictionary: {source}"))] + ReadDictionary { source: ArrowError }, + + #[snafu(display("Record batch is required but missing"))] + RecordBatchRequired, + + #[snafu(display("Unable to get root as record batch message: {source}"))] + RecordBatchRootAsMessage { source: InvalidFlatbuffer }, + + #[snafu(display("Unable to read IPC message as record batch, is: {variant}"))] + RecordBatchWrongMessageType { variant: &'static str }, + + #[snafu(display("Cannot read record batch: {source}"))] + ReadRecordBatch { source: ArrowError }, +} + +impl BatchEncoder { + /// Create new encoder. + /// + /// For schemas that contain dictionaries, this involves copying data and may be rather costly. If you can, try to + /// only do this once and use [`project`](Self::project) to select the right columns for the appropriate batch. + pub fn new(batch_schema: SchemaRef) -> Self { + let mut dict_id_counter = 0; + let dict_schema = Arc::new(Schema::new_with_metadata( + batch_schema + .fields() + .iter() + .map(|f| assign_dict_ids(f, &mut dict_id_counter)) + .collect::<Vec<_>>(), + batch_schema.metadata().clone(), + )); + Self { + batch_schema, + dict_schema, + } + } + + /// Project schema stored within this encoder. + pub fn project(&self, indices: &[usize]) -> Result<Self, ProjectError> { + Ok(Self { + batch_schema: Arc::new( + self.batch_schema + .project(indices) + .context(project_error::CannotProjectSnafu)?, + ), + dict_schema: Arc::new( + self.dict_schema + .project(indices) + .context(project_error::CannotProjectSnafu)?, + ), + }) + } + + /// Serialize batch. + pub fn write(&self, batch: &RecordBatch) -> Result<proto2::RecordBatch, WriteError> { + ensure!( + batch.schema() == self.batch_schema, + write_error::InvalidSchemaSnafu { + actual: batch.schema(), + expected: Arc::clone(&self.batch_schema), + } + ); + + let batch = reassign_schema(batch, Arc::clone(&self.dict_schema)); + + let mut dictionary_tracker = DictionaryTracker::new(true); + let (dictionaries, batch) = IpcDataGenerator::default() + .encoded_batch(&batch, &mut dictionary_tracker, &write_options()) + .expect("serialization w/o compression should NEVER fail"); + + Ok(proto2::RecordBatch { + dictionaries: dictionaries.into_iter().map(|enc| enc.into()).collect(), + batch: Some(batch.into()), + }) + } + + /// Deserialize batch. 
+ pub fn read(&self, batch: proto2::RecordBatch) -> Result<RecordBatch, ReadError> { + let proto2::RecordBatch { + dictionaries, + batch, + } = batch; + + let mut dictionaries_by_field = HashMap::with_capacity(dictionaries.len()); + for (idx, enc) in dictionaries.into_iter().enumerate() { + let proto2::EncodedData { + ipc_message, + arrow_data, + } = enc; + + let message = root_as_message(&ipc_message) + .context(read_error::DictionaryRootAsMessageSnafu { idx })?; + let dictionary_batch = message.header_as_dictionary_batch().context( + read_error::DictionaryWrongMessageTypeSnafu { + variant: message.header_type().variant_name().unwrap_or("<UNKNOWN>"), + idx, + }, + )?; + + read_dictionary( + &Buffer::from_vec(arrow_data.to_vec()), + dictionary_batch, + &self.dict_schema, + &mut dictionaries_by_field, + &message.version(), + ) + .context(read_error::ReadDictionarySnafu)?; + } + + let proto2::EncodedData { + ipc_message, + arrow_data, + } = batch.context(read_error::RecordBatchRequiredSnafu)?; + let message = + root_as_message(&ipc_message).context(read_error::RecordBatchRootAsMessageSnafu)?; + let record_batch = message.header_as_record_batch().context( + read_error::RecordBatchWrongMessageTypeSnafu { + variant: message.header_type().variant_name().unwrap_or("<UNKNOWN>"), + }, + )?; + + let batch = read_record_batch( + &Buffer::from_vec(arrow_data.to_vec()), + record_batch, + Arc::clone(&self.dict_schema), + &dictionaries_by_field, + None, + &message.version(), + ) + .context(read_error::ReadRecordBatchSnafu)?; + + Ok(reassign_schema(&batch, Arc::clone(&self.batch_schema))) + } +} + +/// Recursively assign unique dictionary IDs. +fn assign_dict_ids(field: &FieldRef, counter: &mut i64) -> FieldRef { + match field.data_type() { + DataType::Dictionary(_, _) => { + let dict_id = *counter; + *counter += 1; + Arc::new( + Field::new_dict( + field.name(), + field.data_type().clone(), + field.is_nullable(), + dict_id, + field.dict_is_ordered().expect("is dict type"), + ) + .with_metadata(field.metadata().clone()), + ) + } + DataType::Struct(fields) => { + let data_type = + DataType::Struct(fields.iter().map(|f| assign_dict_ids(f, counter)).collect()); + Arc::new(field.as_ref().clone().with_data_type(data_type)) + } + DataType::Union(fields, mode) => { + let data_type = DataType::Union( + fields + .iter() + .map(|(id, f)| (id, assign_dict_ids(f, counter))) + .collect(), + *mode, + ); + Arc::new(field.as_ref().clone().with_data_type(data_type)) + } + DataType::List(field) => { + let data_type = DataType::List(assign_dict_ids(field, counter)); + Arc::new(field.as_ref().clone().with_data_type(data_type)) + } + DataType::LargeList(field) => { + let data_type = DataType::LargeList(assign_dict_ids(field, counter)); + Arc::new(field.as_ref().clone().with_data_type(data_type)) + } + DataType::FixedSizeList(field, s) => { + let data_type = DataType::FixedSizeList(assign_dict_ids(field, counter), *s); + Arc::new(field.as_ref().clone().with_data_type(data_type)) + } + DataType::Map(field, sorted) => { + let data_type = DataType::Map(assign_dict_ids(field, counter), *sorted); + Arc::new(field.as_ref().clone().with_data_type(data_type)) + } + _ => Arc::clone(field), + } +} + +/// Re-assign schema to given batch. +/// +/// This is required to overwrite dictionary IDs. 
+fn reassign_schema(batch: &RecordBatch, schema: SchemaRef) -> RecordBatch { + RecordBatch::try_new_with_options( + schema, + batch.columns().to_vec(), + &RecordBatchOptions::default().with_row_count(Some(batch.num_rows())), + ) + .expect("re-assigning schema should always work") +} + +impl From<EncodedData> for proto2::EncodedData { + fn from(enc: EncodedData) -> Self { + let EncodedData { + ipc_message, + arrow_data, + } = enc; + + Self { + ipc_message: ipc_message.into(), + arrow_data: arrow_data.into(), + } + } +} + +/// Write options that are used for all relevant methods in this module. +fn write_options() -> IpcWriteOptions { + IpcWriteOptions::default() +} + +#[cfg(test)] +mod tests { + use std::{collections::HashMap, sync::Arc}; + + use arrow::{ + array::{ArrayRef, Int64Array, StringDictionaryBuilder}, + datatypes::Int32Type, + }; + use datafusion::{ + arrow::datatypes::{DataType, Field}, + common::assert_contains, + }; + + use super::*; + + #[test] + fn test_schema_roundtrip() { + let schema = schema(); + let bytes = schema_to_bytes(&schema); + + // ensure that the deserialization is NOT sensitive to alignment + const MAX_OFFSET: usize = 8; + let mut buffer = Vec::with_capacity(bytes.len() + MAX_OFFSET); + + for offset in 0..MAX_OFFSET { + buffer.clear(); + buffer.resize(offset, 0u8); + buffer.extend_from_slice(&bytes); + + let schema2 = bytes_to_schema(&buffer[offset..]).unwrap(); + + assert_eq!(schema, schema2); + } + } + + #[test] + fn test_record_batch_roundtrip() { + let schema = Arc::new(schema()); + let batch = RecordBatch::try_new( + Arc::clone(&schema), + vec![int64array(), dictarray1(), dictarray2(), dictarray1()], + ) + .unwrap(); + + let encoder = BatchEncoder::new(schema); + let encoded = encoder.write(&batch).unwrap(); + + // check that we actually use dictionaries and don't hydrate them + assert_eq!(encoded.dictionaries.len(), 3); + + let batch2 = encoder.read(encoded).unwrap(); + assert_eq!(batch, batch2); + } + + #[test] + fn test_write_checks_schema() { + let schema = Arc::new(schema()); + let batch = + RecordBatch::try_new(Arc::new(schema.project(&[0]).unwrap()), vec![int64array()]) + .unwrap(); + + let encoder = BatchEncoder::new(schema); + let err = encoder.write(&batch).unwrap_err(); + + assert_contains!(err.to_string(), "Invalid batch schema"); + } + + #[test] + fn test_project() { + let schema = Arc::new(schema()); + let batch = RecordBatch::try_new( + Arc::new(schema.project(&[0, 3, 2]).unwrap()), + vec![int64array(), dictarray1(), dictarray1()], + ) + .unwrap(); + + let encoder = BatchEncoder::new(schema).project(&[0, 3, 2]).unwrap(); + let encoded = encoder.write(&batch).unwrap(); + + // check that we actually use dictionaries and don't hydrate them + assert_eq!(encoded.dictionaries.len(), 2); + + let batch2 = encoder.read(encoded).unwrap(); + assert_eq!(batch, batch2); + } + + fn schema() -> Schema { + Schema::new_with_metadata( + vec![ + Field::new("f1", DataType::Int64, true) + .with_metadata(HashMap::from([("k".to_owned(), "v".to_owned())])), + Field::new( + "f2", + DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)), + false, + ), + Field::new( + "f3", + DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)), + false, + ), + Field::new( + "f4", + DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)), + false, + ), + ], + HashMap::from([("foo".to_owned(), "bar".to_owned())]), + ) + } + + fn int64array() -> ArrayRef { + Arc::new(Int64Array::from(vec![None, Some(1i64), Some(2i64)])) + } + + 
fn dictarray1() -> ArrayRef { + let mut builder = StringDictionaryBuilder::<Int32Type>::new(); + builder.append("foo").unwrap(); + builder.append("foo").unwrap(); + builder.append("bar").unwrap(); + Arc::new(builder.finish()) + } + + fn dictarray2() -> ArrayRef { + let mut builder = StringDictionaryBuilder::<Int32Type>::new(); + builder.append("fo").unwrap(); + builder.append("fo").unwrap(); + builder.append("ba").unwrap(); + Arc::new(builder.finish()) + } +} diff --git a/ingester_query_grpc/src/lib.rs b/ingester_query_grpc/src/lib.rs index 7718d2ddb6..ca761ad4a0 100644 --- a/ingester_query_grpc/src/lib.rs +++ b/ingester_query_grpc/src/lib.rs @@ -21,7 +21,9 @@ use workspace_hack as _; use crate::influxdata::iox::ingester::v1 as proto; use crate::influxdata::iox::ingester::v2 as proto2; use base64::{prelude::BASE64_STANDARD, Engine}; -use data_types::{NamespaceId, TableId, TimestampRange}; +use data_types::{ + NamespaceId, PartitionHashId, PartitionId, TableId, TimestampRange, TransitionPartitionId, +}; use datafusion::{common::DataFusionError, prelude::Expr}; use datafusion_proto::bytes::Serializeable; use predicate::{Predicate, ValueExpr}; @@ -57,6 +59,8 @@ pub mod influxdata { } } +pub mod arrow_serde; + /// Error returned if a request field has an invalid value. Includes /// machinery to add parent field names for context -- thus it will /// report `rules.write_timeout` than simply `write_timeout`. @@ -373,7 +377,6 @@ impl TryFrom<Vec<Expr>> for proto2::Filters { .iter() .map(|expr| { expr.to_bytes() - .map(|bytes| bytes.to_vec()) .map_err(|e| expr_to_bytes_violation("exprs", e)) }) .collect::<Result<Vec<_>, _>>()?; @@ -449,6 +452,50 @@ pub fn decode_proto2_filters_from_base64( proto2::Filters::decode(predicate_binary.as_slice()).context(ProtobufDecodeSnafu) } +impl TryFrom<proto2::PartitionIdentifier> for TransitionPartitionId { + type Error = FieldViolation; + + fn try_from(value: proto2::PartitionIdentifier) -> Result<Self, Self::Error> { + let proto2::PartitionIdentifier { + partition_identifier, + } = value; + let id = + partition_identifier.ok_or_else(|| FieldViolation::required("partition_identifier"))?; + let id = match id { + proto2::partition_identifier::PartitionIdentifier::CatalogId(id) => { + Self::Deprecated(PartitionId::new(id)) + } + proto2::partition_identifier::PartitionIdentifier::HashId(id) => { + Self::Deterministic(PartitionHashId::try_from(id.as_ref()).map_err(|e| { + FieldViolation { + field: "partition_identifier".to_owned(), + description: e.to_string(), + } + })?) + } + }; + Ok(id) + } +} + +impl From<TransitionPartitionId> for proto2::PartitionIdentifier { + fn from(id: TransitionPartitionId) -> Self { + let id = match id { + TransitionPartitionId::Deprecated(id) => { + proto2::partition_identifier::PartitionIdentifier::CatalogId(id.get()) + } + TransitionPartitionId::Deterministic(id) => { + proto2::partition_identifier::PartitionIdentifier::HashId( + id.as_bytes().to_vec().into(), + ) + } + }; + Self { + partition_identifier: Some(id), + } + } +} + #[cfg(test)] mod tests { use std::collections::BTreeSet;
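For an end-to-end feel of the new `arrow_serde` module, here is a hedged usage sketch assembled only from the signatures added in this diff (`schema_to_bytes`, `bytes_to_schema`, and `BatchEncoder::{new, write, read}`); error handling is collapsed into a boxed error, and as the module docs above stress, this is an IOx-internal protocol rather than a public interface.

```rust
use std::sync::Arc;

use arrow::{
    array::Int64Array,
    datatypes::{DataType, Field, Schema},
    record_batch::RecordBatch,
};
use ingester_query_grpc::arrow_serde::{bytes_to_schema, schema_to_bytes, BatchEncoder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let schema = Arc::new(Schema::new(vec![Field::new("f1", DataType::Int64, true)]));
    let batch = RecordBatch::try_new(
        Arc::clone(&schema),
        vec![Arc::new(Int64Array::from(vec![Some(1), None, Some(3)]))],
    )?;

    // The schema travels once per response as raw IPC bytes.
    let schema_bytes = schema_to_bytes(&schema);
    let schema_back = bytes_to_schema(&schema_bytes)?;
    assert_eq!(schema.as_ref(), &schema_back);

    // Each batch travels as a `proto2::RecordBatch` message without its schema.
    let encoder = BatchEncoder::new(Arc::clone(&schema));
    let encoded = encoder.write(&batch)?;
    let decoded = encoder.read(encoded)?;
    assert_eq!(batch, decoded);

    Ok(())
}
```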
9b211df053de85c97375e1b2bd378b82aa1e65c3
Dom Dwyer
2023-05-16 00:16:06
persist & persistence metrics
Adds a test that asserts (manually triggered) persistence generates a file, uploads it to object storage, inserts metadata into the catalog, and emits various persistence metrics.
null
test(ingester): persist & persistence metrics

Adds a test that asserts (manually triggered) persistence generates a file, uploads it to object storage, inserts metadata into the catalog, and emits various persistence metrics.
diff --git a/ingester/tests/write.rs b/ingester/tests/write.rs index 930e433461..c046fa6ba9 100644 --- a/ingester/tests/write.rs +++ b/ingester/tests/write.rs @@ -1,11 +1,14 @@ use arrow_util::assert_batches_sorted_eq; use assert_matches::assert_matches; -use data_types::PartitionKey; +use data_types::{PartitionKey, TableId, Timestamp}; use ingester_query_grpc::influxdata::iox::ingester::v1::IngesterQueryRequest; -use ingester_test_ctx::TestContextBuilder; +use ingester_test_ctx::{TestContextBuilder, DEFAULT_MAX_PERSIST_QUEUE_DEPTH}; use iox_catalog::interface::Catalog; -use metric::{DurationHistogram, U64Histogram}; -use std::sync::Arc; +use metric::{ + assert_counter, assert_histogram, DurationHistogram, U64Counter, U64Gauge, U64Histogram, +}; +use parquet_file::ParquetFilePath; +use std::{sync::Arc, time::Duration}; // Write data to an ingester through the RPC interface and query the data, validating the contents. #[tokio::test] @@ -89,6 +92,161 @@ async fn write_query() { assert_eq!(hist.total, 2); } +// Write data to an ingester through the RPC interface and persist the data. +#[tokio::test] +async fn write_persist() { + let namespace_name = "write_query_test_namespace"; + let mut ctx = TestContextBuilder::default().build().await; + let ns = ctx.ensure_namespace(namespace_name, None).await; + + let partition_key = PartitionKey::from("1970-01-01"); + ctx.write_lp( + namespace_name, + r#"bananas count=42,greatness="inf" 200"#, + partition_key.clone(), + 42, + ) + .await; + + // Perform a query to validate the actual data buffered. + let table_id = ctx.table_id(namespace_name, "bananas").await.get(); + let data: Vec<_> = ctx + .query(IngesterQueryRequest { + namespace_id: ns.id.get(), + table_id, + columns: vec![], + predicate: None, + }) + .await + .expect("query request failed"); + + let expected = vec![ + "+-------+-----------+--------------------------------+", + "| count | greatness | time |", + "+-------+-----------+--------------------------------+", + "| 42.0 | inf | 1970-01-01T00:00:00.000000200Z |", + "+-------+-----------+--------------------------------+", + ]; + assert_batches_sorted_eq!(&expected, &data); + + // Persist the data. + ctx.persist(namespace_name).await; + + // Ensure the data is no longer buffered. + let data: Vec<_> = ctx + .query(IngesterQueryRequest { + namespace_id: ns.id.get(), + table_id, + columns: vec![], + predicate: None, + }) + .await + .expect("query request failed"); + assert!(data.is_empty()); + + // Validate the parquet file was added to the catalog + let parquet_files = ctx.catalog_parquet_file_records(namespace_name).await; + let (path, want_file_size) = assert_matches!(parquet_files.as_slice(), [f] => { + assert_eq!(f.namespace_id, ns.id); + assert_eq!(f.table_id, TableId::new(table_id)); + assert_eq!(f.min_time, Timestamp::new(200)); + assert_eq!(f.max_time, Timestamp::new(200)); + assert_eq!(f.to_delete, None); + assert_eq!(f.row_count, 1); + assert_eq!(f.column_set.len(), 3); + assert_eq!(f.max_l0_created_at, f.created_at); + + (ParquetFilePath::from(f), f.file_size_bytes) + }); + + // Validate the file exists at the expected object store path. + let file_size = ctx + .object_store() + .get(&path.object_store_path()) + .await + .expect("parquet file must exist in object store") + .bytes() + .await + .expect("failed to read parquet file bytes") + .len(); + assert_eq!(file_size, want_file_size as usize); + + // And that the persist metrics were recorded. 
+ let metrics = ctx.metrics(); + + //////////////////////////////////////////////////////////////////////////// + // Config reflection metrics + assert_counter!( + metrics, + U64Gauge, + "ingester_persist_max_parallelism", + value = 5, + ); + + assert_counter!( + metrics, + U64Gauge, + "ingester_persist_max_queue_depth", + value = DEFAULT_MAX_PERSIST_QUEUE_DEPTH as u64, + ); + + //////////////////////////////////////////////////////////////////////////// + // Persist worker metrics + assert_histogram!( + metrics, + DurationHistogram, + "ingester_persist_active_duration", + samples = 1, + ); + + assert_histogram!( + metrics, + DurationHistogram, + "ingester_persist_enqueue_duration", + samples = 1, + ); + + assert_counter!( + metrics, + U64Counter, + "ingester_persist_enqueued_jobs", + value = 1, + ); + + //////////////////////////////////////////////////////////////////////////// + // Parquet file metrics + assert_histogram!( + metrics, + DurationHistogram, + "ingester_persist_parquet_file_time_range", + samples = 1, + sum = Duration::from_secs(0), + ); + + assert_histogram!( + metrics, + U64Histogram, + "ingester_persist_parquet_file_size_bytes", + samples = 1, + ); + + assert_histogram!( + metrics, + U64Histogram, + "ingester_persist_parquet_file_row_count", + samples = 1, + sum = 1, + ); + + assert_histogram!( + metrics, + U64Histogram, + "ingester_persist_parquet_file_column_count", + samples = 1, + sum = 3, + ); +} + // Write data to the ingester, which writes it to the WAL, then drop and recreate the WAL and // validate the data is replayed from the WAL into memory. #[tokio::test] diff --git a/ingester_test_ctx/src/lib.rs b/ingester_test_ctx/src/lib.rs index d3f7f68f44..5d7d290163 100644 --- a/ingester_test_ctx/src/lib.rs +++ b/ingester_test_ctx/src/lib.rs @@ -35,6 +35,7 @@ use iox_time::TimeProvider; use metric::{Attributes, Metric, MetricObserver}; use mutable_batch_lp::lines_to_batches; use mutable_batch_pb::encode::encode_write; +use object_store::ObjectStore; use observability_deps::tracing::*; use parquet_file::storage::ParquetStorage; use tempfile::TempDir; @@ -158,7 +159,7 @@ impl TestContextBuilder { shutdown_tx, _dir: dir, catalog, - _storage: storage, + storage, metrics, namespaces: Default::default(), } @@ -174,7 +175,7 @@ pub struct TestContext<T> { ingester: IngesterGuard<T>, shutdown_tx: oneshot::Sender<CancellationToken>, catalog: Arc<dyn Catalog>, - _storage: ParquetStorage, + storage: ParquetStorage, metrics: Arc<metric::Registry>, /// Once the last [`TempDir`] reference is dropped, the directory it @@ -415,6 +416,11 @@ where .recorder() } + /// Return the metric recorder for the [`TestContext`]. + pub fn metrics(&self) -> &metric::Registry { + &self.metrics + } + /// Retrieve the Parquet files in the catalog for the specified namespace. pub async fn catalog_parquet_file_records(&self, namespace: &str) -> Vec<ParquetFile> { let namespace_id = self.namespace_id(namespace).await; @@ -432,6 +438,11 @@ where Arc::clone(&self.catalog) } + /// Return the [`ObjectStore`] for this [`TestContext`]. + pub fn object_store(&self) -> Arc<dyn ObjectStore> { + Arc::clone(self.storage.object_store()) + } + /// Return the [`IngesterRpcInterface`] for this [`TestContext`]. /// /// Calls duration made through this interface measures the cost of the
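As a standalone illustration of the object-store check performed after persistence in the test above, the sketch below writes a small object into an in-memory `object_store` and reads it back to compare sizes; the path and expected size are made up here, whereas the real test derives them from the catalog's `ParquetFile` record.

```rust
use object_store::{memory::InMemory, path::Path, ObjectStore};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let store = InMemory::new();
    let path = Path::from("ns/table/partition/0000.parquet"); // hypothetical object path
    store.put(&path, vec![0u8; 128].into()).await?;

    // In the real test, `want_file_size` comes from the catalog record and the
    // object is the parquet file uploaded by the persist job.
    let want_file_size = 128usize;
    let file_size = store.get(&path).await?.bytes().await?.len();
    assert_eq!(file_size, want_file_size);

    Ok(())
}
```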
f1fc498dbf8de4cfb5649fd3c38b02868a8ba0f4
praveen-influx
2025-02-20 16:03:30
updates to default memory settings (#26023)
- breaking change, replaced `--parquet-mem-cache-size-mb` and env var for it with `--parquet-mem-cache-size` (takes value now in percentage or MB), now defaults to 20% of total available memory
- force snapshotting is set at 50%
- datafusion mem pool is set to 20%
closes: https://github.com/influxdata/influxdb/issues/26009
feat: updates to default memory settings (#26023)

- breaking change, replaced `--parquet-mem-cache-size-mb` and env var for it with `--parquet-mem-cache-size` (takes value now in percentage or MB), now defaults to 20% of total available memory
- force snapshotting is set at 50%
- datafusion mem pool is set to 20%

closes: https://github.com/influxdata/influxdb/issues/26009
diff --git a/influxdb3/src/commands/common.rs b/influxdb3/src/commands/common.rs index b05c0f002d..062816fa6e 100644 --- a/influxdb3/src/commands/common.rs +++ b/influxdb3/src/commands/common.rs @@ -1,8 +1,9 @@ use clap::{Parser, ValueEnum}; +use observability_deps::tracing::warn; use secrecy::Secret; -use std::error::Error; use std::fmt::Display; use std::str::FromStr; +use std::{env, error::Error}; use url::Url; #[derive(Debug, Parser)] @@ -165,3 +166,14 @@ where .ok_or_else(|| format!("invalid FIELD:VALUE. No `:` found in `{s}`"))?; Ok((s[..pos].parse()?, s[pos + 1..].parse()?)) } + +/// If passed in env vars are set, then it writes to log along with what the user can switch to +pub fn warn_use_of_deprecated_env_vars(deprecated_vars: &[(&'static str, &'static str)]) { + deprecated_vars + .iter() + .for_each(|(deprecated_var, migration_msg)| { + if env::var(deprecated_var).is_ok() { + warn!("detected deprecated/removed env var {deprecated_var}, {migration_msg}"); + } + }); +} diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index f3d5e8e4b9..8ae8454957 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -61,6 +61,8 @@ use trace_exporters::TracingConfig; use trace_http::ctx::TraceHeaderParser; use trogging::cli::LoggingConfig; +use crate::commands::common::warn_use_of_deprecated_env_vars; + /// The default name of the influxdb data directory #[allow(dead_code)] pub const DEFAULT_DATA_DIRECTORY_NAME: &str = ".influxdb3"; @@ -108,6 +110,14 @@ pub enum Error { pub type Result<T, E = Error> = std::result::Result<T, E>; +// variable name and migration message tuples +const DEPRECATED_ENV_VARS: &[(&str, &str)] = &[( + "INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB", + "use INFLUXDB3_PARQUET_MEM_CACHE_SIZE instead, it is in MB or %", +)]; + +/// Try to keep all the memory size in MB instead of raw bytes, also allow +/// them to be configured as a percentage of total memory using MemorySizeMb #[derive(Debug, clap::Parser)] pub struct Config { /// object store options @@ -148,27 +158,16 @@ pub struct Config { )] pub http_bind_address: SocketAddr, - /// Size of the RAM cache used to store data in bytes. - /// - /// Can be given as absolute value or in percentage of the total available memory (e.g. `10%`). - #[clap( - long = "ram-pool-data-bytes", - env = "INFLUXDB3_RAM_POOL_DATA_BYTES", - default_value = "1073741824", // 1GB - action - )] - pub ram_pool_data_bytes: MemorySize, - - /// Size of memory pool used during query exec, in bytes. + /// Size of memory pool used during query exec, in megabytes. /// /// Can be given as absolute value or in percentage of the total available memory (e.g. `10%`). #[clap( - long = "exec-mem-pool-bytes", - env = "INFLUXDB3_EXEC_MEM_POOL_BYTES", - default_value = "8589934592", // 8GB - action + long = "exec-mem-pool-bytes", + env = "INFLUXDB3_EXEC_MEM_POOL_BYTES", + default_value = "20%", + action )] - pub exec_mem_pool_bytes: MemorySize, + pub exec_mem_pool_bytes: MemorySizeMb, /// bearer token to be set for requests #[clap(long = "bearer-token", env = "INFLUXDB3_BEARER_TOKEN", action)] @@ -238,16 +237,6 @@ pub struct Config { )] pub query_log_size: usize, - // TODO - make this default to 70% of available memory: - /// The size limit of the buffered data. If this limit is passed a snapshot will be forced. 
- #[clap( - long = "buffer-mem-limit-mb", - env = "INFLUXDB3_BUFFER_MEM_LIMIT_MB", - default_value = "5000", - action - )] - pub buffer_mem_limit_mb: usize, - /// The node idendifier used as a prefix in all object store file paths. This should be unique /// for any InfluxDB 3 Core servers that share the same object store configuration, i.e., the /// same bucket. @@ -260,14 +249,15 @@ pub struct Config { )] pub node_identifier_prefix: String, - /// The size of the in-memory Parquet cache in megabytes (MB). + /// The size of the in-memory Parquet cache in megabytes or percentage of total available mem. + /// breaking: removed parquet-mem-cache-size-mb and env var INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB #[clap( - long = "parquet-mem-cache-size-mb", - env = "INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB", - default_value = "1000", + long = "parquet-mem-cache-size", + env = "INFLUXDB3_PARQUET_MEM_CACHE_SIZE", + default_value = "20%", action )] - pub parquet_mem_cache_size: ParquetCacheSizeMb, + pub parquet_mem_cache_size: MemorySizeMb, /// The percentage of entries to prune during a prune operation on the in-memory Parquet cache. /// @@ -334,15 +324,15 @@ pub struct Config { #[clap(flatten)] pub processing_engine_config: ProcessingEngineConfig, - /// Threshold for internal buffer, can be either percentage or absolute value. - /// eg: 70% or 100000 + /// Threshold for internal buffer, can be either percentage or absolute value in MB. + /// eg: 70% or 1000 MB #[clap( long = "force-snapshot-mem-threshold", env = "INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD", - default_value = "70%", + default_value = "50%", action )] - pub force_snapshot_mem_threshold: MemorySize, + pub force_snapshot_mem_threshold: MemorySizeMb, /// Disable sending telemetry data to telemetry.v3.influxdata.com. #[clap( @@ -376,22 +366,28 @@ pub struct Config { /// Specified size of the Parquet cache in megabytes (MB) #[derive(Debug, Clone, Copy)] -pub struct ParquetCacheSizeMb(usize); +pub struct MemorySizeMb(usize); -impl ParquetCacheSizeMb { +impl MemorySizeMb { /// Express this cache size in terms of bytes (B) fn as_num_bytes(&self) -> usize { - self.0 * 1_000 * 1_000 + self.0 } } -impl FromStr for ParquetCacheSizeMb { - type Err = anyhow::Error; +impl FromStr for MemorySizeMb { + type Err = String; fn from_str(s: &str) -> std::prelude::v1::Result<Self, Self::Err> { - s.parse() - .context("failed to parse parquet cache size value as an unsigned integer") - .map(Self) + let num_bytes = if s.contains("%") { + let mem_size = MemorySize::from_str(s)?; + mem_size.bytes() + } else { + let num_mb = usize::from_str(s) + .map_err(|_| "failed to parse value as unsigned integer".to_string())?; + num_mb * 1000 * 1000 + }; + Ok(Self(num_bytes)) } } @@ -446,6 +442,9 @@ pub async fn command(config: Config) -> Result<()> { ); debug!(%build_malloc_conf, "build configuration"); + // check if any env vars that are deprecated is still being passed around and warn + warn_use_of_deprecated_env_vars(DEPRECATED_ENV_VARS); + let metrics = setup_metric_registry(); // Install custom panic handler and forget about it. 
@@ -505,7 +504,7 @@ pub async fn command(config: Config) -> Result<()> { .map(|store| (store.id(), Arc::clone(store.object_store()))) .collect(), metric_registry: Arc::clone(&metrics), - mem_pool_size: config.exec_mem_pool_bytes.bytes(), + mem_pool_size: config.exec_mem_pool_bytes.as_num_bytes(), }, DedicatedExecutor::new( "datafusion", @@ -577,7 +576,7 @@ pub async fn command(config: Config) -> Result<()> { info!("setting up background mem check for query buffer"); background_buffer_checker( - config.force_snapshot_mem_threshold.bytes(), + config.force_snapshot_mem_threshold.as_num_bytes(), &write_buffer_impl, ) .await;
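The `MemorySizeMb` flag type introduced in this diff accepts either an absolute value in megabytes or a percentage of total available memory. Below is a minimal, self-contained sketch of that parsing rule; `MemLimit`, the explicit `total_mem_bytes` parameter, and the `main` assertions are illustrative assumptions, not the actual influxdb3 implementation (which delegates percentage handling to the existing `MemorySize` type).

```rust
use std::str::FromStr;

/// Sketch of a memory limit that can be given as "20%" (of total memory)
/// or as a plain number of megabytes such as "1000".
#[derive(Debug, Clone, Copy, PartialEq)]
struct MemLimit {
    bytes: usize,
}

impl MemLimit {
    fn parse(s: &str, total_mem_bytes: usize) -> Result<Self, String> {
        let bytes = if let Some(pct) = s.strip_suffix('%') {
            // percentage of the total available memory
            let pct = usize::from_str(pct.trim())
                .map_err(|_| format!("invalid percentage: {s}"))?;
            if pct > 100 {
                return Err(format!("percentage out of range: {s}"));
            }
            total_mem_bytes * pct / 100
        } else {
            // plain numbers are interpreted as megabytes, matching the new
            // --parquet-mem-cache-size / --exec-mem-pool-bytes semantics
            let mb = usize::from_str(s.trim())
                .map_err(|_| format!("invalid megabyte value: {s}"))?;
            mb * 1000 * 1000
        };
        Ok(Self { bytes })
    }
}

fn main() {
    let total = 16_000_000_000; // pretend the host has 16 GB
    assert_eq!(MemLimit::parse("20%", total).unwrap().bytes, 3_200_000_000);
    assert_eq!(MemLimit::parse("1000", total).unwrap().bytes, 1_000_000_000);
    assert!(MemLimit::parse("abc", total).is_err());
}
```

Keeping both forms behind a single flag is what lets the defaults move to percentages (20% cache, 50% snapshot threshold) without breaking users who still pass absolute megabyte values.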
89d92077617ea5c5286df583d8c604b9b675f160
Brandon Pfeifer
2022-10-18 12:21:47
update to use scheduled pipeline (2.x) (#23811)
* chore: update to use scheduled pipeline * chore: add documentation to scheduled pipelines
null
chore: update to use scheduled pipeline (2.x) (#23811) * chore: update to use scheduled pipeline * chore: add documentation to scheduled pipelines
diff --git a/.circleci/config.yml b/.circleci/config.yml index 739b3f2fc5..5cd30667a0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,6 +9,10 @@ parameters: type: string default: go1.18.6-9c97f2f2903566a00bd4b00184aeca0c813adda0 + workflow: + type: string + default: build + executors: cross-builder: docker: @@ -64,6 +68,8 @@ nofork_filter: &nofork_filter workflows: version: 2 build: + when: + equal: [ << pipeline.parameters.workflow >>, build ] jobs: - test-race: <<: *any_filter @@ -200,13 +206,17 @@ workflows: - aws-destroy-by-date nightly: - triggers: - - schedule: - cron: "0 5 * * *" - filters: - branches: - only: - - master + when: + and: + # This requires a pipeline trigger with a custom "workflow" parameter + # set to "nightly". Since we want to trigger this workflow on several + # branches, we cannot use the trigger name as suggested by the + # documentation. + # + # For more information: + # https://circleci.com/docs/scheduled-pipelines/ + - equal: [ << pipeline.trigger_source >>, scheduled_pipeline ] + - equal: [ << pipeline.parameters.workflow >>, nightly ] jobs: - changelog - s3-publish-changelog:
a3b662f30ba29e403c7f2ea84919fc45702a985d
Marco Neumann
2023-02-14 12:13:22
explain new physical plan construction (#6970)
* docs: explain new physical plan construction There is no code yet, but this is the rough plan. Ref #6098. * docs: clarify wording Co-authored-by: Andrew Lamb <[email protected]> * docs: typos Co-authored-by: Andrew Lamb <[email protected]> * docs: add links * docs: extend reasoning ---------
Co-authored-by: Andrew Lamb <[email protected]>
docs: explain new physical plan construction (#6970) * docs: explain new physical plan construction There is no code yet, but this is the rough plan. Ref #6098. * docs: clarify wording Co-authored-by: Andrew Lamb <[email protected]> * docs: typos Co-authored-by: Andrew Lamb <[email protected]> * docs: add links * docs: extend reasoning --------- Co-authored-by: Andrew Lamb <[email protected]>
diff --git a/docs/physical_plan_construction.md b/docs/physical_plan_construction.md new file mode 100644 index 0000000000..82b4f22c85 --- /dev/null +++ b/docs/physical_plan_construction.md @@ -0,0 +1,482 @@ +# IOx Physical Plan Construction +This document describes how DataFusion physical plans should be constructed in IOx. As a reminder: Our logical plans +contain a `ChunkTableProvider` (implements [`TableProvider`]) that contains a set of `QueryChunk`s. The main entry point +for DataFusion is [`TableProvider::scan`] which receives the following information: + +- context (unused) +- projection (optional, otherwise retrieve all column) +- filter expression +- limit (unused) + +We want to design a system that is: + +- correct (esp. it handles deduplication) +- efficient (esp. it should support [`TableProvider::supports_filter_pushdown`] = [`TableProviderFilterPushDown::Exact`]) +- scalable (esp. it should avoid large fan-outs) +- easy to understand and extend +- work hand-in-hand w/ DataFusion's optimizer framework + +We use the physical plan to fullfill the requirements instead of the logical planning system, because: + +- it allows the querier to create a logical plan w/o contacting the ingesters (helpful for debugging and UX) +- currently (2023-02-14) the physical plan in DataFusion seems to be way more customizable than the logical plan +- it expresses the "what? scan a table" (logical) vs "how? perform dedup on this data" (physical) better + +The overall strategy is the following: + +1. **Initial Plan:** Construct a semantically correct, naive physical plan. +2. **IOx Optimizer Passes:** Apply IOx optimizer passes (in the order in which they occur in this document)]. +3. **DataFusion Optimizer Passes:** Apply DataFusion's optimizer passes. This will esp. add all required sorts: + - before `DeduplicateExec` + - if output sorting is required (e.g. for SQL queries a la `SELECT ... FROM ... ORDER BY ...`) + - if any group-by expression within the DataFusion plan can use sorting + +This document uses [YAML] to illustrate the plans or plan transformations (in which case two plans are shown). Only the +relevant parameters are shown. + +We assume that `QueryChunk`s can be transformed into `RecordBatchesExec`/[`ParquetExec`] and that we can always recover +the original `QueryChunk`s from these nodes. We use `ChunkExec` as a placeholder for `RecordBatchesExec`/[`ParquetExec`] +if the concrete node type is irrelevant. + +## Initial Plan +The initial plan should be correct under [`TableProvider::supports_filter_pushdown`] = +[`TableProviderFilterPushDown::Exact`]. Hence it must capture the projection and filter parameters. + +```yaml +--- +ProjectionExec: # optional + FilterExec: + DeduplicateExec: + UnionExec: + - RecordBatchesExec + - ParquetExec: + store: A + # if there are multiple stores (unlikely) + - ParquetExec: + store: B +``` + +The created [`ParquetExec`] does NOT contain any predicates or projections. The files may be grouped into +[`target_partitions`] partitions. + +## Union Handling +There are some essential transformations around [`UnionExec`] that always apply. They may be used at any at point (either +as an extra optimizer rule or built into some of the other rules). They are mentioned here once so the remaining +transformations are easier to follow. 
+ +### Union Un-nesting +```yaml +--- +UnionExec: + - UnionExec: + - SomeExec1 + - SomeExec2 + - SomeExec3 + +--- +UnionExec: + - SomeExec1 + - SomeExec2 + - SomeExec3 +``` + +### 1-Unions +```yaml +--- +UnionExec: + - SomeExec1 + +--- +SomeExec1 +``` + +## Empty Chunk Nodes +`RecordBatchesExec` w/o any `RecordBatch`es and [`ParquetExec`] w/o any files may just be removed from the plan if the +parent node is a [`UnionExec`]. + +## Deduplication Scope +Deduplication must only be performed if there are duplicate tuples (based on their primary key). This is the case if +either duplicates may occur within a chunk (e.g. freshly after ingest) or if the key space of chunks overlap. Since +deduplication potentially requires sorting and is a costly operation in itself, we may want to avoid it as good as +possible and also limit the scope (i.e. set of tuples) on which a deduplication acts on. + +### Partition Split +```yaml +--- +DeduplicateExec: + UnionExec: # optional, may only contain a single child node + - ChunkExec: + partition: A + - ChunkExec: + partition: B + - ChunkExec: + partition: A + +--- +Union: + - DeduplicateExec: + UnionExec: + - ChunkExec: + partition: A + - ChunkExec: + partition: A + - DeduplicateExec: + UnionExec: + - ChunkExec: + partition: B +``` + +### Time Split +From the `QueryChunk` statistics we always know the time ranges of a chunk. If chunks do NOT overlap in these ranges, +they also do NOT overlap in their key space. Hence we can use the time range to split `DeduplicateExec` nodes. + +```yaml +--- +DeduplicateExec: + UnionExec: # optional, may only contain a single child node + - ChunkExec: + ts_min: 1 + ts_max: 10 + - ChunkExec: + ts_min: 2 + ts_max: 5 + - ChunkExec: + ts_min: 5 + ts_max: 5 + - ChunkExec: + ts_min: 8 + ts_max: 9 + - ChunkExec: + ts_min: 11 + ts_max: 15 + - ChunkExec: + ts_min: 16 + ts_max: 17 + - ChunkExec: + ts_min: 17 + ts_max: 18 + +--- +Union: + - DeduplicateExec: + UnionExec: + - ChunkExec: + ts_min: 1 + ts_max: 10 + - ChunkExec: + ts_min: 2 + ts_max: 5 + - ChunkExec: + ts_min: 5 + ts_max: 5 + - ChunkExec: + ts_min: 8 + ts_max: 9 + - DeduplicateExec: + UnionExec: + - ChunkExec: + ts_min: 11 + ts_max: 15 + - DeduplicateExec: + UnionExec: + - ChunkExec: + ts_min: 16 + ts_max: 17 + - ChunkExec: + ts_min: 17 + ts_max: 18 +``` + +### No Duplicates +If a `DeduplicateExec` has a single child node and that node does NOT contain any duplicates (based on the primary key), +then we can remove the `DeduplicateExec`: + +```yaml +--- +DeduplicateExec: + ChunkExec + may_contain_pk_duplicates: false + +--- +ChunkExec + may_contain_pk_duplicates: false +``` + +## Node Grouping +After the deduplication handling, chunks may or may not be contained in singular exec nodes. These transformations try +to reorganize them in a way that query execution is efficient. + +### Type Grouping +`RecordBatchesExec`s can be grouped into a single node. 
[`ParquetExec`]s can be grouped by object store (this is a single) +store in most cases: + +```yaml +--- +UnionExec: + - RecordBatchesExec: + chunks: [C1, C2] + - ParquetExec: + chunks: [C4] + store: A + - ParquetExec: + chunks: [C5] + store: B + - RecordBatchesExec + chunks: [C6] + - ParquetExec: + chunks: [C7] + store: A + +--- +UnionExec: + - RecordBatchesExec: + chunks: [C1, C2, C6] + - ParquetExec: + chunks: [C4, C7] + store: A + - ParquetExec: + chunks: [C5] + store: B +``` + +### Sort Grouping +Since DataFusion will insert the necessary [`SortExec`]s for us, it is important that we are able to tell it about already +sorted data. This only concerns [`ParquetExec`], since `RecordBatchesExec` are based on not-yet-sorted ingester data. +[`ParquetExec`] is able to express its sorting per partition, so we are NOT required a [`ParquetExec`] per existing sorting. +We just need to make sure that files/chunks with the same sorting end up in the same partition and that we make sure +that DataFusion knows about it ([`FileScanConfig::output_ordering`]). + +This somewhat interferes with the [`target_partitions`] setting. We shall find a good balance between avoiding resorts and +"too wide" fan-outs. + +## Predicate Pushdown +We may push down filters closer to the source under certain circumstances. + +### Predicates & Unions +[`FilterExec`] can always be pushed through [`UnionExec`], since [`FilterExec`] only allows row-based operations: + +```yaml +--- +FilterExec: + UnionExec: + - SomeExec1 + - SomeExec2 + +--- +UnionExec: + - FilterExec: + SomeExec1 + - FilterExec: + SomeExec2 +``` + +### Predicates & Projections +With the current "Initial Plan" and rule ordering, it should not be required to push predicates through projections. The +opposite however is the case, see "Projections & Predicates". + +Note that we could add a transformation implementing this if we ever require it. + +### Predicates & Dedup +[`FilterExec`]s contain a [`PhysicalExpr`]. If this is a AND-chain / logical conjunction, we can split it into +sub-expressions (otherwise we treat the whole expression as a single sub-expression). For each sub-expression, we can +tell which columns it uses. If it refers to only primary-key columns (i.e. no references to fields), we can push it through +`DeduplicateExec`: + +```yaml +--- +FilterExec: + expr: (field = 1) AND (field=2 OR tag=3) AND (tag > 0) + child: + DeduplicateExec: + SomeExec + +--- +FilterExec: + expr: (field = 1) AND (field=2 OR tag=3) + child: + DeduplicateExec: + FilterExec: + expr: tag > 0 + child: + SomeExec +``` + +Note that empty filters are removed during this process: + +```yaml +--- +FilterExec: + expr: tag > 0 + child: + DeduplicateExec: + SomeExec + +--- +DeduplicateExec: + FilterExec: + expr: tag > 0 + child: + SomeExec +``` + +### Predicates & Parquet +Predicates can be pushed down into [`ParquetExec`] and are partially evaluated there (depending on various other configs +and the complexity of the filter). Note that the [`ParquetExec`] itself decides when/how to evaluate predicates, so we are +note required to perform any predicate manipulation here: + +```yaml +--- +FilterExec: + expr: (tag1 > 0) AND (some_fun(tag2) = 2) + child: + DeduplicateExec: + ParquetExec: + files: ... + +--- +FilterExec: + expr: (tag1 > 0) AND (some_fun(tag2) = 2) + child: + DeduplicateExec: + ParquetExec: + predicate: (tag1 > 0) AND (some_fun(tag2) = 2) + files: ... 
+``` + +### Predicates & Record Batches +`RecordBatchesExec` does not have any filter mechanism built in and hence relies on [`FilterExec`] to evaluate predicates. +We therefore do NOT push down any predicates into `RecordBatchesExec`. + +## Projection Pushdown +This concerns the pushdown of columns selections only. Note that [`ProjectionExec`] may contain renaming columns or even +the calculation of new ones; these are NOT part of this rule and are never generated by the "Initial Plan". + +### Projections & Unions +Projections can always be pushed through union operations since they are only column-based and all union inputs are +required to have the same schema: + +```yaml +--- +ProjectionExec: + keep: tag1, field2, time + child: + UnionExec: + - SomeExec1 + - SomeExec2 + +--- +UnionExec: + - ProjectionExec: + keep: tag1, field2, time + child: + SomeExec1 + - ProjectionExec: + keep: tag1, field2, time + child: + SomeExec2 +``` + +### Projections & Predicates +Projections may be pushed through [`FilterExec`] if they keep all columns required to evaluate the filter expression: + +```yaml +--- +ProjectionExec: + keep: tag1, field2, time + child: + FilterExec: + predicate: field3 > 0 + child: + SomeExec + +--- +ProjectionExec: + keep: tag1, field2, time + child: + FilterExec: + predicate: field3 > 0 + child: + ProjectionExec: + keep: tag1, field2, field3, time + child: + SomeExec +``` + +### Projections & Dedup +Projections that do NOT remove primary keys can be pushed through the deduplication. This is also compatible with the +[`SortExec`]s added by DataFusion, since these will only act on the primary keys: + +```yaml +--- +ProjectionExec: + keep: tag1, field2, time + child: + DeduplicateExec: + # We assume a primary key of [tag1, tag2, time] here, + # but `SomeExec` may have more fields (e.g. [field1, field2]). + SomeExec + +--- +ProjectionExec: + keep: tag1, field2, time + child: + DeduplicateExec: + ProjectionExec: + keep: tag1, tag2, field2, time + child: + SomeExec +``` + +### Projections & Parquet +[`ParquetExec`] can be instructed to only deserialize required columns via [`FileScanConfig::projection`]. Note that we +shall not modify [`FileScanConfig::file_schema`] because we MUST NOT remove columns that are used for pushdown predicates. + +```yaml +--- +ProjectionExec: + keep: tag1, field2, time + child: + ParquetExec: + predicate: field1 > 0 + projection: null + +--- +ParquetExec: + predicate: field1 > 0 + projection: tag1, field2, time +``` + +### Projections & Record Batches +While `RecordBatchesExec` does not implement any predicate evaluation, it implements projection (column selection). The +reason is that it creates NULL-columns for batches that do not contain the required output columns. 
Hence it is valuable +to push down projections into `RecordBatchesExec` so we can avoid creating columns that we would throw away anyways: + +```yaml +--- +ProjectionExec: + keep: tag1, field2, time + child: + RecordBatchesExec: + schema: tag1, tag2, field1, field2, time + +--- +RecordBatchesExec: + schema: tag1, field2, time +``` + + +[`FileScanConfig::file_schema`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/file_format/struct.FileScanConfig.html#structfield.file_schema +[`FileScanConfig::output_ordering`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/file_format/struct.FileScanConfig.html#structfield.output_ordering +[`FileScanConfig::projection`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/file_format/struct.FileScanConfig.html#structfield.projection +[`FilterExec`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/filter/struct.FilterExec.html +[`ParquetExec`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/file_format/struct.ParquetExec.html +[`PhysicalExpr`]:https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/trait.PhysicalExpr.html +[`ProjectionExec`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/projection/struct.ProjectionExec.html +[`SortExec`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/sorts/sort/struct.SortExec.html +[`TableProvider`]: https://docs.rs/datafusion/18.0.0/datafusion/datasource/datasource/trait.TableProvider.html +[`TableProvider::scan`]: https://docs.rs/datafusion/18.0.0/datafusion/datasource/datasource/trait.TableProvider.html#tymethod.scan +[`TableProvider::supports_filter_pushdown`]: https://docs.rs/datafusion/18.0.0/datafusion/datasource/datasource/trait.TableProvider.html#method.supports_filter_pushdown +[`TableProviderFilterPushDown::Exact`]: https://docs.rs/datafusion/18.0.0/datafusion/datasource/datasource/enum.TableProviderFilterPushDown.html#variant.Exact +[`target_partitions`]: https://docs.rs/datafusion/18.0.0/datafusion/config/struct.ExecutionOptions.html#structfield.target_partitions +[`UnionExec`]: https://docs.rs/datafusion/18.0.0/datafusion/physical_plan/union/struct.UnionExec.html +[YAML]: https://yaml.org/
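To make the "Time Split" rule in the document above concrete, here is a rough, self-contained sketch of grouping chunks by overlapping time ranges so that each group can receive its own `DeduplicateExec`. The `Chunk` struct and `split_by_time` helper are illustrative assumptions only; the real optimizer rule operates on DataFusion execution plan nodes rather than bare structs.

```rust
/// A chunk's inclusive time range, as reported by its statistics.
#[derive(Debug, Clone)]
struct Chunk {
    ts_min: i64,
    ts_max: i64,
}

/// Group chunks so that ranges within a group overlap (transitively) and
/// ranges in different groups are disjoint; disjoint groups cannot share
/// duplicate rows and can therefore be deduplicated independently.
fn split_by_time(mut chunks: Vec<Chunk>) -> Vec<Vec<Chunk>> {
    // sort by start time so overlap detection only needs the running maximum end
    chunks.sort_by_key(|c| c.ts_min);

    let mut groups: Vec<Vec<Chunk>> = Vec::new();
    let mut current_max = i64::MIN;

    for chunk in chunks {
        if !groups.is_empty() && chunk.ts_min <= current_max {
            // overlaps the current group: extend it
            current_max = current_max.max(chunk.ts_max);
            groups.last_mut().expect("group exists").push(chunk);
        } else {
            // disjoint time range: this chunk starts a new deduplication scope
            current_max = chunk.ts_max;
            groups.push(vec![chunk]);
        }
    }
    groups
}

fn main() {
    // mirrors part of the "Time Split" example in the document
    let groups = split_by_time(vec![
        Chunk { ts_min: 1, ts_max: 10 },
        Chunk { ts_min: 2, ts_max: 5 },
        Chunk { ts_min: 11, ts_max: 15 },
        Chunk { ts_min: 16, ts_max: 17 },
        Chunk { ts_min: 17, ts_max: 18 },
    ]);
    // -> [[1..10, 2..5], [11..15], [16..17, 17..18]]
    assert_eq!(groups.len(), 3);
}
```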
b76b75e91124d38878f8674e7d73a1c3879924db
Marco Neumann
2023-02-23 18:03:43
do not panic for unsupported expressions (#7052)
We see this in one of our prod clusters ATM.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
fix: do not panic for unsupported expressions (#7052) We see this in one of our prod clusters ATM. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query/src/frontend/influxrpc.rs b/iox_query/src/frontend/influxrpc.rs index e7d32d01ce..8b41f92ad0 100644 --- a/iox_query/src/frontend/influxrpc.rs +++ b/iox_query/src/frontend/influxrpc.rs @@ -2028,7 +2028,8 @@ mod tests { // test 5: predicate on tag with field_columns without need_fields let predicate = Predicate::new() .with_expr(col("foo").eq(lit("some_thing"))) - .with_field_columns(vec!["i64_field".to_string()]); + .with_field_columns(vec!["i64_field".to_string()]) + .unwrap(); let need_fields = false; let mut projection = columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); @@ -2047,7 +2048,8 @@ mod tests { // test 7: predicate on tag and field with field_columns without need_fields let predicate = Predicate::new() .with_expr(col("bar").eq(lit(1)).and(col("i64_field").eq(lit(1)))) - .with_field_columns(vec!["i64_field".to_string()]); + .with_field_columns(vec!["i64_field".to_string()]) + .unwrap(); let need_fields = false; let mut projection = columns_in_predicates(need_fields, &schema, table, &predicate).unwrap(); @@ -2307,7 +2309,8 @@ mod tests { let expr = col("bar").eq(lit(10)); let predicate = Predicate::new() .with_expr(expr) - .with_field_columns(vec!["i64_field".to_string()]); + .with_field_columns(vec!["i64_field".to_string()]) + .unwrap(); let table_predicates = vec![(Arc::from("h2o"), predicate)]; let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor))); @@ -2511,6 +2514,7 @@ mod tests { let got_predicate = test_db.get_chunks_predicate(); let exp_predicate = Predicate::new() .with_field_columns(vec!["foo.bar"]) + .unwrap() .with_value_expr( "_value" .as_expr() diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs index d68208d059..4d3ac4c265 100644 --- a/predicate/src/lib.rs +++ b/predicate/src/lib.rs @@ -478,12 +478,12 @@ impl Predicate { pub fn with_field_columns( mut self, columns: impl IntoIterator<Item = impl Into<String>>, - ) -> Self { + ) -> Result<Self, &'static str> { // We need to distinguish predicates like `column_name In // (foo, bar)` and `column_name = foo and column_name = bar` in order to handle // this if self.field_columns.is_some() { - unimplemented!("Complex/Multi field predicates are not yet supported"); + return Err("Complex/Multi field predicates are not yet supported"); } let column_names = columns @@ -492,7 +492,7 @@ impl Predicate { .collect::<BTreeSet<_>>(); self.field_columns = Some(column_names); - self + Ok(self) } /// Adds all expressions to the list of general purpose predicates @@ -703,7 +703,8 @@ mod tests { let p = Predicate::new() .with_range(1, 100) .with_expr(col("foo").eq(lit(42))) - .with_field_columns(vec!["f1", "f2"]); + .with_field_columns(vec!["f1", "f2"]) + .unwrap(); assert_eq!( p.to_string(), @@ -711,6 +712,20 @@ mod tests { ); } + #[test] + fn predicate_multi_field_cols_not_supported() { + let err = Predicate::new() + .with_field_columns(vec!["f1", "f2"]) + .unwrap() + .with_field_columns(vec!["f1", "f2"]) + .unwrap_err(); + + assert_eq!( + err.to_string(), + "Complex/Multi field predicates are not yet supported" + ); + } + #[test] fn test_clear_timestamp_if_max_range_out_of_range() { let p = Predicate::new() diff --git a/predicate/src/rpc_predicate.rs b/predicate/src/rpc_predicate.rs index e2b124ca0f..849906c61d 100644 --- a/predicate/src/rpc_predicate.rs +++ b/predicate/src/rpc_predicate.rs @@ -372,7 +372,7 @@ mod tests { ) .unwrap(); - let expected = Predicate::new().with_field_columns(vec!["f1"]); + let expected = Predicate::new().with_field_columns(vec!["f1"]).unwrap(); 
assert_eq!(predicate, expected); } @@ -387,7 +387,9 @@ mod tests { ) .unwrap(); - let expected = Predicate::new().with_field_columns(vec!["f1", "f2"]); + let expected = Predicate::new() + .with_field_columns(vec!["f1", "f2"]) + .unwrap(); assert_eq!(predicate, expected); } @@ -401,7 +403,9 @@ mod tests { ) .unwrap(); - let expected = Predicate::new().with_field_columns(vec![] as Vec<String>); + let expected = Predicate::new() + .with_field_columns(vec![] as Vec<String>) + .unwrap(); assert_eq!(&expected.field_columns, &Some(BTreeSet::new())); assert_eq!(predicate, expected); } @@ -449,7 +453,7 @@ mod tests { ) .unwrap(); - let expected = Predicate::new().with_field_columns(vec!["f2"]); + let expected = Predicate::new().with_field_columns(vec!["f2"]).unwrap(); assert_eq!(predicate, expected); } @@ -466,7 +470,7 @@ mod tests { ) .unwrap(); - let expected = Predicate::new().with_field_columns(vec!["f1"]); + let expected = Predicate::new().with_field_columns(vec!["f1"]).unwrap(); assert_eq!(predicate, expected); } diff --git a/predicate/src/rpc_predicate/field_rewrite.rs b/predicate/src/rpc_predicate/field_rewrite.rs index a1562ef827..0d26c27938 100644 --- a/predicate/src/rpc_predicate/field_rewrite.rs +++ b/predicate/src/rpc_predicate/field_rewrite.rs @@ -204,7 +204,7 @@ impl FieldProjectionRewriter { } }); - Ok(predicate.with_field_columns(new_fields)) + Ok(predicate.with_field_columns(new_fields).unwrap()) } } diff --git a/service_grpc_influxrpc/src/expr.rs b/service_grpc_influxrpc/src/expr.rs index 02b21fbd6f..39faff93d6 100644 --- a/service_grpc_influxrpc/src/expr.rs +++ b/service_grpc_influxrpc/src/expr.rs @@ -110,6 +110,11 @@ pub enum Error { tag_name: String, source: DataFusionError, }, + + #[snafu(display("Field columns not supported: {}", source))] + FieldColumnsNotSupported { + source: Box<dyn std::error::Error + Send + Sync>, + }, } pub type Result<T, E = Error> = std::result::Result<T, E>; @@ -309,7 +314,11 @@ fn convert_simple_node( return Ok(builder.tables(value_list)); } Ok(DecodedTagKey::Field) => { - builder.inner = builder.inner.with_field_columns(value_list); + builder.inner = builder + .inner + .with_field_columns(value_list) + .map_err(|e| Box::<dyn std::error::Error + Send + Sync>::from(e.to_owned())) + .context(FieldColumnsNotSupportedSnafu)?; return Ok(builder); } _ => {} @@ -1072,7 +1081,7 @@ mod tests { // predicate is rewritten to true (which is simplified to an // empty expr), and projection is added - let expected = Predicate::new().with_field_columns(vec!["foo"]); + let expected = Predicate::new().with_field_columns(vec!["foo"]).unwrap(); assert_eq!( predicate, expected, diff --git a/service_grpc_influxrpc/src/service.rs b/service_grpc_influxrpc/src/service.rs index 86d017382d..81895a0405 100644 --- a/service_grpc_influxrpc/src/service.rs +++ b/service_grpc_influxrpc/src/service.rs @@ -210,6 +210,14 @@ impl Error { | Self::FilteringSeries { source, .. } | Self::GroupingSeries { source, .. } | Self::ListingTagValues { source, .. } => datafusion_error_to_tonic_code(&source), + Self::ConvertingPredicate { source, .. } + | Self::ConvertingReadGroupType { source, .. } + | Self::ConvertingReadGroupAggregate { source, .. } + | Self::ConvertingWindowAggregate { source, .. } + if matches!(source, super::expr::Error::FieldColumnsNotSupported { .. }) => + { + tonic::Code::Unimplemented + } Self::ConvertingPredicate { .. } | Self::ConvertingReadGroupAggregate { .. } | Self::ConvertingReadGroupType { .. }
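The core of this fix is turning a panicking builder step (`unimplemented!`) into a fallible one. A hedged, stand-alone sketch of that pattern follows; `FieldPredicate` is a simplified stand-in for the real `Predicate` type, shown only to illustrate how callers now propagate the error with `?`/`unwrap` instead of crashing the server on unsupported expressions.

```rust
use std::collections::BTreeSet;

/// Simplified stand-in for the predicate builder changed in this commit.
#[derive(Debug, Default)]
struct FieldPredicate {
    field_columns: Option<BTreeSet<String>>,
}

impl FieldPredicate {
    fn new() -> Self {
        Self::default()
    }

    /// Formerly panicked via `unimplemented!()` when called twice;
    /// now surfaces a recoverable error to the caller.
    fn with_field_columns(
        mut self,
        columns: impl IntoIterator<Item = impl Into<String>>,
    ) -> Result<Self, &'static str> {
        if self.field_columns.is_some() {
            return Err("Complex/Multi field predicates are not yet supported");
        }
        self.field_columns = Some(columns.into_iter().map(|s| s.into()).collect());
        Ok(self)
    }
}

fn main() {
    // happy path: the builder chain now needs an explicit unwrap / `?`
    let p = FieldPredicate::new()
        .with_field_columns(vec!["f1", "f2"])
        .unwrap();
    assert!(p.field_columns.is_some());

    // a second call yields an error instead of a panic
    let err = p.with_field_columns(vec!["f3"]).unwrap_err();
    assert_eq!(err, "Complex/Multi field predicates are not yet supported");
}
```

The gRPC layer can then map this specific error to `tonic::Code::Unimplemented`, which is exactly what the changes in `service.rs` above do.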
184565b552cba493154a5c343cb40b9a56776d1b
Andrew Lamb
2023-03-20 20:34:18
Implement FlightSQL `GetSqlInfo` endpoint (#7198)
* feat(flightsql): Implement GetSqlInfo endpoint * chore: Add some comments to clarify the tests intent
null
feat(flightsql): Implement FlightSQL `GetSqlInfo` endpoint (#7198) * feat(flightsql): Implement GetSqlInfo endpoint * chore: Add some comments to clarify the tests intent
diff --git a/Cargo.lock b/Cargo.lock index 6a4909737d..2ad7cecca3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1889,6 +1889,7 @@ dependencies = [ "futures", "iox_query", "observability_deps", + "once_cell", "prost", "snafu", "tokio", diff --git a/flightsql/Cargo.toml b/flightsql/Cargo.toml index 65c5ce7731..1aa7151acc 100644 --- a/flightsql/Cargo.toml +++ b/flightsql/Cargo.toml @@ -16,6 +16,7 @@ iox_query = { path = "../iox_query" } bytes = "1.4" futures = "0.3" snafu = "0.7" +once_cell = { version = "1", default-features = false } prost = "0.11" tokio = { version = "1.26", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } tonic = "0.8" diff --git a/flightsql/src/cmd.rs b/flightsql/src/cmd.rs index da20d7e2f7..886e1d16f1 100644 --- a/flightsql/src/cmd.rs +++ b/flightsql/src/cmd.rs @@ -4,8 +4,8 @@ use std::fmt::Display; use arrow_flight::sql::{ ActionClosePreparedStatementRequest, ActionCreatePreparedStatementRequest, Any, - CommandGetCatalogs, CommandGetDbSchemas, CommandGetTableTypes, CommandGetTables, - CommandPreparedStatementQuery, CommandStatementQuery, + CommandGetCatalogs, CommandGetDbSchemas, CommandGetSqlInfo, CommandGetTableTypes, + CommandGetTables, CommandPreparedStatementQuery, CommandStatementQuery, }; use bytes::Bytes; use prost::Message; @@ -67,9 +67,12 @@ impl From<PreparedStatementHandle> for Bytes { /// <https://github.com/apache/arrow-rs/issues/3874> #[derive(Debug, Clone, PartialEq)] pub enum FlightSQLCommand { + /// Run a normal query CommandStatementQuery(CommandStatementQuery), /// Run a prepared statement. CommandPreparedStatementQuery(PreparedStatementHandle), + /// Get information about the SQL supported + CommandGetSqlInfo(CommandGetSqlInfo), /// Get a list of the available catalogs. See [`CommandGetCatalogs`] for details. CommandGetCatalogs(CommandGetCatalogs), /// Get a list of the available schemas. See [`CommandGetDbSchemas`] @@ -92,6 +95,9 @@ impl Display for FlightSQLCommand { write!(f, "CommandStatementQuery{query}") } Self::CommandPreparedStatementQuery(h) => write!(f, "CommandPreparedStatementQuery{h}"), + Self::CommandGetSqlInfo(CommandGetSqlInfo { info: _ }) => { + write!(f, "CommandGetSqlInfo(...)") + } Self::CommandGetCatalogs(CommandGetCatalogs {}) => write!(f, "CommandGetCatalogs"), Self::CommandGetDbSchemas(CommandGetDbSchemas { catalog, @@ -161,6 +167,8 @@ impl FlightSQLCommand { // Decode to IOx specific structure let handle = PreparedStatementHandle::try_decode(prepared_statement_handle)?; Ok(Self::CommandPreparedStatementQuery(handle)) + } else if let Some(decoded_cmd) = Any::unpack::<CommandGetSqlInfo>(&msg)? { + Ok(Self::CommandGetSqlInfo(decoded_cmd)) } else if let Some(decoded_cmd) = Any::unpack::<CommandGetCatalogs>(&msg)? { Ok(Self::CommandGetCatalogs(decoded_cmd)) } else if let Some(decoded_cmd) = Any::unpack::<CommandGetDbSchemas>(&msg)? 
{ @@ -199,6 +207,7 @@ impl FlightSQLCommand { }; Any::pack(&cmd) } + FlightSQLCommand::CommandGetSqlInfo(cmd) => Any::pack(&cmd), FlightSQLCommand::CommandGetCatalogs(cmd) => Any::pack(&cmd), FlightSQLCommand::CommandGetDbSchemas(cmd) => Any::pack(&cmd), FlightSQLCommand::CommandGetTables(cmd) => Any::pack(&cmd), diff --git a/flightsql/src/lib.rs b/flightsql/src/lib.rs index e22723c74c..3a3b8c12fc 100644 --- a/flightsql/src/lib.rs +++ b/flightsql/src/lib.rs @@ -2,6 +2,7 @@ mod cmd; mod error; mod planner; +mod sql_info; pub use cmd::{FlightSQLCommand, PreparedStatementHandle}; pub use error::{Error, Result}; diff --git a/flightsql/src/planner.rs b/flightsql/src/planner.rs index df10c3cbb6..47d40d9167 100644 --- a/flightsql/src/planner.rs +++ b/flightsql/src/planner.rs @@ -5,8 +5,8 @@ use arrow::{datatypes::Schema, error::ArrowError, ipc::writer::IpcWriteOptions}; use arrow_flight::{ sql::{ ActionCreatePreparedStatementRequest, ActionCreatePreparedStatementResult, Any, - CommandGetCatalogs, CommandGetDbSchemas, CommandGetTableTypes, CommandGetTables, - CommandStatementQuery, + CommandGetCatalogs, CommandGetDbSchemas, CommandGetSqlInfo, CommandGetTableTypes, + CommandGetTables, CommandStatementQuery, }, IpcMessage, SchemaAsIpc, }; @@ -16,7 +16,7 @@ use iox_query::{exec::IOxSessionContext, QueryNamespace}; use observability_deps::tracing::debug; use prost::Message; -use crate::error::*; +use crate::{error::*, sql_info::iox_sql_info_list}; use crate::{FlightSQLCommand, PreparedStatementHandle}; /// Logic for creating plans for various Flight messages against a query database @@ -44,6 +44,12 @@ impl FlightSQLPlanner { FlightSQLCommand::CommandPreparedStatementQuery(handle) => { get_schema_for_query(handle.query(), ctx).await } + FlightSQLCommand::CommandGetSqlInfo(CommandGetSqlInfo { info }) => { + let plan = plan_get_sql_info(ctx, info).await?; + // As an optimization, we could hard code the result + // schema instead of recomputing it each time. + get_schema_for_plan(plan) + } FlightSQLCommand::CommandGetCatalogs(CommandGetCatalogs {}) => { let plan = plan_get_catalogs(ctx).await?; // As an optimization, we could hard code the result @@ -112,6 +118,11 @@ impl FlightSQLPlanner { debug!(%query, "Planning FlightSQL prepared query"); Ok(ctx.sql_to_physical_plan(query).await?) } + FlightSQLCommand::CommandGetSqlInfo(CommandGetSqlInfo { info }) => { + debug!("Planning GetSqlInfo query"); + let plan = plan_get_sql_info(ctx, info).await?; + Ok(ctx.create_physical_plan(&plan).await?) + } FlightSQLCommand::CommandGetCatalogs(CommandGetCatalogs {}) => { debug!("Planning GetCatalogs query"); let plan = plan_get_catalogs(ctx).await?; @@ -240,6 +251,14 @@ fn encode_schema(schema: &Schema) -> Result<Bytes> { Ok(schema) } +/// Return a `LogicalPlan` for GetSqlInfo +/// +/// The infos are passed directly from the [`CommandGetSqlInfo::info`] +async fn plan_get_sql_info(ctx: &IOxSessionContext, info: Vec<u32>) -> Result<LogicalPlan> { + let batch = iox_sql_info_list().filter(&info).encode()?; + Ok(ctx.batch_to_logical_plan(batch)?) +} + /// Return a `LogicalPlan` for GetCatalogs /// /// In the future this could be made more efficient by building the diff --git a/flightsql/src/sql_info/meta.rs b/flightsql/src/sql_info/meta.rs new file mode 100644 index 0000000000..fc56fcdb65 --- /dev/null +++ b/flightsql/src/sql_info/meta.rs @@ -0,0 +1,303 @@ +//! SQL metadata tables (originally from [queryrouterd]) +//! +//! TODO: figure out how to generate these keywords automatically from DataFusion / sqlparser-rs +//! +//! 
[queryrouterd]: https://github.com/influxdata/idpe/blob/85aa7a52b40f173cc4d79ac02b3a4a13e82333c4/queryrouter/internal/server/flightsql_info.go#L4 + +pub(crate) const SQL_INFO_SQL_KEYWORDS: &[&str] = &[ + // SQL-92 Reserved Words + "absolute", + "action", + "add", + "all", + "allocate", + "alter", + "and", + "any", + "are", + "as", + "asc", + "assertion", + "at", + "authorization", + "avg", + "begin", + "between", + "bit", + "bit_length", + "both", + "by", + "cascade", + "cascaded", + "case", + "cast", + "catalog", + "char", + "char_length", + "character", + "character_length", + "check", + "close", + "coalesce", + "collate", + "collation", + "column", + "commit", + "connect", + "connection", + "constraint", + "constraints", + "continue", + "convert", + "corresponding", + "count", + "create", + "cross", + "current", + "current_date", + "current_time", + "current_timestamp", + "current_user", + "cursor", + "date", + "day", + "deallocate", + "dec", + "decimal", + "declare", + "default", + "deferrable", + "deferred", + "delete", + "desc", + "describe", + "descriptor", + "diagnostics", + "disconnect", + "distinct", + "domain", + "double", + "drop", + "else", + "end", + "end-exec", + "escape", + "except", + "exception", + "exec", + "execute", + "exists", + "external", + "extract", + "false", + "fetch", + "first", + "float", + "for", + "foreign", + "found", + "from", + "full", + "get", + "global", + "go", + "goto", + "grant", + "group", + "having", + "hour", + "identity", + "immediate", + "in", + "indicator", + "initially", + "inner", + "input", + "insensitive", + "insert", + "int", + "integer", + "intersect", + "interval", + "into", + "is", + "isolation", + "join", + "key", + "language", + "last", + "leading", + "left", + "level", + "like", + "local", + "lower", + "match", + "max", + "min", + "minute", + "module", + "month", + "names", + "national", + "natural", + "nchar", + "next", + "no", + "not", + "null", + "nullif", + "numeric", + "octet_length", + "of", + "on", + "only", + "open", + "option", + "or", + "order", + "outer", + "output", + "overlaps", + "pad", + "partial", + "position", + "precision", + "prepare", + "preserve", + "primary", + "prior", + "privileges", + "procedure", + "public", + "read", + "real", + "references", + "relative", + "restrict", + "revoke", + "right", + "rollback", + "rows", + "schema", + "scroll", + "second", + "section", + "select", + "session", + "session_user", + "set", + "size", + "smallint", + "some", + "space", + "sql", + "sqlcode", + "sqlerror", + "sqlstate", + "substring", + "sum", + "system_user", + "table", + "temporary", + "then", + "time", + "timestamp", + "timezone_hour", + "timezone_minute", + "to", + "trailing", + "transaction", + "translate", + "translation", + "trim", + "true", + "union", + "unique", + "unknown", + "update", + "upper", + "usage", + "user", + "using", + "value", + "values", + "varchar", + "varying", + "view", + "when", + "whenever", + "where", + "with", + "work", + "write", + "year", + "zone", +]; + +pub(crate) const SQL_INFO_NUMERIC_FUNCTIONS: &[&str] = &[ + "abs", "acos", "asin", "atan", "atan2", "ceil", "cos", "exp", "floor", "ln", "log", "log10", + "log2", "pow", "power", "round", "signum", "sin", "sqrt", "tan", "trunc", +]; + +pub(crate) const SQL_INFO_STRING_FUNCTIONS: &[&str] = &[ + "arrow_typeof", + "ascii", + "bit_length", + "btrim", + "char_length", + "character_length", + "chr", + "concat", + "concat_ws", + "digest", + "from_unixtime", + "initcap", + "left", + "length", + "lower", + "lpad", + "ltrim", + "md5", + 
"octet_length", + "random", + "regexp_match", + "regexp_replace", + "repeat", + "replace", + "reverse", + "right", + "rpad", + "rtrim", + "sha224", + "sha256", + "sha384", + "sha512", + "split_part", + "starts_with", + "strpos", + "substr", + "to_hex", + "translate", + "trim", + "upper", + "uuid", +]; + +pub(crate) const SQL_INFO_DATE_TIME_FUNCTIONS: &[&str] = &[ + "current_date", + "current_time", + "date_bin", + "date_part", + "date_trunc", + "datepart", + "datetrunc", + "from_unixtime", + "now", + "to_timestamp", + "to_timestamp_micros", + "to_timestamp_millis", + "to_timestamp_seconds", +]; + +pub(crate) const SQL_INFO_SYSTEM_FUNCTIONS: &[&str] = &["array", "arrow_typeof", "struct"]; diff --git a/flightsql/src/sql_info/mod.rs b/flightsql/src/sql_info/mod.rs new file mode 100644 index 0000000000..a851557c1e --- /dev/null +++ b/flightsql/src/sql_info/mod.rs @@ -0,0 +1,275 @@ +//! Represents the response to FlightSQL `GetSqlInfo` requests and +//! handles the conversion to/from the format specified in the +//! [Arrow FlightSQL Specification]. +//! +//! < +//! info_name: uint32 not null, +//! value: dense_union< +//! string_value: utf8, +//! bool_value: bool, +//! bigint_value: int64, +//! int32_bitmask: int32, +//! string_list: list<string_data: utf8> +//! int32_to_int32_list_map: map<key: int32, value: list<$data$: int32>> +//! > +//! +//! where there is one row per requested piece of metadata information. +//! +//! +//! [Arrow FlightSQL Specification]: https://github.com/apache/arrow/blob/f1eece9f276184063c9c35011e8243eb3b071233/format/FlightSql.proto#L33-L42 + +mod meta; +mod value; + +use crate::error::Result; +use std::{borrow::Cow, collections::BTreeMap, sync::Arc}; + +use arrow::{array::UInt32Builder, record_batch::RecordBatch}; +use arrow_flight::sql::{ + SqlInfo, SqlNullOrdering, SqlSupportedCaseSensitivity, SqlSupportedTransactions, + SupportedSqlGrammar, +}; +use once_cell::sync::Lazy; + +use meta::{ + SQL_INFO_DATE_TIME_FUNCTIONS, SQL_INFO_NUMERIC_FUNCTIONS, SQL_INFO_SQL_KEYWORDS, + SQL_INFO_STRING_FUNCTIONS, SQL_INFO_SYSTEM_FUNCTIONS, +}; +use value::{SqlInfoName, SqlInfoUnionBuilder, SqlInfoValue}; + +/// A list of SQL info names and valies +#[derive(Debug, Clone, PartialEq)] +pub struct SqlInfoList { + /// Use BTreeMap to ensure the values are sorted by value as + /// to make output consistent + /// + /// Use u32 to support "custom" sql info values that are not + /// part of the SqlInfo enum + infos: BTreeMap<u32, SqlInfoValue>, +} + +impl SqlInfoList { + pub fn new() -> Self { + Self { + infos: BTreeMap::new(), + } + } + + /// register the specific sql metadata item + fn with_sql_info(mut self, name: impl SqlInfoName, value: impl Into<SqlInfoValue>) -> Self { + self.infos.insert(name.as_u32(), value.into()); + self + } + + /// Filter this info list keeping only the info values specified + /// in `infos`. 
+ /// + /// Returns self if infos is empty (no filtering) + pub fn filter(&self, info: &[u32]) -> Cow<'_, Self> { + if info.is_empty() { + Cow::Borrowed(self) + } else { + let infos: BTreeMap<_, _> = info + .iter() + .filter_map(|name| self.infos.get(name).map(|v| (*name, v.clone()))) + .collect(); + Cow::Owned(Self { infos }) + } + } + + /// Encode the contents of this info list according to the FlightSQL spec + pub fn encode(&self) -> Result<RecordBatch> { + let mut name_builder = UInt32Builder::new(); + let mut value_builder = SqlInfoUnionBuilder::new(); + + for (&name, value) in self.infos.iter() { + name_builder.append_value(name); + value_builder.append_value(value) + } + + let batch = RecordBatch::try_from_iter(vec![ + ("info_name", Arc::new(name_builder.finish()) as _), + ("value", Arc::new(value_builder.finish()) as _), + ])?; + Ok(batch) + } +} + +#[allow(non_snake_case)] +static INSTANCE: Lazy<SqlInfoList> = Lazy::new(|| { + // The following are not defined in the [`SqlInfo`], but are + // documented at + // https://arrow.apache.org/docs/format/FlightSql.html#protocol-buffer-definitions. + + let SqlInfoFlightSqlServerSql = 4; + let SqlInfoFlightSqlServerSubstrait = 5; + //let SqlInfoFlightSqlServerSubstraitMinVersion = 6; + //let SqlInfoFlightSqlServerSubstraitMaxVersion = 7; + let SqlInfoFlightSqlServerTransaction = 8; + let SqlInfoFlightSqlServerCancel = 9; + let SqlInfoFlightSqlServerStatementTimeout = 100; + let SqlInfoFlightSqlServerTransactionTimeout = 101; + + // Copied from https://github.com/influxdata/idpe/blob/85aa7a52b40f173cc4d79ac02b3a4a13e82333c4/queryrouter/internal/server/flightsql_handler.go#L208-L275 + + SqlInfoList::new() + // Server information + .with_sql_info(SqlInfo::FlightSqlServerName, "InfluxDB IOx") + .with_sql_info(SqlInfo::FlightSqlServerVersion, "2") + // 1.3 comes from https://github.com/apache/arrow/blob/f9324b79bf4fc1ec7e97b32e3cce16e75ef0f5e3/format/Schema.fbs#L24 + .with_sql_info(SqlInfo::FlightSqlServerArrowVersion, "1.3") + .with_sql_info(SqlInfo::FlightSqlServerReadOnly, true) + .with_sql_info(SqlInfoFlightSqlServerSql, true) + .with_sql_info(SqlInfoFlightSqlServerSubstrait, false) + .with_sql_info( + SqlInfoFlightSqlServerTransaction, + SqlSupportedTransactions::SqlTransactionUnspecified as i32, + ) + // don't yetsupport `CancelQuery` action + .with_sql_info(SqlInfoFlightSqlServerCancel, false) + .with_sql_info(SqlInfoFlightSqlServerStatementTimeout, 0i32) + .with_sql_info(SqlInfoFlightSqlServerTransactionTimeout, 0i32) + // SQL syntax information + .with_sql_info(SqlInfo::SqlDdlCatalog, false) + .with_sql_info(SqlInfo::SqlDdlSchema, false) + .with_sql_info(SqlInfo::SqlDdlTable, false) + .with_sql_info( + SqlInfo::SqlIdentifierCase, + SqlSupportedCaseSensitivity::SqlCaseSensitivityLowercase as i32, + ) + .with_sql_info(SqlInfo::SqlIdentifierQuoteChar, r#"""#) + .with_sql_info( + SqlInfo::SqlQuotedIdentifierCase, + SqlSupportedCaseSensitivity::SqlCaseSensitivityCaseInsensitive as i32, + ) + .with_sql_info(SqlInfo::SqlAllTablesAreSelectable, true) + .with_sql_info( + SqlInfo::SqlNullOrdering, + SqlNullOrdering::SqlNullsSortedHigh as i32, + ) + .with_sql_info(SqlInfo::SqlKeywords, SQL_INFO_SQL_KEYWORDS) + .with_sql_info(SqlInfo::SqlNumericFunctions, SQL_INFO_NUMERIC_FUNCTIONS) + .with_sql_info(SqlInfo::SqlStringFunctions, SQL_INFO_STRING_FUNCTIONS) + .with_sql_info(SqlInfo::SqlSystemFunctions, SQL_INFO_SYSTEM_FUNCTIONS) + .with_sql_info(SqlInfo::SqlDatetimeFunctions, SQL_INFO_DATE_TIME_FUNCTIONS) + 
.with_sql_info(SqlInfo::SqlSearchStringEscape, "\\") + .with_sql_info(SqlInfo::SqlExtraNameCharacters, "") + .with_sql_info(SqlInfo::SqlSupportsColumnAliasing, true) + .with_sql_info(SqlInfo::SqlNullPlusNullIsNull, true) + // Skip SqlSupportsConvert (which is the map of the conversions that are supported) + // .with_sql_info(SqlInfo::SqlSupportsConvert, TBD) + // https://github.com/influxdata/influxdb_iox/issues/7253 + .with_sql_info(SqlInfo::SqlSupportsTableCorrelationNames, false) + .with_sql_info(SqlInfo::SqlSupportsDifferentTableCorrelationNames, false) + .with_sql_info(SqlInfo::SqlSupportsExpressionsInOrderBy, true) + .with_sql_info(SqlInfo::SqlSupportsOrderByUnrelated, true) + .with_sql_info(SqlInfo::SqlSupportedGroupBy, 3i32) + .with_sql_info(SqlInfo::SqlSupportsLikeEscapeClause, true) + .with_sql_info(SqlInfo::SqlSupportsNonNullableColumns, true) + .with_sql_info( + SqlInfo::SqlSupportedGrammar, + SupportedSqlGrammar::SqlCoreGrammar as i32, + ) + // report IOx supports all ansi 92 + .with_sql_info(SqlInfo::SqlAnsi92SupportedLevel, 0b111_i32) + .with_sql_info(SqlInfo::SqlSupportsIntegrityEnhancementFacility, false) + .with_sql_info(SqlInfo::SqlOuterJoinsSupportLevel, 2i32) + .with_sql_info(SqlInfo::SqlSchemaTerm, "schema") + .with_sql_info(SqlInfo::SqlProcedureTerm, "procedure") + .with_sql_info(SqlInfo::SqlCatalogAtStart, false) + .with_sql_info(SqlInfo::SqlSchemasSupportedActions, 0i32) + .with_sql_info(SqlInfo::SqlCatalogsSupportedActions, 0i32) + .with_sql_info(SqlInfo::SqlSupportedPositionedCommands, 0i32) + .with_sql_info(SqlInfo::SqlSelectForUpdateSupported, false) + .with_sql_info(SqlInfo::SqlStoredProceduresSupported, false) + .with_sql_info(SqlInfo::SqlSupportedSubqueries, 15i32) + .with_sql_info(SqlInfo::SqlCorrelatedSubqueriesSupported, true) + .with_sql_info(SqlInfo::SqlSupportedUnions, 3i32) + // For max lengths, report max arrow string length (IOx + // doesn't enfore many of these limits yet + .with_sql_info(SqlInfo::SqlMaxBinaryLiteralLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxCharLiteralLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxColumnNameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxColumnsInGroupBy, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxColumnsInIndex, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxColumnsInOrderBy, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxColumnsInSelect, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxColumnsInTable, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxConnections, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxCursorNameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxIndexLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlDbSchemaNameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxProcedureNameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxCatalogNameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxRowSize, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxRowSizeIncludesBlobs, true) + .with_sql_info(SqlInfo::SqlMaxStatementLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxStatements, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxTableNameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxTablesInSelect, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlMaxUsernameLength, i32::MAX as i64) + .with_sql_info(SqlInfo::SqlDefaultTransactionIsolation, 0i64) + .with_sql_info(SqlInfo::SqlTransactionsSupported, false) + .with_sql_info(SqlInfo::SqlSupportedTransactionsIsolationLevels, 0i32) + 
.with_sql_info(SqlInfo::SqlDataDefinitionCausesTransactionCommit, false) + .with_sql_info(SqlInfo::SqlDataDefinitionsInTransactionsIgnored, true) + .with_sql_info(SqlInfo::SqlSupportedResultSetTypes, 0i32) + .with_sql_info( + SqlInfo::SqlSupportedConcurrenciesForResultSetUnspecified, + 0i32, + ) + .with_sql_info( + SqlInfo::SqlSupportedConcurrenciesForResultSetForwardOnly, + 0i32, + ) + .with_sql_info( + SqlInfo::SqlSupportedConcurrenciesForResultSetScrollSensitive, + 0i32, + ) + .with_sql_info( + SqlInfo::SqlSupportedConcurrenciesForResultSetScrollInsensitive, + 0i32, + ) + .with_sql_info(SqlInfo::SqlBatchUpdatesSupported, false) + .with_sql_info(SqlInfo::SqlSavepointsSupported, false) + .with_sql_info(SqlInfo::SqlNamedParametersSupported, false) + .with_sql_info(SqlInfo::SqlLocatorsUpdateCopy, false) + .with_sql_info(SqlInfo::SqlStoredFunctionsUsingCallSyntaxSupported, false) +}); + +/// Return the static SqlInfoList that describes IOx's capablity +pub fn iox_sql_info_list() -> &'static SqlInfoList { + &INSTANCE +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn filter_empty() { + let filter = &[]; + assert_eq!( + iox_sql_info_list(), + iox_sql_info_list().filter(filter).as_ref() + ) + } + + #[test] + fn filter_some() { + let filter = &[ + SqlInfo::FlightSqlServerName as u32, + SqlInfo::FlightSqlServerArrowVersion as u32, + SqlInfo::SqlBatchUpdatesSupported as u32, + 999999, // model some unknown info requested + ]; + let result = iox_sql_info_list().filter(filter); + + let infos = &result.infos; + assert_eq!(result.infos.len(), 3); + assert!(infos.contains_key(&(SqlInfo::FlightSqlServerName as u32))); + assert!(infos.contains_key(&(SqlInfo::FlightSqlServerArrowVersion as u32))); + assert!(infos.contains_key(&(SqlInfo::SqlBatchUpdatesSupported as u32))); + assert!(!infos.contains_key(&999999)); + } +} diff --git a/flightsql/src/sql_info/value.rs b/flightsql/src/sql_info/value.rs new file mode 100644 index 0000000000..9fa46647ca --- /dev/null +++ b/flightsql/src/sql_info/value.rs @@ -0,0 +1,228 @@ +use arrow::{ + array::{ + Array, ArrayBuilder, ArrayData, BooleanBuilder, Int32Builder, Int64Builder, Int8Builder, + ListBuilder, StringBuilder, UnionArray, + }, + datatypes::{DataType, Field, UnionMode}, +}; +use arrow_flight::sql::SqlInfo; +use once_cell::sync::Lazy; + +///! 
Dynamic value support for SqlInfo + +/// Represents a dynamic value +#[derive(Debug, Clone, PartialEq)] +pub enum SqlInfoValue { + String(String), + Bool(bool), + BigInt(i64), + Bitmask(i32), + StringList(Vec<String>), + // TODO support more exotic metadata that requires the map of lists + //ListMap(BTreeMap<i32, Vec<i32>>), +} + +impl From<&str> for SqlInfoValue { + fn from(value: &str) -> Self { + Self::String(value.to_string()) + } +} + +impl From<bool> for SqlInfoValue { + fn from(value: bool) -> Self { + Self::Bool(value) + } +} + +impl From<i32> for SqlInfoValue { + fn from(value: i32) -> Self { + Self::Bitmask(value) + } +} + +impl From<i64> for SqlInfoValue { + fn from(value: i64) -> Self { + Self::BigInt(value) + } +} + +impl From<&[&str]> for SqlInfoValue { + fn from(values: &[&str]) -> Self { + let values = values.iter().map(|s| s.to_string()).collect(); + Self::StringList(values) + } +} + +/// Something that can be converted into u32 (the represenation of a +/// [`SqlInfo`] name) +pub trait SqlInfoName { + fn as_u32(&self) -> u32; +} + +impl SqlInfoName for SqlInfo { + fn as_u32(&self) -> u32 { + // SqlInfos are u32 in the flight spec, but for some reason + // SqlInfo repr is an i32, so convert between them + u32::try_from(i32::from(*self)).expect("SqlInfo fit into u32") + } +} + +// Allow passing u32 directly into to with_sql_info +impl SqlInfoName for u32 { + fn as_u32(&self) -> u32 { + *self + } +} + +/// Handles creating the dense [`UnionArray`] described by [flightsql] +/// +/// +/// NOT YET COMPLETE: The int32_to_int32_list_map +/// +/// ```text +/// * value: dense_union< +/// * string_value: utf8, +/// * bool_value: bool, +/// * bigint_value: int64, +/// * int32_bitmask: int32, +/// * string_list: list<string_data: utf8> +/// * int32_to_int32_list_map: map<key: int32, value: list<$data$: int32>> +/// * > +/// ``` +///[flightsql]: (https://github.com/apache/arrow/blob/f9324b79bf4fc1ec7e97b32e3cce16e75ef0f5e3/format/FlightSql.proto#L32-L43 +pub struct SqlInfoUnionBuilder { + // Values for each child type + string_values: StringBuilder, + bool_values: BooleanBuilder, + bigint_values: Int64Builder, + int32_bitmask_values: Int32Builder, + string_list_values: ListBuilder<StringBuilder>, + + /// incrementally build types/offset of the dense union, + /// + /// See [Union Spec] for details. + /// + /// [Union Spec]: https://arrow.apache.org/docs/format/Columnar.html#dense-union + type_ids: Int8Builder, + offsets: Int32Builder, +} + +/// [`DataType`] for the output union array +static UNION_TYPE: Lazy<DataType> = Lazy::new(|| { + let nullable = false; + let fields = vec![ + Field::new("string_value", DataType::Utf8, nullable), + Field::new("bool_value", DataType::Boolean, nullable), + Field::new("bigint_value", DataType::Int64, nullable), + Field::new("int32_bitmask", DataType::Int32, nullable), + // treat list as nullable b/c that is what hte builders make + Field::new( + "string_list", + DataType::List(Box::new(Field::new("item", DataType::Utf8, true))), + true, + ), + ]; + + // create "type ids", one for each type + // assume they go from 0 .. 
num_fields + let type_ids: Vec<i8> = (0..fields.len()).map(|v| v as i8).collect(); + + DataType::Union(fields, type_ids, UnionMode::Dense) +}); + +impl SqlInfoUnionBuilder { + pub fn new() -> Self { + Self { + string_values: StringBuilder::new(), + bool_values: BooleanBuilder::new(), + bigint_values: Int64Builder::new(), + int32_bitmask_values: Int32Builder::new(), + string_list_values: ListBuilder::new(StringBuilder::new()), + type_ids: Int8Builder::new(), + offsets: Int32Builder::new(), + } + } + + /// Append the specified value to this builder + pub fn append_value(&mut self, v: &SqlInfoValue) { + // typeid is which child and len is the child array's length + // *after* adding the value + let (type_id, len) = match v { + SqlInfoValue::String(v) => { + self.string_values.append_value(v); + (0, self.string_values.len()) + } + SqlInfoValue::Bool(v) => { + self.bool_values.append_value(*v); + (1, self.bool_values.len()) + } + SqlInfoValue::BigInt(v) => { + self.bigint_values.append_value(*v); + (2, self.bigint_values.len()) + } + SqlInfoValue::Bitmask(v) => { + self.int32_bitmask_values.append_value(*v); + (3, self.int32_bitmask_values.len()) + } + SqlInfoValue::StringList(values) => { + // build list + for v in values { + self.string_list_values.values().append_value(v); + } + // complete the list + self.string_list_values.append(true); + (4, self.string_list_values.len()) + } + }; + + self.type_ids.append_value(type_id); + let len = i32::try_from(len).expect("offset fit in i32"); + self.offsets.append_value(len - 1); + } + + /// Complete the construction and build the [`UnionArray`] + pub fn finish(self) -> UnionArray { + let Self { + mut string_values, + mut bool_values, + mut bigint_values, + mut int32_bitmask_values, + mut string_list_values, + mut type_ids, + mut offsets, + } = self; + let type_ids = type_ids.finish(); + let offsets = offsets.finish(); + + // form the correct ArrayData + + let len = offsets.len(); + let null_bit_buffer = None; + let offset = 0; + + let buffers = vec![ + type_ids.into_data().buffers()[0].clone(), + offsets.into_data().buffers()[0].clone(), + ]; + + let child_data = vec![ + string_values.finish().into_data(), + bool_values.finish().into_data(), + bigint_values.finish().into_data(), + int32_bitmask_values.finish().into_data(), + string_list_values.finish().into_data(), + ]; + + let data = ArrayData::try_new( + UNION_TYPE.clone(), + len, + null_bit_buffer, + offset, + buffers, + child_data, + ) + .expect("Correctly created UnionArray"); + + UnionArray::from(data) + } +} diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs index 8d61bc65db..c7d669c3be 100644 --- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs +++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use arrow::record_batch::RecordBatch; -use arrow_flight::decode::FlightRecordBatchStream; +use arrow_flight::{decode::FlightRecordBatchStream, sql::SqlInfo}; use arrow_util::test_util::batches_to_sorted_lines; use assert_cmd::Command; use datafusion::common::assert_contains; @@ -140,6 +140,82 @@ async fn flightsql_prepared_query() { .await } +#[tokio::test] +async fn flightsql_get_sql_infos() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let table_name = "the_table"; + + // Set up the cluster ==================================== + let mut cluster = MiniCluster::create_shared2(database_url).await; + + StepTest::new( + &mut cluster, + vec![ + 
Step::WriteLineProtocol(format!( + "{table_name},tag1=A,tag2=B val=42i 123456\n\ + {table_name},tag1=A,tag2=C val=43i 123457" + )), + Step::Custom(Box::new(move |state: &mut StepTestState| { + async move { + let mut client = flightsql_client(state.cluster()); + + // test with no filtering + let batches = collect_stream(client.get_sql_info(vec![]).await.unwrap()).await; + let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); + // 85 `SqlInfo` entries are returned by IOx's GetSqlInfo implementation + // if we change what is returned then this number should be updated too + assert_eq!(total_rows, 85); + + // only retrieve requested metadata + let infos = vec![ + SqlInfo::FlightSqlServerName as u32, + SqlInfo::FlightSqlServerArrowVersion as u32, + SqlInfo::SqlBatchUpdatesSupported as u32, + 999999, // model some unknown info requested + ]; + + let batches = collect_stream(client.get_sql_info(infos).await.unwrap()).await; + + insta::assert_yaml_snapshot!( + batches_to_sorted_lines(&batches), + @r###" + --- + - +-----------+-----------------------------+ + - "| info_name | value |" + - +-----------+-----------------------------+ + - "| 0 | {string_value=InfluxDB IOx} |" + - "| 2 | {string_value=1.3} |" + - "| 572 | {bool_value=false} |" + - +-----------+-----------------------------+ + "### + ); + + // Test zero case (nothing matches) + let infos = vec![ + 999999, // model some unknown info requested + ]; + + let batches = collect_stream(client.get_sql_info(infos).await.unwrap()).await; + + insta::assert_yaml_snapshot!( + batches_to_sorted_lines(&batches), + @r###" + --- + - ++ + - ++ + "### + ); + } + .boxed() + })), + ], + ) + .run() + .await +} + #[tokio::test] async fn flightsql_get_catalogs() { test_helpers::maybe_start_logging(); @@ -523,7 +599,7 @@ async fn flightsql_jdbc() { VIEW"; // Validate metadata: jdbc_client <url> metadata - Command::from_std(std::process::Command::new(&path)) + let mut assert = Command::from_std(std::process::Command::new(&path)) .arg(&jdbc_url) .arg("metadata") .assert() @@ -532,6 +608,14 @@ async fn flightsql_jdbc() { .stdout(predicate::str::contains(expected_schemas)) .stdout(predicate::str::contains(expected_tables)) .stdout(predicate::str::contains(expected_table_types)); + + let expected_metadata = EXPECTED_METADATA + .trim() + .replace("REPLACE_ME_WITH_JBDC_URL", &jdbc_url); + + for expected in expected_metadata.lines() { + assert = assert.stdout(predicate::str::contains(expected)); + } } .boxed() })), @@ -558,3 +642,140 @@ fn flightsql_client(cluster: &MiniCluster) -> FlightSqlClient { async fn collect_stream(stream: FlightRecordBatchStream) -> Vec<RecordBatch> { stream.try_collect().await.expect("collecting batches") } + +const EXPECTED_METADATA: &str = r#" +allProceduresAreCallable: true +allTablesAreSelectable: true +autoCommitFailureClosesAllResultSets: false +dataDefinitionCausesTransactionCommit: false +dataDefinitionIgnoredInTransactions: true +doesMaxRowSizeIncludeBlobs: true +generatedKeyAlwaysReturned: false +getCatalogSeparator: . 
+getCatalogTerm: null +getDatabaseMajorVersion: 10 +getDatabaseMinorVersion: 0 +getDatabaseProductName: InfluxDB IOx +getDatabaseProductVersion: 2 +getDefaultTransactionIsolation: 0 +getDriverMajorVersion: 10 +getDriverMinorVersion: 0 +getDriverName: Arrow Flight SQL JDBC Driver +getDriverVersion: 10.0.0 +getExtraNameCharacters: +getIdentifierQuoteString: " +getJDBCMajorVersion: 4 +getJDBCMinorVersion: 1 +getMaxBinaryLiteralLength: 2147483647 +getMaxCatalogNameLength: 2147483647 +getMaxCharLiteralLength: 2147483647 +getMaxColumnNameLength: 2147483647 +getMaxColumnsInGroupBy: 2147483647 +getMaxColumnsInIndex: 2147483647 +getMaxColumnsInOrderBy: 2147483647 +getMaxColumnsInSelect: 2147483647 +getMaxColumnsInTable: 2147483647 +getMaxConnections: 2147483647 +getMaxCursorNameLength: 2147483647 +getMaxIndexLength: 2147483647 +getMaxLogicalLobSize: 0 +getMaxProcedureNameLength: 2147483647 +getMaxRowSize: 2147483647 +getMaxSchemaNameLength: 2147483647 +getMaxStatementLength: 2147483647 +getMaxStatements: 2147483647 +getMaxTableNameLength: 2147483647 +getMaxTablesInSelect: 2147483647 +getMaxUserNameLength: 2147483647 +getNumericFunctions: abs, acos, asin, atan, atan2, ceil, cos, exp, floor, ln, log, log10, log2, pow, power, round, signum, sin, sqrt, tan, trunc +getProcedureTerm: procedure +getResultSetHoldability: 1 +getSchemaTerm: schema +getSearchStringEscape: \ +getSQLKeywords: absolute, action, add, all, allocate, alter, and, any, are, as, asc, assertion, at, authorization, avg, begin, between, bit, bit_length, both, by, cascade, cascaded, case, cast, catalog, char, char_length, character, character_length, check, close, coalesce, collate, collation, column, commit, connect, connection, constraint, constraints, continue, convert, corresponding, count, create, cross, current, current_date, current_time, current_timestamp, current_user, cursor, date, day, deallocate, dec, decimal, declare, default, deferrable, deferred, delete, desc, describe, descriptor, diagnostics, disconnect, distinct, domain, double, drop, else, end, end-exec, escape, except, exception, exec, execute, exists, external, extract, false, fetch, first, float, for, foreign, found, from, full, get, global, go, goto, grant, group, having, hour, identity, immediate, in, indicator, initially, inner, input, insensitive, insert, int, integer, intersect, interval, into, is, isolation, join, key, language, last, leading, left, level, like, local, lower, match, max, min, minute, module, month, names, national, natural, nchar, next, no, not, null, nullif, numeric, octet_length, of, on, only, open, option, or, order, outer, output, overlaps, pad, partial, position, precision, prepare, preserve, primary, prior, privileges, procedure, public, read, real, references, relative, restrict, revoke, right, rollback, rows, schema, scroll, second, section, select, session, session_user, set, size, smallint, some, space, sql, sqlcode, sqlerror, sqlstate, substring, sum, system_user, table, temporary, then, time, timestamp, timezone_hour, timezone_minute, to, trailing, transaction, translate, translation, trim, true, union, unique, unknown, update, upper, usage, user, using, value, values, varchar, varying, view, when, whenever, where, with, work, write, year, zone +getSQLStateType: 2 +getStringFunctions: arrow_typeof, ascii, bit_length, btrim, char_length, character_length, chr, concat, concat_ws, digest, from_unixtime, initcap, left, length, lower, lpad, ltrim, md5, octet_length, random, regexp_match, regexp_replace, repeat, replace, reverse, right, 
rpad, rtrim, sha224, sha256, sha384, sha512, split_part, starts_with, strpos, substr, to_hex, translate, trim, upper, uuid +getSystemFunctions: array, arrow_typeof, struct +getTimeDateFunctions: current_date, current_time, date_bin, date_part, date_trunc, datepart, datetrunc, from_unixtime, now, to_timestamp, to_timestamp_micros, to_timestamp_millis, to_timestamp_seconds +getURL: REPLACE_ME_WITH_JBDC_URL +getUserName: test +isCatalogAtStart: false +isReadOnly: true +locatorsUpdateCopy: false +nullPlusNonNullIsNull: true +nullsAreSortedAtEnd: true +nullsAreSortedAtStart: false +nullsAreSortedHigh: false +nullsAreSortedLow: false +storesLowerCaseIdentifiers: false +storesLowerCaseQuotedIdentifiers: false +storesMixedCaseIdentifiers: false +storesMixedCaseQuotedIdentifiers: false +storesUpperCaseIdentifiers: true +storesUpperCaseQuotedIdentifiers: false +supportsAlterTableWithAddColumn: false +supportsAlterTableWithDropColumn: false +supportsANSI92EntryLevelSQL: true +supportsANSI92FullSQL: true +supportsANSI92IntermediateSQL: true +supportsBatchUpdates: false +supportsCatalogsInDataManipulation: true +supportsCatalogsInIndexDefinitions: false +supportsCatalogsInPrivilegeDefinitions: false +supportsCatalogsInProcedureCalls: true +supportsCatalogsInTableDefinitions: true +supportsColumnAliasing: true +supportsCoreSQLGrammar: false +supportsCorrelatedSubqueries: true +supportsDataDefinitionAndDataManipulationTransactions: false +supportsDataManipulationTransactionsOnly: true +supportsDifferentTableCorrelationNames: false +supportsExpressionsInOrderBy: true +supportsExtendedSQLGrammar: false +supportsFullOuterJoins: false +supportsGetGeneratedKeys: false +supportsGroupBy: true +supportsGroupByBeyondSelect: true +supportsGroupByUnrelated: true +supportsIntegrityEnhancementFacility: false +supportsLikeEscapeClause: true +supportsLimitedOuterJoins: true +supportsMinimumSQLGrammar: true +supportsMixedCaseIdentifiers: false +supportsMixedCaseQuotedIdentifiers: true +supportsMultipleOpenResults: false +supportsMultipleResultSets: false +supportsMultipleTransactions: false +supportsNamedParameters: false +supportsNonNullableColumns: true +supportsOpenCursorsAcrossCommit: false +supportsOpenCursorsAcrossRollback: false +supportsOpenStatementsAcrossCommit: false +supportsOpenStatementsAcrossRollback: false +supportsOrderByUnrelated: true +supportsOuterJoins: true +supportsPositionedDelete: false +supportsPositionedUpdate: false +supportsRefCursors: false +supportsSavepoints: false +supportsSchemasInDataManipulation: true +supportsSchemasInIndexDefinitions: false +supportsSchemasInPrivilegeDefinitions: false +supportsSchemasInProcedureCalls: false +supportsSchemasInTableDefinitions: true +supportsSelectForUpdate: false +supportsStatementPooling: false +supportsStoredFunctionsUsingCallSyntax: false +supportsStoredProcedures: false +supportsSubqueriesInComparisons: true +supportsSubqueriesInExists: true +supportsSubqueriesInIns: true +supportsSubqueriesInQuantifieds: true +supportsTableCorrelationNames: false +supportsTransactionIsolationLevel: false +supportsTransactions: false +supportsUnion: true +supportsUnionAll: true +usesLocalFilePerTable: false +usesLocalFiles: false +"#; diff --git a/influxdb_iox/tests/jdbc_client/Main.java b/influxdb_iox/tests/jdbc_client/Main.java index 6270d369ce..b9cfa385f4 100644 --- a/influxdb_iox/tests/jdbc_client/Main.java +++ b/influxdb_iox/tests/jdbc_client/Main.java @@ -148,13 +148,157 @@ public class Main { System.out.println("**************"); 
print_result_set(md.getTableTypes()); - //System.out.println("isReadOnly: " + md.isReadOnly()); - //System.out.println("getSearchStringEscape: " + md.getSearchStringEscape()); - //System.out.println("getDriverVersion: " + md.getDriverVersion()); - //System.out.println("getDatabaseProductVersion: " + md.getDatabaseProductVersion()); - //System.out.println("getJDBCMajorVersion: " + md.getJDBCMajorVersion()); - //System.out.println("getJDBCMinorVersion: " + md.getJDBCMinorVersion()); - //System.out.println("getDriverName: " + md.getDriverName()); + + // TODO uncomment when GetTables is implemented + //System.out.println("**************"); + //System.out.println("getColumns:"); + //System.out.println("**************"); + //print_result_set(md.getColumns(null, null, null, null)); + + System.out.println("**************"); + System.out.println("getFunctions:"); + System.out.println("**************"); + print_result_set(md.getFunctions(null, null, null)); + + + // List from https://docs.oracle.com/javase/8/docs/api/java/sql/DatabaseMetaData.html + System.out.println("allProceduresAreCallable: " + md.allProceduresAreCallable()); + System.out.println("allTablesAreSelectable: " + md.allTablesAreSelectable()); + System.out.println("autoCommitFailureClosesAllResultSets: " + md.autoCommitFailureClosesAllResultSets()); + System.out.println("dataDefinitionCausesTransactionCommit: " + md.dataDefinitionCausesTransactionCommit()); + System.out.println("dataDefinitionIgnoredInTransactions: " + md.dataDefinitionIgnoredInTransactions()); + System.out.println("doesMaxRowSizeIncludeBlobs: " + md.doesMaxRowSizeIncludeBlobs()); + System.out.println("generatedKeyAlwaysReturned: " + md.generatedKeyAlwaysReturned()); + System.out.println("getCatalogSeparator: " + md.getCatalogSeparator()); + System.out.println("getCatalogTerm: " + md.getCatalogTerm()); + System.out.println("getDatabaseMajorVersion: " + md.getDatabaseMajorVersion()); + System.out.println("getDatabaseMinorVersion: " + md.getDatabaseMinorVersion()); + System.out.println("getDatabaseProductName: " + md.getDatabaseProductName()); + System.out.println("getDatabaseProductVersion: " + md.getDatabaseProductVersion()); + System.out.println("getDefaultTransactionIsolation: " + md.getDefaultTransactionIsolation()); + System.out.println("getDriverMajorVersion: " + md.getDriverMajorVersion()); + System.out.println("getDriverMinorVersion: " + md.getDriverMinorVersion()); + System.out.println("getDriverName: " + md.getDriverName()); + System.out.println("getDriverVersion: " + md.getDriverVersion()); + System.out.println("getExtraNameCharacters: " + md.getExtraNameCharacters()); + System.out.println("getIdentifierQuoteString: " + md.getIdentifierQuoteString()); + System.out.println("getJDBCMajorVersion: " + md.getJDBCMajorVersion()); + System.out.println("getJDBCMinorVersion: " + md.getJDBCMinorVersion()); + System.out.println("getMaxBinaryLiteralLength: " + md.getMaxBinaryLiteralLength()); + System.out.println("getMaxCatalogNameLength: " + md.getMaxCatalogNameLength()); + System.out.println("getMaxCharLiteralLength: " + md.getMaxCharLiteralLength()); + System.out.println("getMaxColumnNameLength: " + md.getMaxColumnNameLength()); + System.out.println("getMaxColumnsInGroupBy: " + md.getMaxColumnsInGroupBy()); + System.out.println("getMaxColumnsInIndex: " + md.getMaxColumnsInIndex()); + System.out.println("getMaxColumnsInOrderBy: " + md.getMaxColumnsInOrderBy()); + System.out.println("getMaxColumnsInSelect: " + md.getMaxColumnsInSelect()); + 
System.out.println("getMaxColumnsInTable: " + md.getMaxColumnsInTable()); + System.out.println("getMaxConnections: " + md.getMaxConnections()); + System.out.println("getMaxCursorNameLength: " + md.getMaxCursorNameLength()); + System.out.println("getMaxIndexLength: " + md.getMaxIndexLength()); + System.out.println("getMaxLogicalLobSize: " + md.getMaxLogicalLobSize()); + System.out.println("getMaxProcedureNameLength: " + md.getMaxProcedureNameLength()); + System.out.println("getMaxRowSize: " + md.getMaxRowSize()); + System.out.println("getMaxSchemaNameLength: " + md.getMaxSchemaNameLength()); + System.out.println("getMaxStatementLength: " + md.getMaxStatementLength()); + System.out.println("getMaxStatements: " + md.getMaxStatements()); + System.out.println("getMaxTableNameLength: " + md.getMaxTableNameLength()); + System.out.println("getMaxTablesInSelect: " + md.getMaxTablesInSelect()); + System.out.println("getMaxUserNameLength: " + md.getMaxUserNameLength()); + System.out.println("getNumericFunctions: " + md.getNumericFunctions()); + System.out.println("getProcedureTerm: " + md.getProcedureTerm()); + System.out.println("getResultSetHoldability: " + md.getResultSetHoldability()); + System.out.println("getSchemaTerm: " + md.getSchemaTerm()); + System.out.println("getSearchStringEscape: " + md.getSearchStringEscape()); + System.out.println("getSQLKeywords: " + md.getSQLKeywords()); + System.out.println("getSQLStateType: " + md.getSQLStateType()); + System.out.println("getStringFunctions: " + md.getStringFunctions()); + System.out.println("getSystemFunctions: " + md.getSystemFunctions()); + System.out.println("getTimeDateFunctions: " + md.getTimeDateFunctions()); + System.out.println("getURL: " + md.getURL()); + System.out.println("getUserName: " + md.getUserName()); + System.out.println("isCatalogAtStart: " + md.isCatalogAtStart()); + System.out.println("isReadOnly: " + md.isReadOnly()); + System.out.println("locatorsUpdateCopy: " + md.locatorsUpdateCopy()); + System.out.println("nullPlusNonNullIsNull: " + md.nullPlusNonNullIsNull()); + System.out.println("nullsAreSortedAtEnd: " + md.nullsAreSortedAtEnd()); + System.out.println("nullsAreSortedAtStart: " + md.nullsAreSortedAtStart()); + System.out.println("nullsAreSortedHigh: " + md.nullsAreSortedHigh()); + System.out.println("nullsAreSortedLow: " + md.nullsAreSortedLow()); + System.out.println("storesLowerCaseIdentifiers: " + md.storesLowerCaseIdentifiers()); + System.out.println("storesLowerCaseQuotedIdentifiers: " + md.storesLowerCaseQuotedIdentifiers()); + System.out.println("storesMixedCaseIdentifiers: " + md.storesMixedCaseIdentifiers()); + System.out.println("storesMixedCaseQuotedIdentifiers: " + md.storesMixedCaseQuotedIdentifiers()); + System.out.println("storesUpperCaseIdentifiers: " + md.storesUpperCaseIdentifiers()); + System.out.println("storesUpperCaseQuotedIdentifiers: " + md.storesUpperCaseQuotedIdentifiers()); + System.out.println("supportsAlterTableWithAddColumn: " + md.supportsAlterTableWithAddColumn()); + System.out.println("supportsAlterTableWithDropColumn: " + md.supportsAlterTableWithDropColumn()); + System.out.println("supportsANSI92EntryLevelSQL: " + md.supportsANSI92EntryLevelSQL()); + System.out.println("supportsANSI92FullSQL: " + md.supportsANSI92FullSQL()); + System.out.println("supportsANSI92IntermediateSQL: " + md.supportsANSI92IntermediateSQL()); + System.out.println("supportsBatchUpdates: " + md.supportsBatchUpdates()); + System.out.println("supportsCatalogsInDataManipulation: " + 
md.supportsCatalogsInDataManipulation()); + System.out.println("supportsCatalogsInIndexDefinitions: " + md.supportsCatalogsInIndexDefinitions()); + System.out.println("supportsCatalogsInPrivilegeDefinitions: " + md.supportsCatalogsInPrivilegeDefinitions()); + System.out.println("supportsCatalogsInProcedureCalls: " + md.supportsCatalogsInProcedureCalls()); + System.out.println("supportsCatalogsInTableDefinitions: " + md.supportsCatalogsInTableDefinitions()); + System.out.println("supportsColumnAliasing: " + md.supportsColumnAliasing()); + // Convert not yet supported + // https://github.com/influxdata/influxdb_iox/issues/7253 + //System.out.println("supportsConvert: " + md.supportsConvert()); + System.out.println("supportsCoreSQLGrammar: " + md.supportsCoreSQLGrammar()); + System.out.println("supportsCorrelatedSubqueries: " + md.supportsCorrelatedSubqueries()); + System.out.println("supportsDataDefinitionAndDataManipulationTransactions: " + md.supportsDataDefinitionAndDataManipulationTransactions()); + System.out.println("supportsDataManipulationTransactionsOnly: " + md.supportsDataManipulationTransactionsOnly()); + System.out.println("supportsDifferentTableCorrelationNames: " + md.supportsDifferentTableCorrelationNames()); + System.out.println("supportsExpressionsInOrderBy: " + md.supportsExpressionsInOrderBy()); + System.out.println("supportsExtendedSQLGrammar: " + md.supportsExtendedSQLGrammar()); + System.out.println("supportsFullOuterJoins: " + md.supportsFullOuterJoins()); + System.out.println("supportsGetGeneratedKeys: " + md.supportsGetGeneratedKeys()); + System.out.println("supportsGroupBy: " + md.supportsGroupBy()); + System.out.println("supportsGroupByBeyondSelect: " + md.supportsGroupByBeyondSelect()); + System.out.println("supportsGroupByUnrelated: " + md.supportsGroupByUnrelated()); + System.out.println("supportsIntegrityEnhancementFacility: " + md.supportsIntegrityEnhancementFacility()); + System.out.println("supportsLikeEscapeClause: " + md.supportsLikeEscapeClause()); + System.out.println("supportsLimitedOuterJoins: " + md.supportsLimitedOuterJoins()); + System.out.println("supportsMinimumSQLGrammar: " + md.supportsMinimumSQLGrammar()); + System.out.println("supportsMixedCaseIdentifiers: " + md.supportsMixedCaseIdentifiers()); + System.out.println("supportsMixedCaseQuotedIdentifiers: " + md.supportsMixedCaseQuotedIdentifiers()); + System.out.println("supportsMultipleOpenResults: " + md.supportsMultipleOpenResults()); + System.out.println("supportsMultipleResultSets: " + md.supportsMultipleResultSets()); + System.out.println("supportsMultipleTransactions: " + md.supportsMultipleTransactions()); + System.out.println("supportsNamedParameters: " + md.supportsNamedParameters()); + System.out.println("supportsNonNullableColumns: " + md.supportsNonNullableColumns()); + System.out.println("supportsOpenCursorsAcrossCommit: " + md.supportsOpenCursorsAcrossCommit()); + System.out.println("supportsOpenCursorsAcrossRollback: " + md.supportsOpenCursorsAcrossRollback()); + System.out.println("supportsOpenStatementsAcrossCommit: " + md.supportsOpenStatementsAcrossCommit()); + System.out.println("supportsOpenStatementsAcrossRollback: " + md.supportsOpenStatementsAcrossRollback()); + System.out.println("supportsOrderByUnrelated: " + md.supportsOrderByUnrelated()); + System.out.println("supportsOuterJoins: " + md.supportsOuterJoins()); + System.out.println("supportsPositionedDelete: " + md.supportsPositionedDelete()); + System.out.println("supportsPositionedUpdate: " + 
md.supportsPositionedUpdate()); + System.out.println("supportsRefCursors: " + md.supportsRefCursors()); + System.out.println("supportsSavepoints: " + md.supportsSavepoints()); + System.out.println("supportsSchemasInDataManipulation: " + md.supportsSchemasInDataManipulation()); + System.out.println("supportsSchemasInIndexDefinitions: " + md.supportsSchemasInIndexDefinitions()); + System.out.println("supportsSchemasInPrivilegeDefinitions: " + md.supportsSchemasInPrivilegeDefinitions()); + System.out.println("supportsSchemasInProcedureCalls: " + md.supportsSchemasInProcedureCalls()); + System.out.println("supportsSchemasInTableDefinitions: " + md.supportsSchemasInTableDefinitions()); + System.out.println("supportsSelectForUpdate: " + md.supportsSelectForUpdate()); + System.out.println("supportsStatementPooling: " + md.supportsStatementPooling()); + System.out.println("supportsStoredFunctionsUsingCallSyntax: " + md.supportsStoredFunctionsUsingCallSyntax()); + System.out.println("supportsStoredProcedures: " + md.supportsStoredProcedures()); + System.out.println("supportsSubqueriesInComparisons: " + md.supportsSubqueriesInComparisons()); + System.out.println("supportsSubqueriesInExists: " + md.supportsSubqueriesInExists()); + System.out.println("supportsSubqueriesInIns: " + md.supportsSubqueriesInIns()); + System.out.println("supportsSubqueriesInQuantifieds: " + md.supportsSubqueriesInQuantifieds()); + System.out.println("supportsTableCorrelationNames: " + md.supportsTableCorrelationNames()); + System.out.println("supportsTransactionIsolationLevel: " + md.supportsTransactionIsolationLevel(0)); + System.out.println("supportsTransactions: " + md.supportsTransactions()); + System.out.println("supportsUnion: " + md.supportsUnion()); + System.out.println("supportsUnionAll: " + md.supportsUnionAll()); + System.out.println("usesLocalFilePerTable: " + md.usesLocalFilePerTable()); + System.out.println("usesLocalFiles: " + md.usesLocalFiles()); } diff --git a/influxdb_iox_client/src/client/flightsql.rs b/influxdb_iox_client/src/client/flightsql.rs index ff436cdc48..c128555575 100644 --- a/influxdb_iox_client/src/client/flightsql.rs +++ b/influxdb_iox_client/src/client/flightsql.rs @@ -29,8 +29,8 @@ use arrow_flight::{ error::{FlightError, Result}, sql::{ ActionCreatePreparedStatementRequest, ActionCreatePreparedStatementResult, Any, - CommandGetCatalogs, CommandGetDbSchemas, CommandGetTableTypes, CommandGetTables, - CommandPreparedStatementQuery, CommandStatementQuery, ProstMessageExt, + CommandGetCatalogs, CommandGetDbSchemas, CommandGetSqlInfo, CommandGetTableTypes, + CommandGetTables, CommandPreparedStatementQuery, CommandStatementQuery, ProstMessageExt, }, Action, FlightClient, FlightDescriptor, FlightInfo, IpcMessage, Ticket, }; @@ -125,6 +125,18 @@ impl FlightSqlClient { self.do_get_with_cmd(msg.as_any()).await } + /// Get information about sql compatibility from this server using [`CommandGetSqlInfo`] + /// + /// This implementation does not support alternate endpoints + /// + /// * If omitted, then all metadata will be retrieved. + /// + /// [`CommandGetSqlInfo`]: https://github.com/apache/arrow/blob/3a6fc1f9eedd41df2d8ffbcbdfbdab911ff6d82e/format/FlightSql.proto#L45-L68 + pub async fn get_sql_info(&mut self, info: Vec<u32>) -> Result<FlightRecordBatchStream> { + let msg = CommandGetSqlInfo { info }; + self.do_get_with_cmd(msg.as_any()).await + } + /// List the catalogs on this server using a [`CommandGetCatalogs`] message. 
/// /// This implementation does not support alternate endpoints diff --git a/iox_query/src/exec/context.rs b/iox_query/src/exec/context.rs index bfd6b444b0..5678be8420 100644 --- a/iox_query/src/exec/context.rs +++ b/iox_query/src/exec/context.rs @@ -324,6 +324,14 @@ impl IOxSessionContext { ctx.inner.state().create_logical_plan(sql).await } + /// Create a logical plan that reads a single [`RecordBatch`]. Use + /// `create_physical_plan` to actually execute the query. + pub fn batch_to_logical_plan(&self, batch: RecordBatch) -> Result<LogicalPlan> { + let ctx = self.child_ctx("batch_to_logical_plan"); + debug!(num_rows = batch.num_rows(), "planning RecordBatch query"); + ctx.inner.read_batch(batch)?.into_optimized_plan() + } + /// Plan a SQL statement and convert it to an execution plan. This assumes that any /// tables referenced in the SQL have been registered with this context pub async fn sql_to_physical_plan(&self, sql: &str) -> Result<Arc<dyn ExecutionPlan>> {
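A minimal sketch, illustrative only and not part of the commit above, of how the pieces added in this diff fit together: the From impls turn plain Rust values into SqlInfoValue variants, and SqlInfoUnionBuilder appends each one to exactly one child array while recording the type id and offset that the dense union layout requires. It assumes SqlInfoValue, SqlInfoUnionBuilder, and the arrow crate are in scope; the literal values are made up.

// Minimal sketch, illustrative only: drive the builder added above with a few
// made-up values. Assumes `SqlInfoValue` and `SqlInfoUnionBuilder` from this
// diff are in scope.
use arrow::array::UnionArray;

fn example_sql_info_union() -> UnionArray {
    // The From impls let plain Rust values stand in for SqlInfoValue variants.
    let values: Vec<SqlInfoValue> = vec![
        "InfluxDB IOx".into(),     // SqlInfoValue::String
        false.into(),              // SqlInfoValue::Bool
        1024i64.into(),            // SqlInfoValue::BigInt
        (&["\"", "'"][..]).into(), // SqlInfoValue::StringList
    ];

    // Each append_value pushes into exactly one child array and records the
    // (type_id, offset) pair required by the dense union layout.
    let mut builder = SqlInfoUnionBuilder::new();
    for v in &values {
        builder.append_value(v);
    }
    builder.finish()
}

The end-to-end test earlier in this diff drives the same machinery from the client side, calling get_sql_info with a list of numeric SqlInfo ids, or an empty list to fetch all of the server's metadata.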
8410998408e288ef28cc4241d3dd1c6ea5bfd059
Andrew Lamb
2023-01-18 16:51:24
Update datafusion to Jan 17, 2023 (2 / 2) and arrow/parquet `30.0.1` (#6604)
* chore: Update datafusion to Jan 9, 2023 (2 / 2) and arrow/parquet `30.0.1` * chore: Update for changes in arrow ipc * chore: Run cargo hakari tasks
Co-authored-by: CircleCI[bot] <[email protected]>
chore: Update datafusion to Jan 17, 2023 (2 / 2) and arrow/parquet `30.0.1` (#6604) * chore: Update datafusion to Jan 9, 2023 (2 / 2) and arrow/parquet `30.0.1` * chore: Update for changes in arrow ipc * chore: Run cargo hakari tasks Co-authored-by: CircleCI[bot] <[email protected]>
diff --git a/Cargo.lock b/Cargo.lock index 3a1da02d92..1bb5d58880 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,11 +100,12 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "arrow" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fe17dc0113da7e2eaeaedbd304d347aa8ea64916d225b79a5c3f3b6b5d8da4c" +checksum = "1948f504d736dc6f71ea33773c5c7475998c44925be5321e9d18087a626845f5" dependencies = [ "ahash 0.8.2", + "arrow-arith", "arrow-array", "arrow-buffer", "arrow-cast", @@ -113,23 +114,33 @@ dependencies = [ "arrow-ipc", "arrow-json", "arrow-ord", + "arrow-row", "arrow-schema", "arrow-select", "arrow-string", - "chrono", "comfy-table", +] + +[[package]] +name = "arrow-arith" +version = "30.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5984187a7913813ffd5bb034fdc6810bdbe0ae4cff2292f0eb92797342dc02c8" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", "half 2.1.0", - "hashbrown 0.13.2", - "multiversion", "num", - "regex", ] [[package]] name = "arrow-array" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9452131e027aec3276e43449162af084db611c42ef875e54d231e6580bc6254" +checksum = "bf71dc342bb42343d331b58c0bcad095dc045e367493d47b7f4c4509e2adfee5" dependencies = [ "ahash 0.8.2", "arrow-buffer", @@ -143,9 +154,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a301001e8ed7da638a12fa579ac5f3f154c44c0655f2ca6ed0f8586b418a779" +checksum = "a7b328d9f3e124cca761ec85a6d3fcea9bf8de1b8531c7a3b6abd367472024df" dependencies = [ "half 2.1.0", "num", @@ -153,9 +164,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048c91d067f2eb8cc327f086773e5b0f0d7714780807fc4db09366584e23bac8" +checksum = "03976edbf66ac00a582af10a51743f0a9611777adfd68c71799d783344c3bdd2" dependencies = [ "arrow-array", "arrow-buffer", @@ -169,9 +180,9 @@ dependencies = [ [[package]] name = "arrow-csv" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed914cd0006a3bb9cac8136b3098ac7796ad26b82362f00d4f2e7c1a54684b86" +checksum = "c1b610dc9e3b43bcebeacede47381252ea41363fbcc3c3eb641ff24fc94e567e" dependencies = [ "arrow-array", "arrow-buffer", @@ -180,6 +191,7 @@ dependencies = [ "arrow-schema", "chrono", "csv", + "csv-core", "lazy_static", "lexical-core", "regex", @@ -187,9 +199,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e59619d9d102e4e6b22087b2bd60c07df76fcb68683620841718f6bc8e8f02cb" +checksum = "174df8602dedcdb9149538809c11bd3c0888af30b915f763c66a3d724391c8b9" dependencies = [ "arrow-buffer", "arrow-schema", @@ -199,22 +211,21 @@ dependencies = [ [[package]] name = "arrow-flight" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb6e49945f93a8fbd3ec0568167f42097b56134b88686602b9e639a7042ef38" +checksum = "cf32bc58976e558f1ba3ff0aa7b07914d4b70850906fc55d5d16790a9ee79e3f" dependencies = [ "arrow-array", "arrow-buffer", "arrow-ipc", "arrow-schema", - "base64 0.13.1", + 
"base64 0.20.0", "bytes", "futures", "proc-macro2", "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "prost-derive 0.11.6", - "prost-types 0.11.6", "tokio", "tonic", "tonic-build", @@ -222,9 +233,9 @@ dependencies = [ [[package]] name = "arrow-ipc" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb7ad6d2fa06a1cebdaa213c59fc953b9230e560d8374aba133b572b864ec55e" +checksum = "2a316907980e70fbf87b006c52993a22d93e4a9bca4ec2ac42cfedb2fdc204ac" dependencies = [ "arrow-array", "arrow-buffer", @@ -236,9 +247,9 @@ dependencies = [ [[package]] name = "arrow-json" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e22efab3ad70336057660c5e5f2b72e2417e3444c27cb42dc477d678ddd6979" +checksum = "2cc1a1b2e98be0d8d20f932f76a8d976b779d502c8f6b828becc835d6879e903" dependencies = [ "arrow-array", "arrow-buffer", @@ -254,9 +265,9 @@ dependencies = [ [[package]] name = "arrow-ord" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23b623332804a65ad11e7732c351896dcb132c19f8e25d99fdb13b00aae5206" +checksum = "7db83c14ddddf81c1d10ce303670f70b7687c8f52de7425b09ae905e4357fda5" dependencies = [ "arrow-array", "arrow-buffer", @@ -266,17 +277,32 @@ dependencies = [ "num", ] +[[package]] +name = "arrow-row" +version = "30.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db41abdf586f1dba8c2973711d5c69ffb9d63688ffa46354b8c85bf9347a921c" +dependencies = [ + "ahash 0.8.2", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "half 2.1.0", + "hashbrown 0.13.2", +] + [[package]] name = "arrow-schema" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ef17c144f1253b9864f5a3e8f4c6f1e436bdd52394855d5942f132f776b64e" +checksum = "a99dcc494fe6224e5ece572c5935d5109120a71df06bd8e04c4e23ac14dd8fac" [[package]] name = "arrow-select" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2accaf218ff107e3df0ee8f1e09b092249a1cc741c4377858a1470fd27d7096" +checksum = "4e3a2cde3ea85b28f64704045d7d54e0fcc4b17efffced574d2dd3320218298f" dependencies = [ "arrow-array", "arrow-buffer", @@ -287,9 +313,9 @@ dependencies = [ [[package]] name = "arrow-string" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a0954f9e1f45b04815ddacbde72899bf3c03a08fa6c0375f42178c4a01a510" +checksum = "04cf8d0003ebe0aecc716e0ac8c858c570872a7485c7c6284975f31469703a0d" dependencies = [ "arrow-array", "arrow-buffer", @@ -521,6 +547,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" + [[package]] name = "base64" version = "0.21.0" @@ -1338,7 +1370,7 @@ dependencies = [ [[package]] name = "datafusion" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" 
dependencies = [ "ahash 0.8.2", "arrow", @@ -1384,7 +1416,7 @@ dependencies = [ [[package]] name = "datafusion-common" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "arrow", "chrono", @@ -1397,7 +1429,7 @@ dependencies = [ [[package]] name = "datafusion-expr" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "ahash 0.8.2", "arrow", @@ -1409,7 +1441,7 @@ dependencies = [ [[package]] name = "datafusion-optimizer" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "arrow", "async-trait", @@ -1425,7 +1457,7 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "ahash 0.8.2", "arrow", @@ -1455,7 +1487,7 @@ dependencies = [ [[package]] name = "datafusion-proto" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "arrow", "chrono", @@ -1466,13 +1498,13 @@ dependencies = [ "parking_lot 0.12.1", "pbjson-build", "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", ] [[package]] name = "datafusion-row" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "arrow", "datafusion-common", @@ -1483,7 +1515,7 @@ dependencies = [ [[package]] name = "datafusion-sql" version = "16.0.0" -source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=279440b2ab92d18675b8102e342d4d82182287dc#279440b2ab92d18675b8102e342d4d82182287dc" dependencies = [ "arrow-schema", "datafusion-common", @@ -1899,7 +1931,7 @@ dependencies = [ "pbjson-types", "predicate", "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "query_functions", "serde", "snafu", @@ -1925,10 +1957,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -1959,7 
+1989,7 @@ dependencies = [ "hyper", "pin-project", "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "prost-types 0.11.6", "tokio", "tokio-stream", @@ -1974,7 +2004,7 @@ name = "grpc-binary-logger-proto" version = "0.1.0" dependencies = [ "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "prost-types 0.11.6", "tonic", "tonic-build", @@ -1986,7 +2016,7 @@ name = "grpc-binary-logger-test-proto" version = "0.1.0" dependencies = [ "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "prost-types 0.11.6", "tonic", "tonic-build", @@ -3427,26 +3457,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" -[[package]] -name = "multiversion" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025c962a3dd3cc5e0e520aa9c612201d127dcdf28616974961a649dca64f5373" -dependencies = [ - "multiversion-macros", -] - -[[package]] -name = "multiversion-macros" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a3e2bde382ebf960c1f3e79689fa5941625fe9bf694a1cb64af3e85faff3af" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "mutable_batch" version = "0.1.0" @@ -3671,19 +3681,19 @@ dependencies = [ [[package]] name = "object_store" -version = "0.5.2" -source = "git+https://github.com/apache/arrow-rs.git?rev=f5c165acc0e6cc4b34e0eaea006aab7e5bd28d66#f5c165acc0e6cc4b34e0eaea006aab7e5bd28d66" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4201837dc4c27a8670f0363b1255cd3845a4f0c521211cced1ed14c1d0cc6d2" dependencies = [ "async-trait", - "base64 0.13.1", + "base64 0.20.0", "bytes", "chrono", "futures", - "getrandom", "itertools", "parking_lot 0.12.1", "percent-encoding", - "quick-xml 0.26.0", + "quick-xml 0.27.1", "rand", "reqwest", "ring", @@ -3835,9 +3845,9 @@ dependencies = [ [[package]] name = "parquet" -version = "29.0.0" +version = "30.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d906343fd18ace6b998d5074697743e8e9358efa8c3c796a1381b98cba813338" +checksum = "4bba2a7630d2946f9e2020225062ad5619d70320e06dae6ae1074febf4c4e932" dependencies = [ "ahash 0.8.2", "arrow-array", @@ -3847,7 +3857,7 @@ dependencies = [ "arrow-ipc", "arrow-schema", "arrow-select", - "base64 0.13.1", + "base64 0.20.0", "brotli", "bytes", "chrono", @@ -3963,7 +3973,7 @@ dependencies = [ "pbjson", "pbjson-build", "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "serde", ] @@ -4153,7 +4163,7 @@ dependencies = [ "once_cell", "parking_lot 0.12.1", "prost 0.11.6", - "prost-build 0.11.3", + "prost-build 0.11.5", "prost-derive 0.11.6", "sha2", "smallvec", @@ -4271,9 +4281,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" dependencies = [ "unicode-ident", ] @@ -4353,9 +4363,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e330bf1316db56b12c2bcfa399e8edddd4821965ea25ddb2c134b610b1c1c604" +checksum = 
"cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" dependencies = [ "bytes", "heck", @@ -4557,9 +4567,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.26.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +checksum = "ffc053f057dd768a56f62cd7e434c42c831d296968997e9ac1f76ea7c2d14c41" dependencies = [ "memchr", "serde", @@ -5912,7 +5922,7 @@ checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ "prettyplease", "proc-macro2", - "prost-build 0.11.3", + "prost-build 0.11.5", "quote", "syn", ] @@ -6568,7 +6578,6 @@ dependencies = [ "arrow-flight", "arrow-ord", "arrow-string", - "base64 0.13.1", "bitflags", "byteorder", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 55692f0fbf..ef6c380821 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,16 +113,12 @@ edition = "2021" license = "MIT OR Apache-2.0" [workspace.dependencies] -arrow = { version = "29.0.0" } -arrow-flight = { version = "29.0.0" } -#datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="07f49803a3d7a9e9b3c2c9a7714c1bb08db71385", default-features = false } -#datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" } -# Temporary patch to https://github.com/alamb/arrow-datafusion/tree/alamb/patched_for_iox -# See https://github.com/alamb/arrow-datafusion/pull/7 for details -datafusion = { git = "https://github.com/alamb/arrow-datafusion.git", branch="alamb/patched_for_iox", default-features = false } -datafusion-proto = { git = "https://github.com/alamb/arrow-datafusion.git", branch="alamb/patched_for_iox" } +arrow = { version = "30.0.0" } +arrow-flight = { version = "30.0.0" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="279440b2ab92d18675b8102e342d4d82182287dc", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="279440b2ab92d18675b8102e342d4d82182287dc" } hashbrown = { version = "0.13.2" } -parquet = { version = "29.0.0" } +parquet = { version = "30.0.0" } # This profile optimizes for runtime performance and small binary size at the expense of longer # build times. It's most suitable for final release builds. @@ -147,9 +143,4 @@ incremental = true opt-level = 3 [profile.dev.package.similar] -opt-level = 3 - -[patch.crates-io] -# remove and bump object_store dep version once this revision is released. 
-# patch for https://github.com/influxdata/idpe/issues/16611 -object_store = { git = 'https://github.com/apache/arrow-rs.git', rev = "f5c165acc0e6cc4b34e0eaea006aab7e5bd28d66", package = "object_store" } +opt-level = 3 \ No newline at end of file diff --git a/ingest_replica/src/grpc/query.rs b/ingest_replica/src/grpc/query.rs index a9bd1d9569..35ea32dabf 100644 --- a/ingest_replica/src/grpc/query.rs +++ b/ingest_replica/src/grpc/query.rs @@ -7,11 +7,10 @@ use futures::{Stream, StreamExt}; use generated_types::influxdata::iox::ingester::v1::{self as proto, PartitionStatus}; use iox_arrow_flight::{ encode::{ - prepare_batch_for_flight, prepare_schema_for_flight, split_batch_for_grpc_response, - GRPC_TARGET_MAX_BATCH_SIZE_BYTES, + flight_data_from_arrow_batch, prepare_batch_for_flight, prepare_schema_for_flight, + split_batch_for_grpc_response, GRPC_TARGET_MAX_BATCH_SIZE_BYTES, }, flight_service_server::FlightService as Flight, - utils::flight_data_from_arrow_batch, Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo, HandshakeRequest, HandshakeResponse, IpcMessage, PutResult, SchemaAsIpc, SchemaResult, Ticket, }; @@ -435,7 +434,7 @@ impl Stream for FlightFrameCodec { let flight_data = FlightData::new( None, - IpcMessage(build_none_flight_msg()), + IpcMessage(build_none_flight_msg().into()), bytes.to_vec(), vec![], ); @@ -477,6 +476,7 @@ fn build_none_flight_msg() -> Vec<u8> { #[cfg(test)] mod tests { use arrow::{error::ArrowError, ipc::MessageHeader}; + use bytes::Bytes; use data_types::PartitionId; use futures::StreamExt; use generated_types::influxdata::iox::ingester::v1::{self as proto}; @@ -649,7 +649,9 @@ mod tests { let mut flight = FlightService::new(MockQueryExec::default(), 100, &metric::Registry::default()); - let req = tonic::Request::new(Ticket { ticket: vec![] }); + let req = tonic::Request::new(Ticket { + ticket: Bytes::new(), + }); match flight.do_get(req).await { Ok(_) => panic!("expected error because of invalid ticket"), Err(s) => { @@ -659,7 +661,9 @@ mod tests { flight.request_sem = Semaphore::new(0); - let req = tonic::Request::new(Ticket { ticket: vec![] }); + let req = tonic::Request::new(Ticket { + ticket: Bytes::new(), + }); match flight.do_get(req).await { Ok(_) => panic!("expected error because of request limit"), Err(s) => { diff --git a/ingester/src/server/grpc/query.rs b/ingester/src/server/grpc/query.rs index f7787b264b..66295c4268 100644 --- a/ingester/src/server/grpc/query.rs +++ b/ingester/src/server/grpc/query.rs @@ -13,7 +13,7 @@ use flatbuffers::FlatBufferBuilder; use futures::Stream; use generated_types::influxdata::iox::ingester::v1::{self as proto}; use iox_arrow_flight::{ - flight_service_server::FlightService as Flight, utils::flight_data_from_arrow_batch, Action, + encode::flight_data_from_arrow_batch, flight_service_server::FlightService as Flight, Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo, HandshakeRequest, HandshakeResponse, IpcMessage, PutResult, SchemaAsIpc, SchemaResult, Ticket, }; @@ -332,7 +332,7 @@ impl Stream for GetStream { let flight_data = FlightData::new( None, - IpcMessage(build_none_flight_msg()), + IpcMessage(build_none_flight_msg().into()), bytes.to_vec(), vec![], ); diff --git a/ingester2/src/server/grpc/query.rs b/ingester2/src/server/grpc/query.rs index c2e9bd5384..d7a2dbfbbe 100644 --- a/ingester2/src/server/grpc/query.rs +++ b/ingester2/src/server/grpc/query.rs @@ -7,11 +7,10 @@ use futures::{Stream, StreamExt}; use 
generated_types::influxdata::iox::ingester::v1::{self as proto, PartitionStatus}; use iox_arrow_flight::{ encode::{ - prepare_batch_for_flight, prepare_schema_for_flight, split_batch_for_grpc_response, - GRPC_TARGET_MAX_BATCH_SIZE_BYTES, + flight_data_from_arrow_batch, prepare_batch_for_flight, prepare_schema_for_flight, + split_batch_for_grpc_response, GRPC_TARGET_MAX_BATCH_SIZE_BYTES, }, flight_service_server::FlightService as Flight, - utils::flight_data_from_arrow_batch, Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo, HandshakeRequest, HandshakeResponse, IpcMessage, PutResult, SchemaAsIpc, SchemaResult, Ticket, }; @@ -442,7 +441,7 @@ impl Stream for FlightFrameCodec { let flight_data = FlightData::new( None, - IpcMessage(build_none_flight_msg()), + IpcMessage(build_none_flight_msg().into()), bytes.to_vec(), vec![], ); @@ -484,6 +483,7 @@ fn build_none_flight_msg() -> Vec<u8> { #[cfg(test)] mod tests { use arrow::{error::ArrowError, ipc::MessageHeader}; + use bytes::Bytes; use data_types::PartitionId; use futures::StreamExt; use generated_types::influxdata::iox::ingester::v1::{self as proto}; @@ -660,7 +660,9 @@ mod tests { &metric::Registry::default(), ); - let req = tonic::Request::new(Ticket { ticket: vec![] }); + let req = tonic::Request::new(Ticket { + ticket: Bytes::new(), + }); match flight.do_get(req).await { Ok(_) => panic!("expected error because of invalid ticket"), Err(s) => { @@ -670,7 +672,9 @@ mod tests { flight.request_sem = Semaphore::new(0); - let req = tonic::Request::new(Ticket { ticket: vec![] }); + let req = tonic::Request::new(Ticket { + ticket: Bytes::new(), + }); match flight.do_get(req).await { Ok(_) => panic!("expected error because of request limit"), Err(s) => { diff --git a/iox_arrow_flight/src/client.rs b/iox_arrow_flight/src/client.rs index 03ff39af0a..c93d3f2fd5 100644 --- a/iox_arrow_flight/src/client.rs +++ b/iox_arrow_flight/src/client.rs @@ -108,7 +108,7 @@ impl FlightClient { pub async fn handshake(&mut self, payload: Vec<u8>) -> Result<Vec<u8>> { let request = HandshakeRequest { protocol_version: 0, - payload, + payload: payload.into(), }; let mut response_stream = self @@ -128,7 +128,7 @@ impl FlightClient { )); } - Ok(response.payload) + Ok(response.payload.to_vec()) } else { Err(FlightError::protocol("No response from handshake")) } @@ -138,7 +138,9 @@ impl FlightClient { /// returning a [`FlightRecordBatchStream`] for reading /// [`RecordBatch`]es. 
pub async fn do_get(&mut self, ticket: Vec<u8>) -> Result<FlightRecordBatchStream> { - let t = Ticket { ticket }; + let t = Ticket { + ticket: ticket.into(), + }; let request = self.make_request(t); let response = self diff --git a/iox_arrow_flight/src/encode.rs b/iox_arrow_flight/src/encode.rs index b001e1a4f6..9a2acfbbe8 100644 --- a/iox_arrow_flight/src/encode.rs +++ b/iox_arrow_flight/src/encode.rs @@ -6,7 +6,7 @@ use arrow::{ ipc::writer::IpcWriteOptions, record_batch::RecordBatch, }; -use arrow_flight::{utils::flight_data_from_arrow_batch, FlightData, SchemaAsIpc}; +use arrow_flight::{FlightData, SchemaAsIpc}; use futures::{stream::BoxStream, StreamExt}; /// Creates a stream which encodes a [`Stream`](futures::Stream) of @@ -93,7 +93,7 @@ impl StreamEncoderBuilder { // to have that schema too let schema = Arc::new(prepare_schema_for_flight(&schema)); let mut schema_flight_data: FlightData = SchemaAsIpc::new(&schema, &options).into(); - schema_flight_data.app_metadata = app_metadata; + schema_flight_data.app_metadata = app_metadata.into(); let schema_stream = futures::stream::once(async move { Ok(schema_flight_data) }); @@ -248,6 +248,25 @@ fn hydrate_dictionary(array: &ArrayRef) -> Result<ArrayRef, tonic::Status> { } } +/// TODO remove when arrow 31.0.0 is released +/// and instead use the FlightDataEncoder directly +pub fn flight_data_from_arrow_batch( + batch: &RecordBatch, + options: &IpcWriteOptions, +) -> (Vec<FlightData>, FlightData) { + let data_gen = arrow::ipc::writer::IpcDataGenerator::default(); + let mut dictionary_tracker = arrow::ipc::writer::DictionaryTracker::new(false); + + let (encoded_dictionaries, encoded_batch) = data_gen + .encoded_batch(batch, &mut dictionary_tracker, options) + .expect("DictionaryTracker configured above to not error on replacement"); + + let flight_dictionaries = encoded_dictionaries.into_iter().map(Into::into).collect(); + let flight_batch = encoded_batch.into(); + + (flight_dictionaries, flight_batch) +} + #[cfg(test)] mod tests { use arrow::{ diff --git a/iox_arrow_flight/src/flightsql.rs b/iox_arrow_flight/src/flightsql.rs index 3150c7f830..9df54c5bf0 100644 --- a/iox_arrow_flight/src/flightsql.rs +++ b/iox_arrow_flight/src/flightsql.rs @@ -155,6 +155,6 @@ impl FlightSqlClient { })? 
.ticket; - self.inner.do_get(ticket).await + self.inner.do_get(ticket.into()).await } } diff --git a/iox_query/src/exec/seriesset/series.rs b/iox_query/src/exec/seriesset/series.rs index eca17f32d8..3290ff4ca3 100644 --- a/iox_query/src/exec/seriesset/series.rs +++ b/iox_query/src/exec/seriesset/series.rs @@ -285,7 +285,7 @@ impl SeriesSet { let tags = self.create_frame_tags(schema.field(index.value_index).name()); - let mut timestamps = compute::nullif::nullif( + let mut timestamps = compute::kernels::nullif::nullif( batch.column(index.timestamp_index), &compute::is_null(array).expect("is_null"), ) diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs index 7d8c9cd3cb..ad0c42286f 100644 --- a/service_grpc_flight/src/lib.rs +++ b/service_grpc_flight/src/lib.rs @@ -341,7 +341,7 @@ where }?; let message: prost_types::Any = - prost::Message::decode(cmd.as_slice()).context(DeserializationSnafu)?; + prost::Message::decode(cmd.as_ref()).context(DeserializationSnafu)?; let flight_info = self.dispatch(&namespace_name, request, message).await?; Ok(tonic::Response::new(flight_info)) @@ -449,7 +449,7 @@ where }]; Ok(FlightInfo { - schema, + schema: schema.into(), flight_descriptor: Some(flight_descriptor), endpoint, total_records, @@ -578,7 +578,9 @@ mod tests { server: Arc::clone(&test_storage), }; let ticket = Ticket { - ticket: br#"{"namespace_name": "my_db", "sql_query": "SELECT 1;"}"#.to_vec(), + ticket: br#"{"namespace_name": "my_db", "sql_query": "SELECT 1;"}"# + .to_vec() + .into(), }; let streaming_resp1 = service .do_get(tonic::Request::new(ticket.clone())) diff --git a/service_grpc_flight/src/request.rs b/service_grpc_flight/src/request.rs index 244b1ff589..ba66157fc5 100644 --- a/service_grpc_flight/src/request.rs +++ b/service_grpc_flight/src/request.rs @@ -95,7 +95,9 @@ impl IoxGetRequest { let ticket = read_info.encode_to_vec(); - Ok(Ticket { ticket }) + Ok(Ticket { + ticket: ticket.into(), + }) } /// The Go clients still use an older form of ticket encoding, JSON tickets @@ -241,7 +243,7 @@ mod tests { #[test] fn proto_ticket_decoding_error() { let ticket = Ticket { - ticket: b"invalid ticket".to_vec(), + ticket: b"invalid ticket".to_vec().into(), }; // Reverts to default (unspecified) for invalid query_type enumeration, and thus SQL @@ -279,13 +281,13 @@ mod tests { fn make_proto_ticket(read_info: &proto::ReadInfo) -> Ticket { Ticket { - ticket: read_info.encode_to_vec(), + ticket: read_info.encode_to_vec().into(), } } fn make_json_ticket(json: &str) -> Ticket { Ticket { - ticket: json.as_bytes().to_vec(), + ticket: json.as_bytes().to_vec().into(), } } } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index f3f8aa6442..7ab17d99d3 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -17,18 +17,17 @@ license.workspace = true ### BEGIN HAKARI SECTION [dependencies] ahash = { version = "0.8", default-features = false, features = ["getrandom", "runtime-rng"] } -arrow = { version = "29", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] } -arrow-flight = { version = "29", features = ["flight-sql-experimental", "prost-types"] } -arrow-ord = { version = "29", default-features = false, features = ["dyn_cmp_dict"] } -arrow-string = { version = "29", default-features = false, features = ["dyn_cmp_dict"] } -base64 = { version = "0.13", features = ["std"] } +arrow = { version = "30", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", 
"dyn_cmp_dict", "ipc", "json", "prettyprint"] } +arrow-flight = { version = "30", features = ["flight-sql-experimental"] } +arrow-ord = { version = "30", default-features = false, features = ["dyn_cmp_dict"] } +arrow-string = { version = "30", default-features = false, features = ["dyn_cmp_dict"] } bitflags = { version = "1" } byteorder = { version = "1", features = ["std"] } bytes = { version = "1", features = ["std"] } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] } crossbeam-utils = { version = "0.8", features = ["std"] } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/alamb/arrow-datafusion.git", branch = "alamb/patched_for_iox", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "279440b2ab92d18675b8102e342d4d82182287dc", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] } either = { version = "1", features = ["use_std"] } fixedbitset = { version = "0.4", features = ["std"] } @@ -40,7 +39,7 @@ futures-io = { version = "0.3", features = ["std"] } futures-sink = { version = "0.3", features = ["alloc", "std"] } futures-task = { version = "0.3", default-features = false, features = ["alloc", "std"] } futures-util = { version = "0.3", features = ["alloc", "async-await", "async-await-macro", "channel", "futures-channel", "futures-io", "futures-macro", "futures-sink", "io", "memchr", "sink", "slab", "std"] } -getrandom = { version = "0.2", default-features = false, features = ["js", "js-sys", "std", "wasm-bindgen"] } +getrandom = { version = "0.2", default-features = false, features = ["std"] } hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["ahash", "inline-more", "raw"] } hashbrown-594e8ee84c453af0 = { package = "hashbrown", version = "0.13", features = ["ahash", "inline-more", "raw"] } indexmap = { version = "1", default-features = false, features = ["std"] } @@ -53,10 +52,10 @@ memchr = { version = "2", features = ["std"] } nom = { version = "7", features = ["alloc", "std"] } num-integer = { version = "0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128", "libm", "std"] } -object_store = { git = "https://github.com/apache/arrow-rs.git", rev = "f5c165acc0e6cc4b34e0eaea006aab7e5bd28d66", default-features = false, features = ["aws", "azure", "base64", "cloud", "gcp", "getrandom", "quick-xml", "rand", "reqwest", "ring", "rustls-pemfile", "serde", "serde_json"] } +object_store = { version = "0.5", default-features = false, features = ["aws", "azure", "base64", "cloud", "gcp", "quick-xml", "rand", "reqwest", "ring", "rustls-pemfile", "serde", "serde_json"] } once_cell = { version = "1", features = ["alloc", "parking_lot", "parking_lot_core", "race", "std"] } parking_lot = { version = "0.12", features = ["arc_lock"] } -parquet = { version = "29", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } +parquet = { version = 
"30", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } phf_shared = { version = "0.11", features = ["std"] } predicates = { version = "2", features = ["diff", "difflib", "float-cmp", "normalize-line-endings", "regex"] } prost = { version = "0.11", features = ["prost-derive", "std"] } @@ -93,7 +92,6 @@ zstd-safe = { version = "6", default-features = false, features = ["arrays", "le zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] } [build-dependencies] -base64 = { version = "0.13", features = ["std"] } bitflags = { version = "1" } byteorder = { version = "1", features = ["std"] } bytes = { version = "1", features = ["std"] } @@ -109,7 +107,7 @@ futures-io = { version = "0.3", features = ["std"] } futures-sink = { version = "0.3", features = ["alloc", "std"] } futures-task = { version = "0.3", default-features = false, features = ["alloc", "std"] } futures-util = { version = "0.3", features = ["alloc", "async-await", "async-await-macro", "channel", "futures-channel", "futures-io", "futures-macro", "futures-sink", "io", "memchr", "sink", "slab", "std"] } -getrandom = { version = "0.2", default-features = false, features = ["js", "js-sys", "std", "wasm-bindgen"] } +getrandom = { version = "0.2", default-features = false, features = ["std"] } hashbrown-5ef9efb8ec2df382 = { package = "hashbrown", version = "0.12", features = ["ahash", "inline-more", "raw"] } heck = { version = "0.4", features = ["unicode", "unicode-segmentation"] } indexmap = { version = "1", default-features = false, features = ["std"] }
a86c77213e7a5d547d047e8e5593574d4e9175ac
Nga Tran
2023-09-12 11:10:57
compare sort_key_ids in cas_sort_key (#8579)
* feat: have ingester's SortKeyState include sort_key_ids * fix: test failures * chore: address review comments * feat: first step to compare sort_key_ids * feat: compare sort_key_ids in cas_sort_key * fix: comment typos * refactor: use direct assert instead of going through a function * chore: fix typo * test: add tests and comments * chore: fix typos * test: add more test to handle empty sort key * chore: address review comments ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: compare sort_key_ids in cas_sort_key (#8579) * feat: have ingester's SortKeyState include sort_key_ids * fix: test failures * chore: address review comments * feat: first step to compare sort_key_ids * feat: compare sort_key_ids in cas_sort_key * fix: comment typos * refactor: use direct assert instead of going through a function * chore: fix typo * test: add tests and comments * chore: fix typos * test: add more test to handle empty sort key * chore: address review comments --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/data_types/src/columns.rs b/data_types/src/columns.rs index a56d70b47d..8d3a18aa3f 100644 --- a/data_types/src/columns.rs +++ b/data_types/src/columns.rs @@ -403,7 +403,7 @@ impl Deref for ColumnSet { } /// Set of sorted columns in a specific given order at created time -#[derive(Debug, Clone, PartialEq, Eq, Hash, sqlx::Type)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, sqlx::Type, Default)] #[sqlx(transparent, no_pg_array)] pub struct SortedColumnSet(Vec<ColumnId>); diff --git a/import_export/src/file/import.rs b/import_export/src/file/import.rs index cf36f798f4..01e4f0a843 100644 --- a/import_export/src/file/import.rs +++ b/import_export/src/file/import.rs @@ -576,6 +576,7 @@ impl RemoteImporter { .cas_sort_key( &partition.transition_partition_id(), Some(partition.sort_key.clone()), + partition.sort_key_ids.clone(), &new_sort_key, &new_sort_key_ids, ) diff --git a/ingester/src/buffer_tree/partition.rs b/ingester/src/buffer_tree/partition.rs index eec07b7466..bb728c8f1e 100644 --- a/ingester/src/buffer_tree/partition.rs +++ b/ingester/src/buffer_tree/partition.rs @@ -50,6 +50,14 @@ impl SortKeyState { Self::Provided(_sort_key, sort_key_ids) => sort_key_ids.clone(), } } + + // get both sort key and sort key ids + pub(crate) async fn get(&self) -> (Option<SortKey>, Option<SortedColumnSet>) { + match self { + Self::Deferred(v) => v.get().await, + Self::Provided(sort_key, sort_key_ids) => (sort_key.clone(), sort_key_ids.clone()), + } + } } /// Data of an IOx Partition of a given Table of a Namespace @@ -1033,6 +1041,7 @@ mod tests { .cas_sort_key( &partition.transition_partition_id(), None, + None, &["terrific"], &SortedColumnSet::from([1]), ) diff --git a/ingester/src/buffer_tree/partition/resolver/sort_key.rs b/ingester/src/buffer_tree/partition/resolver/sort_key.rs index aa5b1ee573..097b1befd5 100644 --- a/ingester/src/buffer_tree/partition/resolver/sort_key.rs +++ b/ingester/src/buffer_tree/partition/resolver/sort_key.rs @@ -106,6 +106,7 @@ mod tests { .cas_sort_key( &partition.transition_partition_id(), None, + None, &["uno", "dos", "bananas"], &SortedColumnSet::from([1, 2, 3]), ) diff --git a/ingester/src/persist/mod.rs b/ingester/src/persist/mod.rs index d871ada6e1..9b042c65b4 100644 --- a/ingester/src/persist/mod.rs +++ b/ingester/src/persist/mod.rs @@ -389,6 +389,7 @@ mod tests { .cas_sort_key( &partition_id, None, + None, // must use column names that exist in the partition data &["region"], // column id of region diff --git a/ingester/src/persist/worker.rs b/ingester/src/persist/worker.rs index 76fa64ee7f..1775aae805 100644 --- a/ingester/src/persist/worker.rs +++ b/ingester/src/persist/worker.rs @@ -2,7 +2,7 @@ use std::{ops::ControlFlow, sync::Arc}; use async_channel::RecvError; use backoff::Backoff; -use data_types::{ColumnsByName, CompactionLevel, ParquetFile, ParquetFileParams}; +use data_types::{ColumnsByName, CompactionLevel, ParquetFile, ParquetFileParams, SortedColumnSet}; use iox_catalog::interface::{get_table_columns_by_id, CasFailure, Catalog}; use iox_query::exec::Executor; use iox_time::{SystemProvider, TimeProvider}; @@ -173,7 +173,7 @@ where // // Sort keys may be updated by any ingester at any time, and updates to the // sort key MUST be serialised. - let sort_key = ctx.sort_key().get_sort_key().await; + let (sort_key, sort_key_ids) = ctx.sort_key().get().await; // Fetch the "column name -> column ID" map. 
// @@ -192,6 +192,7 @@ where ctx, worker_state, sort_key, // Old sort key prior to this persist job + sort_key_ids, // Corresponding old sort key IDs prior to this persist job sort_key_update, // New sort key updated by this persist job parquet_table_data.object_store_id, &column_map, @@ -372,10 +373,19 @@ where /// If a concurrent sort key change is detected (issued by another node) then /// this method updates the sort key in `ctx` to reflect the newly observed /// value and returns [`PersistError::ConcurrentSortKeyUpdate`] to the caller. +/// +/// For now we provide both old_sort_key and old_sort_key_ids to the function. +/// In near future, when the sort_key field is removed from the partition, +/// we will remove old_sort_key here and only keep old_sort_key_ids. +/// +/// Similarly, to avoid too much changes, we will compute new_sort_key_ids from +/// the provided new_sort_key and the columns. In the future, we will optimize to use +/// new_sort_key_ids directly. async fn update_catalog_sort_key<O>( ctx: &mut Context, worker_state: &SharedWorkerState<O>, - old_sort_key: Option<SortKey>, + old_sort_key: Option<SortKey>, // todo: remove this argument in the future + old_sort_key_ids: Option<SortedColumnSet>, new_sort_key: SortKey, object_store_id: Uuid, columns: &ColumnsByName, @@ -383,6 +393,7 @@ async fn update_catalog_sort_key<O>( where O: Send + Sync, { + // convert old_sort_key into a vector of string let old_sort_key = old_sort_key.map(|v| v.to_columns().map(|v| v.to_string()).collect::<Vec<_>>()); @@ -402,6 +413,7 @@ where let update_result = Backoff::new(&Default::default()) .retry_with_backoff("cas_sort_key", || { let old_sort_key = old_sort_key.clone(); + let old_sort_key_ids = old_sort_key_ids.clone(); let new_sort_key_str = new_sort_key.to_columns().collect::<Vec<_>>(); let new_sort_key_colids = columns.ids_for_names(&new_sort_key_str); let catalog = Arc::clone(&worker_state.catalog); @@ -413,6 +425,7 @@ where .cas_sort_key( ctx.partition_id(), old_sort_key.clone(), + old_sort_key_ids.clone(), &new_sort_key_str, &new_sort_key_colids, ) @@ -421,11 +434,12 @@ where Ok(_) => ControlFlow::Break(Ok(new_sort_key_colids)), Err(CasFailure::QueryError(e)) => ControlFlow::Continue(e), Err(CasFailure::ValueMismatch((observed_sort_key, observed_sort_key_ids))) - if observed_sort_key == new_sort_key_str => + if observed_sort_key_ids.as_ref() == Some(&new_sort_key_colids) => { - // Invariant: if the column name sort keys match, the sort - // key IDs must also match. - assert_eq!(&observed_sort_key_ids, &Some(new_sort_key_colids.clone())); + // Invariant: if the column name sort IDs match, the + // sort key column strings must also match. 
+ assert_eq!(observed_sort_key, new_sort_key_str); + // A CAS failure occurred because of a concurrent // sort key update, however the new catalog sort key // exactly matches the sort key this node wants to @@ -441,7 +455,8 @@ where table = %ctx.table(), partition_id = %ctx.partition_id(), partition_key = %ctx.partition_key(), - expected=?old_sort_key, + ?old_sort_key, + ?old_sort_key_ids, ?observed_sort_key, ?observed_sort_key_ids, update_sort_key=?new_sort_key_str, @@ -469,7 +484,8 @@ where table = %ctx.table(), partition_id = %ctx.partition_id(), partition_key = %ctx.partition_key(), - expected=?old_sort_key, + ?old_sort_key, + ?old_sort_key_ids, ?observed_sort_key, ?observed_sort_key_ids, update_sort_key=?new_sort_key_str, @@ -504,6 +520,7 @@ where partition_id = %ctx.partition_id(), partition_key = %ctx.partition_key(), ?old_sort_key, + ?old_sort_key_ids, %new_sort_key, ?new_sort_key_ids, "adjusted partition sort key" diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs index 24aeb49eed..8c81d2922f 100644 --- a/iox_catalog/src/interface.rs +++ b/iox_catalog/src/interface.rs @@ -418,8 +418,9 @@ pub trait PartitionRepo: Send + Sync { async fn cas_sort_key( &mut self, partition_id: &TransitionPartitionId, - old_sort_key: Option<Vec<String>>, - new_sort_key: &[&str], + old_sort_key: Option<Vec<String>>, // todo: remove this old_sort_key + old_sort_key_ids: Option<SortedColumnSet>, + new_sort_key: &[&str], //todo: remove this new_sort_key new_sort_key_ids: &SortedColumnSet, ) -> Result<Partition, CasFailure<(Vec<String>, Option<SortedColumnSet>)>>; @@ -759,17 +760,6 @@ pub async fn list_schemas( Ok(iter) } -/// panic if sort_key and sort_key_ids have different lengths -pub(crate) fn verify_sort_key_length(sort_key: &[&str], sort_key_ids: &SortedColumnSet) { - assert_eq!( - sort_key.len(), - sort_key_ids.len(), - "sort_key {:?} and sort_key_ids {:?} are not the same length", - sort_key, - sort_key_ids - ); -} - #[cfg(test)] pub(crate) mod test_helpers { use crate::{ @@ -1682,32 +1672,74 @@ pub(crate) mod test_helpers { // sort_key should be empty on creation assert!(to_skip_partition.sort_key.is_empty()); + assert!(to_skip_partition.sort_key_ids.as_ref().unwrap().is_empty()); - // test update_sort_key from None to Some + // test that updates sort_key and sort_key_ids from None to Some let updated_partition = repos .partitions() .cas_sort_key( &to_skip_partition.transition_partition_id(), None, + None, &["tag2", "tag1", "time"], &SortedColumnSet::from([2, 1, 3]), ) .await .unwrap(); - // verify sort key and sort key ids are updated + // verify sort_key and sort_key_ids are updated correctly assert_eq!(updated_partition.sort_key, &["tag2", "tag1", "time"]); assert_eq!( updated_partition.sort_key_ids, Some(SortedColumnSet::from([2, 1, 3])) ); - // test sort key CAS with an incorrect value + // test that provides values of both old_sort_key and old_sort_key_ids but they do not match the existing ones + // --> the new sort key will not be updated let err = repos .partitions() .cas_sort_key( &to_skip_partition.transition_partition_id(), Some(["bananas".to_string()].to_vec()), + Some(SortedColumnSet::from([1])), + &["tag2", "tag1", "tag3 , with comma", "time"], + &SortedColumnSet::from([1, 2, 3, 4]), + ) + .await + .expect_err("CAS with incorrect value should fail"); + // verify the sort key is not updated + assert_matches!(err, CasFailure::ValueMismatch((old_sort_key, old_sort_key_ids)) => { + assert_eq!(old_sort_key, &["tag2", "tag1", "time"]); + assert_eq!(old_sort_key_ids, 
Some(SortedColumnSet::from([2, 1, 3]))); + }); + + // test that provides matched old_sort_key but not-matched old_sort_key_ids + // --> the new sort key will not be updated + let err = repos + .partitions() + .cas_sort_key( + &to_skip_partition.transition_partition_id(), + Some(["tag2".to_string(), "tag1".to_string(), "time".to_string()].to_vec()), + Some(SortedColumnSet::from([1, 5, 10])), + &["tag2", "tag1", "tag3 , with comma", "time"], + &SortedColumnSet::from([1, 2, 3, 4]), + ) + .await + .expect_err("CAS with incorrect value should fail"); + // verify the sort key is not updated + assert_matches!(err, CasFailure::ValueMismatch((old_sort_key, old_sort_key_ids)) => { + assert_eq!(old_sort_key, &["tag2", "tag1", "time"]); + assert_eq!(old_sort_key_ids, Some(SortedColumnSet::from([2, 1, 3]))); + }); + + // test that provide None sort_key and None sort_key_ids that do not match with existing values that are not None + // --> the new sort key will not be updated + let err = repos + .partitions() + .cas_sort_key( + &to_skip_partition.transition_partition_id(), + None, + None, &["tag2", "tag1", "tag3 , with comma", "time"], &SortedColumnSet::from([1, 2, 3, 4]), ) @@ -1718,7 +1750,7 @@ pub(crate) mod test_helpers { assert_eq!(old_sort_key_ids, Some(SortedColumnSet::from([2, 1, 3]))); }); - // test getting the new sort key + // test getting partition from partition id and verify values of sort_key and sort_key_ids let updated_other_partition = repos .partitions() .get_by_id(to_skip_partition.id) @@ -1735,55 +1767,24 @@ pub(crate) mod test_helpers { Some(SortedColumnSet::from([2, 1, 3])) ); + // test getting partition from hash_id and verify values of sort_key and sort_key_ids let updated_other_partition = repos .partitions() .get_by_hash_id(to_skip_partition.hash_id().unwrap()) .await .unwrap() .unwrap(); + // still has the old sort key assert_eq!( updated_other_partition.sort_key, vec!["tag2", "tag1", "time"] ); - // Test: sort_key_ids from get_by_hash_id assert_eq!( updated_other_partition.sort_key_ids, Some(SortedColumnSet::from([2, 1, 3])) ); - // test sort key CAS with no value - let err = repos - .partitions() - .cas_sort_key( - &to_skip_partition.transition_partition_id(), - None, - &["tag2", "tag1", "tag3 , with comma", "time"], - &SortedColumnSet::from([1, 2, 3, 4]), - ) - .await - .expect_err("CAS with incorrect value should fail"); - assert_matches!(err, CasFailure::ValueMismatch((old_sort_key, old_sort_key_ids)) => { - assert_eq!(old_sort_key, &["tag2", "tag1", "time"]); - assert_eq!(old_sort_key_ids, Some(SortedColumnSet::from([2, 1, 3]))); - }); - - // test sort key CAS with an incorrect value - let err = repos - .partitions() - .cas_sort_key( - &to_skip_partition.transition_partition_id(), - Some(["bananas".to_string()].to_vec()), - &["tag2", "tag1", "tag3 , with comma", "time"], - &SortedColumnSet::from([1, 2, 3, 4]), - ) - .await - .expect_err("CAS with incorrect value should fail"); - assert_matches!(err, CasFailure::ValueMismatch((old_sort_key, old_sort_key_ids)) => { - assert_eq!(old_sort_key, &["tag2", "tag1", "time"]); - assert_eq!(old_sort_key_ids, Some(SortedColumnSet::from([2, 1, 3]))); - }); - - // test update_sort_key from Some value to Some other value + // test that updates sort_key and sort_key_ids from Some matching values to Some other values let updated_partition = repos .partitions() .cas_sort_key( @@ -1794,11 +1795,13 @@ pub(crate) mod test_helpers { .map(ToString::to_string) .collect(), ), + Some(SortedColumnSet::from([2, 1, 3])), &["tag2", "tag1", "tag3 , 
with comma", "time"], &SortedColumnSet::from([2, 1, 4, 3]), ) .await .unwrap(); + // verify the new values are updated assert_eq!( updated_partition.sort_key, vec!["tag2", "tag1", "tag3 , with comma", "time"] @@ -1808,7 +1811,7 @@ pub(crate) mod test_helpers { Some(SortedColumnSet::from([2, 1, 4, 3])) ); - // test getting the new sort key + // test getting the new sort key from partition id let updated_partition = repos .partitions() .get_by_id(to_skip_partition.id) @@ -1824,6 +1827,7 @@ pub(crate) mod test_helpers { Some(SortedColumnSet::from([2, 1, 4, 3])) ); + // test getting the new sort key from partition hash_id let updated_partition = repos .partitions() .get_by_hash_id(to_skip_partition.hash_id().unwrap()) @@ -1839,6 +1843,35 @@ pub(crate) mod test_helpers { Some(SortedColumnSet::from([2, 1, 4, 3])) ); + // use to_skip_partition_too to update sort key from empty old values + // first make sure the old values are empty + assert!(to_skip_partition_too.sort_key.is_empty()); + assert!(to_skip_partition_too + .sort_key_ids + .as_ref() + .unwrap() + .is_empty()); + + // test that provides empty old_sort_key and empty old_sort_key_ids + // --> the new sort key will be updated + let updated_to_skip_partition_too = repos + .partitions() + .cas_sort_key( + &to_skip_partition_too.transition_partition_id(), + Some(vec![]), + Some(SortedColumnSet::from([])), + &["tag3", "time"], + &SortedColumnSet::from([3, 4]), + ) + .await + .unwrap(); + // verify the new values are updated + assert_eq!(updated_to_skip_partition_too.sort_key, vec!["tag3", "time"]); + assert_eq!( + updated_to_skip_partition_too.sort_key_ids, + Some(SortedColumnSet::from([3, 4])) + ); + // The compactor can log why compaction was skipped let skipped_compactions = repos.partitions().list_skipped_compactions().await.unwrap(); assert!( @@ -2021,10 +2054,17 @@ pub(crate) mod test_helpers { assert_eq!(recent.len(), 4); // Test: sort_key_ids from most_recent_n - // Only the second one has vallues, the other 3 are empty + // Only the first two partitions (represent to_skip_partition_too and to_skip_partition) have vallues, the others are empty let empty_vec_string: Vec<String> = vec![]; - assert_eq!(recent[0].sort_key, empty_vec_string); - assert_eq!(recent[0].sort_key_ids, Some(SortedColumnSet::from(vec![]))); + + assert_eq!( + recent[0].sort_key, + vec!["tag3".to_string(), "time".to_string(),] + ); + assert_eq!( + recent[0].sort_key_ids, + Some(SortedColumnSet::from(vec![3, 4])) + ); assert_eq!( recent[1].sort_key, diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs index 3a7685520b..37dde46210 100644 --- a/iox_catalog/src/mem.rs +++ b/iox_catalog/src/mem.rs @@ -1,7 +1,7 @@ //! This module implements an in-memory implementation of the iox_catalog interface. It can be //! used for testing or for an IOx designed to run without catalog persistence. -use crate::interface::{verify_sort_key_length, MAX_PARQUET_FILES_SELECTED_ONCE_FOR_DELETE}; +use crate::interface::MAX_PARQUET_FILES_SELECTED_ONCE_FOR_DELETE; use crate::{ interface::{ CasFailure, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, Error, NamespaceRepo, @@ -662,13 +662,21 @@ impl PartitionRepo for MemTxn { &mut self, partition_id: &TransitionPartitionId, old_sort_key: Option<Vec<String>>, + old_sort_key_ids: Option<SortedColumnSet>, new_sort_key: &[&str], new_sort_key_ids: &SortedColumnSet, ) -> Result<Partition, CasFailure<(Vec<String>, Option<SortedColumnSet>)>> { - verify_sort_key_length(new_sort_key, new_sort_key_ids); + // These asserts are here to cacth bugs. 
They will be removed when we remove the sort_key + // field from the Partition + assert_eq!( + old_sort_key.as_ref().map(|v| v.len()), + old_sort_key_ids.as_ref().map(|v| v.len()) + ); + assert_eq!(new_sort_key.len(), new_sort_key_ids.len()); let stage = self.stage(); let old_sort_key = old_sort_key.unwrap_or_default(); + let old_sort_key_ids = old_sort_key_ids.unwrap_or_default(); match stage.partitions.iter_mut().find(|p| match partition_id { TransitionPartitionId::Deterministic(hash_id) => { @@ -676,7 +684,9 @@ impl PartitionRepo for MemTxn { } TransitionPartitionId::Deprecated(id) => p.id == *id, }) { - Some(p) if p.sort_key == old_sort_key => { + Some(p) if p.sort_key_ids == Some(old_sort_key_ids) => { + // This is here to catch bugs. It will be removed when we remove the sort_key + assert_eq!(p.sort_key, old_sort_key); p.sort_key = new_sort_key.iter().map(|s| s.to_string()).collect(); p.sort_key_ids = Some(new_sort_key_ids.clone()); Ok(p.clone()) diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs index d850254134..c7687c8961 100644 --- a/iox_catalog/src/metrics.rs +++ b/iox_catalog/src/metrics.rs @@ -176,7 +176,7 @@ decorate!( "partition_get_by_hash_id_batch" = get_by_hash_id_batch(&mut self, partition_hash_ids: &[&PartitionHashId]) -> Result<Vec<Partition>>; "partition_list_by_table_id" = list_by_table_id(&mut self, table_id: TableId) -> Result<Vec<Partition>>; "partition_list_ids" = list_ids(&mut self) -> Result<Vec<PartitionId>>; - "partition_update_sort_key" = cas_sort_key(&mut self, partition_id: &TransitionPartitionId, old_sort_key: Option<Vec<String>>, new_sort_key: &[&str], new_sort_key_ids: &SortedColumnSet) -> Result<Partition, CasFailure<(Vec<String>, Option<SortedColumnSet>)>>; + "partition_update_sort_key" = cas_sort_key(&mut self, partition_id: &TransitionPartitionId, old_sort_key: Option<Vec<String>>, old_sort_key_ids: Option<SortedColumnSet>, new_sort_key: &[&str], new_sort_key_ids: &SortedColumnSet) -> Result<Partition, CasFailure<(Vec<String>, Option<SortedColumnSet>)>>; "partition_record_skipped_compaction" = record_skipped_compaction(&mut self, partition_id: PartitionId, reason: &str, num_files: usize, limit_num_files: usize, limit_num_files_first_in_partition: usize, estimated_bytes: u64, limit_bytes: u64) -> Result<()>; "partition_list_skipped_compactions" = list_skipped_compactions(&mut self) -> Result<Vec<SkippedCompaction>>; "partition_delete_skipped_compactions" = delete_skipped_compactions(&mut self, partition_id: PartitionId) -> Result<Option<SkippedCompaction>>; diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index ef48af08e9..5192b0e1dc 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -1,6 +1,6 @@ //! A Postgres backed implementation of the Catalog -use crate::interface::{verify_sort_key_length, MAX_PARQUET_FILES_SELECTED_ONCE_FOR_DELETE}; +use crate::interface::MAX_PARQUET_FILES_SELECTED_ONCE_FOR_DELETE; use crate::{ interface::{ self, CasFailure, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, Error, NamespaceRepo, @@ -1316,38 +1316,49 @@ WHERE table_id = $1; &mut self, partition_id: &TransitionPartitionId, old_sort_key: Option<Vec<String>>, + old_sort_key_ids: Option<SortedColumnSet>, new_sort_key: &[&str], new_sort_key_ids: &SortedColumnSet, ) -> Result<Partition, CasFailure<(Vec<String>, Option<SortedColumnSet>)>> { - verify_sort_key_length(new_sort_key, new_sort_key_ids); + // These asserts are here to cacth bugs. 
They will be removed when we remove the sort_key + // field from the Partition + assert_eq!( + old_sort_key.as_ref().map(|v| v.len()), + old_sort_key_ids.as_ref().map(|v| v.len()) + ); + assert_eq!(new_sort_key.len(), new_sort_key_ids.len()); let old_sort_key = old_sort_key.unwrap_or_default(); + let old_sort_key_ids = old_sort_key_ids.unwrap_or_default(); + // This `match` will go away when all partitions have hash IDs in the database. let query = match partition_id { TransitionPartitionId::Deterministic(hash_id) => sqlx::query_as::<_, Partition>( r#" UPDATE partition SET sort_key = $1, sort_key_ids = $4 -WHERE hash_id = $2 AND sort_key = $3 +WHERE hash_id = $2 AND sort_key = $3 AND sort_key_ids = $5 RETURNING id, hash_id, table_id, partition_key, sort_key, sort_key_ids, new_file_at; "#, ) .bind(new_sort_key) // $1 .bind(hash_id) // $2 .bind(&old_sort_key) // $3 - .bind(new_sort_key_ids), // $4 + .bind(new_sort_key_ids) // $4 + .bind(old_sort_key_ids), // $5 TransitionPartitionId::Deprecated(id) => sqlx::query_as::<_, Partition>( r#" UPDATE partition SET sort_key = $1, sort_key_ids = $4 -WHERE id = $2 AND sort_key = $3 +WHERE id = $2 AND sort_key = $3 AND sort_key_ids = $5 RETURNING id, hash_id, table_id, partition_key, sort_key, sort_key_ids, new_file_at; "#, ) .bind(new_sort_key) // $1 .bind(id) // $2 .bind(&old_sort_key) // $3 - .bind(new_sort_key_ids), // $4 + .bind(new_sort_key_ids) // $4 + .bind(old_sort_key_ids), // $5 }; let res = query.fetch_one(&mut self.inner).await; diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs index 67083c6e37..37e539d075 100644 --- a/iox_catalog/src/sqlite.rs +++ b/iox_catalog/src/sqlite.rs @@ -2,9 +2,9 @@ use crate::{ interface::{ - self, verify_sort_key_length, CasFailure, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, - Error, NamespaceRepo, ParquetFileRepo, PartitionRepo, RepoCollection, Result, - SoftDeletedRows, TableRepo, MAX_PARQUET_FILES_SELECTED_ONCE_FOR_RETENTION, + self, CasFailure, Catalog, ColumnRepo, ColumnTypeMismatchSnafu, Error, NamespaceRepo, + ParquetFileRepo, PartitionRepo, RepoCollection, Result, SoftDeletedRows, TableRepo, + MAX_PARQUET_FILES_SELECTED_ONCE_FOR_RETENTION, }, kafkaless_transition::{ SHARED_QUERY_POOL, SHARED_QUERY_POOL_ID, SHARED_TOPIC_ID, SHARED_TOPIC_NAME, @@ -1011,12 +1011,24 @@ WHERE table_id = $1; &mut self, partition_id: &TransitionPartitionId, old_sort_key: Option<Vec<String>>, + old_sort_key_ids: Option<SortedColumnSet>, new_sort_key: &[&str], new_sort_key_ids: &SortedColumnSet, ) -> Result<Partition, CasFailure<(Vec<String>, Option<SortedColumnSet>)>> { - verify_sort_key_length(new_sort_key, new_sort_key_ids); + // These asserts are here to cacth bugs. They will be removed when we remove the sort_key + // field from the Partition + assert_eq!( + old_sort_key.as_ref().map(|v| v.len()), + old_sort_key_ids.as_ref().map(|v| v.len()) + ); + assert_eq!(new_sort_key.len(), new_sort_key_ids.len()); let old_sort_key = old_sort_key.unwrap_or_default(); + let raw_old_sort_key_ids: Vec<_> = old_sort_key_ids + .unwrap_or_default() + .iter() + .map(|c| c.get()) + .collect(); let raw_new_sort_key_ids: Vec<_> = new_sort_key_ids.iter().map(|cid| cid.get()).collect(); // This `match` will go away when all partitions have hash IDs in the database. 
@@ -1025,26 +1037,28 @@ WHERE table_id = $1; r#" UPDATE partition SET sort_key = $1, sort_key_ids = $4 -WHERE hash_id = $2 AND sort_key = $3 +WHERE hash_id = $2 AND sort_key = $3 AND sort_key_ids = $5 RETURNING id, hash_id, table_id, partition_key, sort_key, sort_key_ids, new_file_at; "#, ) .bind(Json(new_sort_key)) // $1 .bind(hash_id) // $2 .bind(Json(&old_sort_key)) // $3 - .bind(Json(&raw_new_sort_key_ids)), // $4 + .bind(Json(&raw_new_sort_key_ids)) // $4 + .bind(Json(&raw_old_sort_key_ids)), // $5 TransitionPartitionId::Deprecated(id) => sqlx::query_as::<_, PartitionPod>( r#" UPDATE partition SET sort_key = $1, sort_key_ids = $4 -WHERE id = $2 AND sort_key = $3 +WHERE id = $2 AND sort_key = $3 AND sort_key_ids = $5 RETURNING id, hash_id, table_id, partition_key, sort_key, sort_key_ids, new_file_at; "#, ) .bind(Json(new_sort_key)) // $1 .bind(id) // $2 .bind(Json(&old_sort_key)) // $3 - .bind(Json(&raw_new_sort_key_ids)), // $4 + .bind(Json(&raw_new_sort_key_ids)) // $4 + .bind(Json(&raw_old_sort_key_ids)), // $5 }; let res = query.fetch_one(self.inner.get_mut()).await; diff --git a/iox_tests/src/catalog.rs b/iox_tests/src/catalog.rs index 170f6e45df..971600151e 100644 --- a/iox_tests/src/catalog.rs +++ b/iox_tests/src/catalog.rs @@ -333,6 +333,7 @@ impl TestTable { .cas_sort_key( &TransitionPartitionId::Deprecated(partition.id), None, + None, sort_key, &SortedColumnSet::from(sort_key_ids.iter().cloned()), ) @@ -450,14 +451,16 @@ impl TestPartition { sort_key: SortKey, sort_key_ids: &SortedColumnSet, ) -> Arc<Self> { - let old_sort_key = partition_lookup( + let partition = partition_lookup( self.catalog.catalog.repositories().await.as_mut(), &self.partition.transition_partition_id(), ) .await .unwrap() - .unwrap() - .sort_key; + .unwrap(); + + let old_sort_key = partition.sort_key; + let old_sort_key_ids = partition.sort_key_ids; let partition = self .catalog @@ -468,6 +471,7 @@ impl TestPartition { .cas_sort_key( &self.partition.transition_partition_id(), Some(old_sort_key), + old_sort_key_ids, &sort_key.to_columns().collect::<Vec<_>>(), sort_key_ids, ) @@ -795,20 +799,23 @@ async fn update_catalog_sort_key_if_needed<R>( // Similarly to what the ingester does, if there's an existing sort key in the catalog, add new // columns onto the end - match partition.sort_key() { - Some(catalog_sort_key) => { + + match (partition.sort_key(), partition.sort_key_ids_none_if_empty()) { + (Some(catalog_sort_key), Some(catalog_sort_key_ids)) => { let new_sort_key = sort_key.to_columns().collect::<Vec<_>>(); let (_metadata, update) = adjust_sort_key_columns(&catalog_sort_key, &new_sort_key); if let Some(new_sort_key) = update { - let new_columns = new_sort_key.to_columns().collect::<Vec<_>>(); + let new_sort_key = new_sort_key.to_columns().collect::<Vec<_>>(); + let new_sort_key_ids = columns.ids_for_names(&new_sort_key); + debug!( - "Updating sort key from {:?} to {:?}", + "Updating (sort_key, sort_key_ids) from ({:?}, {:?}) to ({:?}, {:?})", catalog_sort_key.to_columns().collect::<Vec<_>>(), - &new_columns, + catalog_sort_key_ids, + &new_sort_key, + &new_sort_key_ids, ); - let column_ids = columns.ids_for_names(&new_columns); - repos .partitions() .cas_sort_key( @@ -819,23 +826,29 @@ async fn update_catalog_sort_key_if_needed<R>( .map(ToString::to_string) .collect::<Vec<_>>(), ), - &new_columns, - &column_ids, + partition.sort_key_ids, + &new_sort_key, + &new_sort_key_ids, ) .await .unwrap(); } } - None => { + (None, None) => { let new_columns = sort_key.to_columns().collect::<Vec<_>>(); 
debug!("Updating sort key from None to {:?}", &new_columns); let column_ids = columns.ids_for_names(&new_columns); repos .partitions() - .cas_sort_key(id, None, &new_columns, &column_ids) + .cas_sort_key(id, None, None, &new_columns, &column_ids) .await .unwrap(); } + _ => panic!( + "sort_key {:?} and sort_key_ids {:?} should be both None or both Some", + partition.sort_key(), + partition.sort_key_ids_none_if_empty() + ), } }
779234eb2042b185952e2d91f42abb237f8492a7
Andrew Lamb
2023-03-21 16:40:21
Add tests that schema and data matches (#7280)
* fix(flightsql): Tests for matching schema in FlightSQL * fix: Update influxdb_iox/tests/end_to_end_cases/flightsql.rs
null
chore(flightsql): Add tests that schema and data matches (#7280) * fix(flightsql): Tests for matching schema in FlightSQL * fix: Update influxdb_iox/tests/end_to_end_cases/flightsql.rs
diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs index c7d669c3be..b584c6763a 100644 --- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs +++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs @@ -1,13 +1,21 @@ use std::path::PathBuf; use arrow::record_batch::RecordBatch; -use arrow_flight::{decode::FlightRecordBatchStream, sql::SqlInfo}; +use arrow_flight::{ + decode::FlightRecordBatchStream, + sql::{ + Any, CommandGetCatalogs, CommandGetDbSchemas, CommandGetSqlInfo, CommandGetTableTypes, + CommandGetTables, ProstMessageExt, SqlInfo, + }, + FlightClient, FlightDescriptor, +}; use arrow_util::test_util::batches_to_sorted_lines; use assert_cmd::Command; use datafusion::common::assert_contains; use futures::{FutureExt, TryStreamExt}; use influxdb_iox_client::flightsql::FlightSqlClient; use predicates::prelude::*; +use prost::Message; use test_helpers_end_to_end::{maybe_skip_integration, MiniCluster, Step, StepTest, StepTestState}; #[tokio::test] @@ -625,6 +633,101 @@ async fn flightsql_jdbc() { .await } +/// test ensures that the schema returned as part of GetFlightInfo matches that of the +/// actual response. +#[tokio::test] +async fn flightsql_schema_matches() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let table_name = "the_table"; + + // Set up the cluster ==================================== + let mut cluster = MiniCluster::create_shared2(database_url).await; + + StepTest::new( + &mut cluster, + vec![ + Step::WriteLineProtocol(format!( + "{table_name},tag1=A,tag2=B val=42i 123456\n\ + {table_name},tag1=A,tag2=C val=43i 123457" + )), + Step::Custom(Box::new(move |state: &mut StepTestState| { + async move { + let mut client = flightsql_client(state.cluster()).into_inner(); + + // Verify schema for each type of command + let cases = vec![ + // CommandStatementQuery fails because of + // https://github.com/influxdata/influxdb_iox/issues/7279> + // CommandStatementQuery { + // query: format!("select * from {table_name}"), + // } + // .as_any(), + CommandGetSqlInfo { info: vec![] }.as_any(), + CommandGetCatalogs {}.as_any(), + CommandGetDbSchemas { + catalog: None, + db_schema_filter_pattern: None, + } + .as_any(), + CommandGetTables { + catalog: None, + db_schema_filter_pattern: None, + table_name_filter_pattern: None, + table_types: vec![], + include_schema: false, + } + .as_any(), + CommandGetTableTypes {}.as_any(), + ]; + + for cmd in cases { + assert_schema(&mut client, cmd).await; + } + } + .boxed() + })), + ], + ) + .run() + .await +} + +/// Verifies that the schema returned by `GetFlightInfo` and `DoGet` +/// match for `cmd`. 
+async fn assert_schema(client: &mut FlightClient, cmd: Any) { + println!("Checking schema for message type {}", cmd.type_url); + + let descriptor = FlightDescriptor::new_cmd(cmd.encode_to_vec()); + let flight_info = client.get_flight_info(descriptor).await.unwrap(); + + assert_eq!(flight_info.endpoint.len(), 1); + let ticket = flight_info.endpoint[0] + .ticket + .as_ref() + .expect("Need ticket") + .clone(); + + // Schema reported by `GetFlightInfo` + let flight_info_schema = flight_info.try_decode_schema().unwrap(); + + // Get results and ensure they match the schema reported by GetFlightInfo + let mut result_stream = client.do_get(ticket).await.unwrap(); + let mut saw_data = false; + while let Some(batch) = result_stream.try_next().await.unwrap() { + saw_data = true; + assert_eq!(batch.schema().as_ref(), &flight_info_schema); + // The stream itself also may report a schema + if let Some(stream_schema) = result_stream.schema() { + assert_eq!(stream_schema.as_ref(), &flight_info_schema); + } + } + // verify we have seen at least one RecordBatch + // (all FlightSQL endpoints return at least one) + assert!(saw_data); +} + /// Return a [`FlightSqlClient`] configured for use fn flightsql_client(cluster: &MiniCluster) -> FlightSqlClient { let connection = cluster.querier().querier_grpc_connection();
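The invariant the test above enforces is that the schema advertised by `GetFlightInfo` equals the schema of every `RecordBatch` returned by `DoGet`. As a minimal, self-contained illustration of that kind of assertion, using plain `arrow` APIs only and no Flight client, the sketch below compares a declared schema against the schema carried by a batch; the field name and data are made up for illustration.

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;

fn main() {
    // Schema "advertised" up front (stands in for the GetFlightInfo response).
    let advertised = Schema::new(vec![Field::new("val", DataType::Int64, false)]);

    // Data actually produced (stands in for one batch from the DoGet stream).
    let col: ArrayRef = Arc::new(Int64Array::from(vec![42_i64, 43]));
    let batch = RecordBatch::try_new(Arc::new(advertised.clone()), vec![col]).unwrap();

    // The core assertion: the batch's schema must match the advertised one.
    assert_eq!(batch.schema().as_ref(), &advertised);
}
```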
18c6d9e306c5a62e584d245dd6740b0bd3a4283e
Dom Dwyer
2023-05-09 11:51:42
remove unnecessary "to_owned()" call
This method now takes an owned name, so no need to call to_owned()!
null
refactor: remove unnecessary "to_owned()" call This method now takes an owned name, so no need to call to_owned()!
diff --git a/data_types/src/columns.rs b/data_types/src/columns.rs index 06502319db..a2142af6b8 100644 --- a/data_types/src/columns.rs +++ b/data_types/src/columns.rs @@ -49,7 +49,7 @@ impl ColumnsByName { .into_iter() .map(|c| { ( - c.name.to_owned(), + c.name, ColumnSchema { id: c.id, column_type: c.column_type,
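A tiny, self-contained sketch of the pattern behind this cleanup (generic Rust, not the IOx code; the map and helper are hypothetical): once a function already owns a `String`, calling `.to_owned()` on it would only clone a value that can simply be moved.

```rust
use std::collections::BTreeMap;

// Takes ownership of `name`, so the caller's String can be moved straight in.
fn insert_column(map: &mut BTreeMap<String, i64>, name: String, id: i64) {
    // `name` is already owned here; `name.to_owned()` would just allocate a
    // needless copy before the original is dropped.
    map.insert(name, id);
}

fn main() {
    let mut columns = BTreeMap::new();
    insert_column(&mut columns, String::from("tag1"), 1);
    insert_column(&mut columns, String::from("time"), 2);
    assert_eq!(columns.get("tag1"), Some(&1));
}
```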
9b57cb17e6695b00d9f93ff61b10d9a87c2ab8d5
Fraser Savage
2023-09-01 16:35:42
Cover WAL replay of empty files
Clearing out the TODO
null
test(ingester): Cover WAL replay of empty files Clearing out the TODO
diff --git a/ingester/src/init/wal_replay.rs b/ingester/src/init/wal_replay.rs index 5ae06447f5..11029feb27 100644 --- a/ingester/src/init/wal_replay.rs +++ b/ingester/src/init/wal_replay.rs @@ -124,9 +124,7 @@ where "dropping empty wal segment", ); - // TODO(test): empty WAL replay - - // A failure to delete an empty file should not prevent WAL + // A failure to delete an empty file MUST not prevent WAL // replay from continuing. if let Err(error) = wal.delete(file.id()).await { error!( @@ -381,7 +379,8 @@ mod tests { // The write portion of this test. // - // Write two ops, rotate the file, and write a third op. + // Write two ops, rotate the file twice (ensuring an empty file is + // handled ok), write a third op and finally an empty op. { let inner = Arc::new(MockDmlSink::default().with_apply_return(vec![ Ok(()), @@ -414,6 +413,10 @@ mod tests { // Rotate the log file wal.rotate().expect("failed to rotate WAL file"); + // Rotate the log file again, in order to create an empty segment and ensure + // replay is tolerant to it + wal.rotate().expect("failed to rotate WAL file"); + // Write the third op wal_sink .apply(IngestOp::Write(op3.clone())) @@ -435,7 +438,8 @@ mod tests { .await .expect("failed to initialise WAL"); - assert_eq!(wal.closed_segments().len(), 2); + // Must be 3 segments, 1 OK, 1 Empty and 1 with a normal op and blank op + assert_eq!(wal.closed_segments().len(), 3); // Initialise the mock persist system let persist = Arc::new(MockPersistQueue::default()); @@ -497,7 +501,7 @@ mod tests { .join() .await; - // Ensure the replayed segments were dropped + // Ensure the replayed segments were dropped, including the empty one let wal = Wal::new(dir.path()) .await .expect("failed to initialise WAL"); @@ -511,7 +515,7 @@ mod tests { .get_observer(&Attributes::from([])) .expect("attributes not found") .fetch(); - assert_eq!(files, 2); + assert_eq!(files, 3); let ops = metrics .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_ops") .expect("file counter not found")
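A minimal sketch of the replay behaviour the test above covers: empty segments are skipped (and a deletion is attempted) without aborting replay, and a failed deletion is only logged. The types and names here (`Segment`, `delete_segment`, `apply_ops`) are hypothetical stand-ins, not the ingester's WAL API.

```rust
#[derive(Debug)]
struct Segment {
    id: u64,
    ops: Vec<String>, // stand-in for decoded WAL operations
}

fn delete_segment(id: u64) -> Result<(), String> {
    // Stand-in for the real deletion; pretend segment 2 fails to delete.
    if id == 2 { Err("permission denied".to_string()) } else { Ok(()) }
}

fn apply_ops(ops: &[String]) {
    for op in ops {
        println!("replaying {op}");
    }
}

/// Replay all closed segments, tolerating empty files and deletion failures.
fn replay(segments: &[Segment]) -> usize {
    let mut replayed = 0;
    for seg in segments {
        if seg.ops.is_empty() {
            // A failure to delete an empty file must not prevent replay from continuing.
            if let Err(e) = delete_segment(seg.id) {
                eprintln!("failed to drop empty wal segment {}: {e}", seg.id);
            }
            continue;
        }
        apply_ops(&seg.ops);
        replayed += seg.ops.len();
    }
    replayed
}

fn main() {
    let segments = vec![
        Segment { id: 1, ops: vec!["write op1".into(), "write op2".into()] },
        Segment { id: 2, ops: vec![] }, // empty segment created by an extra rotation
        Segment { id: 3, ops: vec!["write op3".into()] },
    ];
    assert_eq!(replay(&segments), 3);
}
```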
43e236e040bd685b26ac81b5afcc8a7185fc4a08
Andrew Lamb
2023-03-28 18:21:49
Update datafusion again (#7353)
* chore: Update DataFusion * refactor: Update predicate crate for new transform API * refactor: Update iox_query crate for new APIs * refactor: Update influxql for new API * chore: Run cargo hakari tasks ---------
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: Update datafusion again (#7353) * chore: Update DataFusion * refactor: Update predicate crate for new transform API * refactor: Update iox_query crate for new APIs * refactor: Update influxql for new API * chore: Run cargo hakari tasks --------- Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 596c32c986..40a3992499 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,16 +112,16 @@ checksum = "f410d3907b6b3647b9e7bca4551274b2e3d716aa940afb67b7287257401da921" dependencies = [ "ahash 0.8.3", "arrow-arith", - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", + "arrow-array", + "arrow-buffer", "arrow-cast", "arrow-csv", - "arrow-data 34.0.0", + "arrow-data", "arrow-ipc", "arrow-json", "arrow-ord", "arrow-row", - "arrow-schema 34.0.0", + "arrow-schema", "arrow-select", "arrow-string", "comfy-table", @@ -133,10 +133,10 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f87391cf46473c9bc53dab68cb8872c3a81d4dfd1703f1c8aa397dba9880a043" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "chrono", "half 2.2.1", "num", @@ -149,25 +149,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d35d5475e65c57cffba06d0022e3006b677515f99b54af33a7cd54f6cdd4a5b5" dependencies = [ "ahash 0.8.3", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", - "chrono", - "half 2.2.1", - "hashbrown 0.13.2", - "num", -] - -[[package]] -name = "arrow-array" -version = "35.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43489bbff475545b78b0e20bde1d22abd6c99e54499839f9e815a2fa5134a51b" -dependencies = [ - "ahash 0.8.3", - "arrow-buffer 35.0.0", - "arrow-data 35.0.0", - "arrow-schema 35.0.0", + "arrow-buffer", + "arrow-data", + "arrow-schema", "chrono", "chrono-tz", "half 2.2.1", @@ -185,26 +169,16 @@ dependencies = [ "num", ] -[[package]] -name = "arrow-buffer" -version = "35.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3759e4a52c593281184787af5435671dc8b1e78333e5a30242b2e2d6e3c9d1f" -dependencies = [ - "half 2.2.1", - "num", -] - [[package]] name = "arrow-cast" version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a7285272c9897321dfdba59de29f5b05aeafd3cdedf104a941256d155f6d304" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "arrow-select", "chrono", "lexical-core", @@ -217,11 +191,11 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "981ee4e7f6a120da04e00d0b39182e1eeacccb59c8da74511de753c56b7fddf7" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", + "arrow-array", + "arrow-buffer", "arrow-cast", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-data", + "arrow-schema", "chrono", "csv", "csv-core", @@ -236,20 +210,8 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27cc673ee6989ea6e4b4e8c7d461f7e06026a096c8f0b1a7288885ff71ae1e56" dependencies = [ - "arrow-buffer 34.0.0", - "arrow-schema 34.0.0", - "half 2.2.1", - "num", -] - -[[package]] -name = "arrow-data" -version = "35.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c7787c6cdbf9539b1ffb860bfc18c5848926ec3d62cbd52dc3b1ea35c874fd" -dependencies = [ - "arrow-buffer 35.0.0", - "arrow-schema 35.0.0", + "arrow-buffer", + "arrow-schema", "half 2.2.1", "num", ] @@ -260,11 +222,11 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bd16945f8f3be0f6170b8ced60d414e56239d91a16a3f8800bc1504bc58b2592" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", + "arrow-array", + "arrow-buffer", "arrow-cast", "arrow-ipc", - "arrow-schema 34.0.0", + "arrow-schema", "base64 0.21.0", "bytes", "futures", @@ -283,11 +245,11 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e37b8b69d9e59116b6b538e8514e0ec63a30f08b617ce800d31cb44e3ef64c1a" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", + "arrow-array", + "arrow-buffer", "arrow-cast", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-data", + "arrow-schema", "flatbuffers", ] @@ -297,11 +259,11 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80c3fa0bed7cfebf6d18e46b733f9cb8a1cb43ce8e6539055ca3e1e48a426266" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", + "arrow-array", + "arrow-buffer", "arrow-cast", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-data", + "arrow-schema", "chrono", "half 2.2.1", "indexmap", @@ -316,10 +278,10 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d247dce7bed6a8d6a3c6debfa707a3a2f694383f0c692a39d736a593eae5ef94" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "arrow-select", "num", ] @@ -331,10 +293,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d609c0181f963cea5c70fddf9a388595b5be441f3aa1d1cdbf728ca834bbd3a" dependencies = [ "ahash 0.8.3", - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "half 2.2.1", "hashbrown 0.13.2", ] @@ -345,22 +307,16 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64951898473bfb8e22293e83a44f02874d2257514d49cd95f9aa4afcff183fbc" -[[package]] -name = "arrow-schema" -version = "35.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf6b26f6a6f8410e3b9531cbd1886399b99842701da77d4b4cf2013f7708f20f" - [[package]] name = "arrow-select" version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a513d89c2e1ac22b28380900036cf1f3992c6443efc5e079de631dcf83c6888" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "num", ] @@ -370,10 +326,10 @@ version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5288979b2705dae1114c864d73150629add9153b9b8f1d7ee3963db94c372ba5" dependencies = [ - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", - "arrow-data 34.0.0", - "arrow-schema 34.0.0", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", "arrow-select", "regex", "regex-syntax", @@ -1497,7 +1453,7 @@ dependencies = [ [[package]] name = "datafusion" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "ahash 0.8.3", "arrow", @@ -1544,10 +1500,10 @@ dependencies = [ [[package]] name = 
"datafusion-common" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "arrow", - "arrow-array 35.0.0", + "arrow-array", "chrono", "num_cpus", "object_store", @@ -1558,7 +1514,7 @@ dependencies = [ [[package]] name = "datafusion-execution" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "dashmap", "datafusion-common", @@ -1575,7 +1531,7 @@ dependencies = [ [[package]] name = "datafusion-expr" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "ahash 0.8.3", "arrow", @@ -1586,7 +1542,7 @@ dependencies = [ [[package]] name = "datafusion-optimizer" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "arrow", "async-trait", @@ -1603,12 +1559,12 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "ahash 0.8.3", "arrow", - "arrow-buffer 34.0.0", - "arrow-schema 34.0.0", + "arrow-buffer", + "arrow-schema", "blake2", "blake3", "chrono", @@ -1633,7 +1589,7 @@ dependencies = [ [[package]] name = "datafusion-proto" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "arrow", "chrono", @@ -1641,15 +1597,13 @@ dependencies = [ "datafusion-common", "datafusion-expr", "object_store", - "pbjson-build", "prost", - "prost-build", ] [[package]] name = "datafusion-row" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ "arrow", "datafusion-common", @@ -1660,9 +1614,9 @@ dependencies = [ [[package]] name = "datafusion-sql" version = "21.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=74c3955db48f7ef6458125100eed3999512a56ba#74c3955db48f7ef6458125100eed3999512a56ba" +source = 
"git+https://github.com/apache/arrow-datafusion.git?rev=f30671760285f242950437c3c0f520ef418c1068#f30671760285f242950437c3c0f520ef418c1068" dependencies = [ - "arrow-schema 34.0.0", + "arrow-schema", "datafusion-common", "datafusion-expr", "log", @@ -4135,12 +4089,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ac135ecf63ebb5f53dda0921b0b76d6048b3ef631a5f4760b9e8f863ff00cfa" dependencies = [ "ahash 0.8.3", - "arrow-array 34.0.0", - "arrow-buffer 34.0.0", + "arrow-array", + "arrow-buffer", "arrow-cast", - "arrow-data 34.0.0", + "arrow-data", "arrow-ipc", - "arrow-schema 34.0.0", + "arrow-schema", "arrow-select", "base64 0.21.0", "brotli", @@ -6848,6 +6802,7 @@ version = "0.1.0" dependencies = [ "ahash 0.8.3", "arrow", + "arrow-array", "arrow-flight", "arrow-ord", "arrow-string", diff --git a/Cargo.toml b/Cargo.toml index 1af05f976c..3b8bb791bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,8 +121,8 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] arrow = { version = "34.0.0" } arrow-flight = { version = "34.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="74c3955db48f7ef6458125100eed3999512a56ba", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="74c3955db48f7ef6458125100eed3999512a56ba" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="f30671760285f242950437c3c0f520ef418c1068", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="f30671760285f242950437c3c0f520ef418c1068" } hashbrown = { version = "0.13.2" } parquet = { version = "34.0.0" } diff --git a/iox_query/src/frontend/common.rs b/iox_query/src/frontend/common.rs index c1b1b4fbe6..55a32e0f5b 100644 --- a/iox_query/src/frontend/common.rs +++ b/iox_query/src/frontend/common.rs @@ -1,9 +1,8 @@ use std::sync::Arc; use datafusion::{ - catalog::TableReference, - datasource::provider_as_source, - logical_expr::{expr_rewriter::ExprRewritable, LogicalPlanBuilder}, + catalog::TableReference, common::tree_node::TreeNode, datasource::provider_as_source, + logical_expr::LogicalPlanBuilder, }; use observability_deps::tracing::trace; use predicate::Predicate; diff --git a/iox_query/src/logical_optimizer/handle_gapfill.rs b/iox_query/src/logical_optimizer/handle_gapfill.rs index 98b9eef720..a36e1d631b 100644 --- a/iox_query/src/logical_optimizer/handle_gapfill.rs +++ b/iox_query/src/logical_optimizer/handle_gapfill.rs @@ -5,12 +5,10 @@ mod range_predicate; use crate::exec::gapfill::{FillStrategy, GapFill, GapFillParams}; use datafusion::{ + common::tree_node::{RewriteRecursion, TreeNode, TreeNodeRewriter, VisitRecursion}, error::{DataFusionError, Result}, logical_expr::{ - expr_rewriter::{ExprRewritable, ExprRewriter, RewriteRecursion}, - expr_visitor::{ExprVisitable, ExpressionVisitor, Recursion}, - utils::expr_to_columns, - Aggregate, BuiltinScalarFunction, Extension, LogicalPlan, + utils::expr_to_columns, Aggregate, BuiltinScalarFunction, Extension, LogicalPlan, }, optimizer::{optimizer::ApplyOrder, OptimizerConfig, OptimizerRule}, prelude::{col, Expr}, @@ -305,7 +303,8 @@ struct DateBinGapfillRewriter { args: Option<Vec<Expr>>, } -impl ExprRewriter for DateBinGapfillRewriter { +impl TreeNodeRewriter for DateBinGapfillRewriter { + type N = Expr; fn pre_visit(&mut self, expr: &Expr) -> Result<RewriteRecursion> { match expr { Expr::ScalarUDF { fun, .. 
} if fun.name == DATE_BIN_GAPFILL_UDF_NAME => { @@ -330,23 +329,19 @@ impl ExprRewriter for DateBinGapfillRewriter { } fn count_date_bin_gapfill(e: &Expr) -> Result<usize> { - struct Finder { - count: usize, - } - impl ExpressionVisitor for Finder { - fn pre_visit(mut self, expr: &Expr) -> Result<Recursion<Self>> { - match expr { - Expr::ScalarUDF { fun, .. } if fun.name == DATE_BIN_GAPFILL_UDF_NAME => { - self.count += 1; - } - _ => (), - }; - Ok(Recursion::Continue(self)) - } - } - let f = Finder { count: 0 }; - let f = e.accept(f)?; - Ok(f.count) + let mut count = 0; + e.apply(&mut |expr| { + match expr { + Expr::ScalarUDF { fun, .. } if fun.name == DATE_BIN_GAPFILL_UDF_NAME => { + count += 1; + } + _ => (), + }; + Ok(VisitRecursion::Continue) + }) + .expect("no errors"); + + Ok(count) } fn check_node(node: &LogicalPlan) -> Result<()> { diff --git a/iox_query/src/logical_optimizer/handle_gapfill/range_predicate.rs b/iox_query/src/logical_optimizer/handle_gapfill/range_predicate.rs index 1702df41b4..20a0cdeb68 100644 --- a/iox_query/src/logical_optimizer/handle_gapfill/range_predicate.rs +++ b/iox_query/src/logical_optimizer/handle_gapfill/range_predicate.rs @@ -2,9 +2,12 @@ use std::ops::{Bound, Range}; use datafusion::{ - common::DFSchema, - error::{DataFusionError, Result}, - logical_expr::{Between, BinaryExpr, LogicalPlan, Operator, PlanVisitor}, + common::{ + tree_node::{TreeNode, TreeNodeVisitor, VisitRecursion}, + DFSchema, + }, + error::Result, + logical_expr::{Between, BinaryExpr, LogicalPlan, Operator}, optimizer::utils::split_conjunction, prelude::{Column, Expr}, }; @@ -16,7 +19,7 @@ pub(super) fn find_time_range(plan: &LogicalPlan, time_col: &Column) -> Result<R col: time_col.clone(), range: TimeRange::default(), }; - plan.accept(&mut v)?; + plan.visit(&mut v)?; Ok(v.range.0) } @@ -25,19 +28,19 @@ struct TimeRangeVisitor { range: TimeRange, } -impl PlanVisitor for TimeRangeVisitor { - type Error = DataFusionError; +impl TreeNodeVisitor for TimeRangeVisitor { + type N = LogicalPlan; - fn pre_visit(&mut self, plan: &LogicalPlan) -> Result<bool, Self::Error> { + fn pre_visit(&mut self, plan: &LogicalPlan) -> Result<VisitRecursion> { match plan { LogicalPlan::Projection(p) => { let idx = p.schema.index_of_column(&self.col)?; match unwrap_alias(&p.expr[idx]) { Expr::Column(ref c) => { self.col = c.clone(); - Ok(true) + Ok(VisitRecursion::Continue) } - _ => Ok(false), + _ => Ok(VisitRecursion::Stop), } } LogicalPlan::Filter(f) => { @@ -48,15 +51,15 @@ impl PlanVisitor for TimeRangeVisitor { range.with_expr(f.input.schema().as_ref(), &self.col, expr) })?; self.range = range; - Ok(true) + Ok(VisitRecursion::Continue) } // These nodes do not alter their schema, so we can recurse through them LogicalPlan::Sort(_) | LogicalPlan::Repartition(_) | LogicalPlan::Limit(_) - | LogicalPlan::Distinct(_) => Ok(true), + | LogicalPlan::Distinct(_) => Ok(VisitRecursion::Continue), // At some point we may wish to handle joins here too. 
- _ => Ok(false), + _ => Ok(VisitRecursion::Stop), } } } diff --git a/iox_query/src/logical_optimizer/influx_regex_to_datafusion_regex.rs b/iox_query/src/logical_optimizer/influx_regex_to_datafusion_regex.rs index 0cc586d843..75ea9f92a6 100644 --- a/iox_query/src/logical_optimizer/influx_regex_to_datafusion_regex.rs +++ b/iox_query/src/logical_optimizer/influx_regex_to_datafusion_regex.rs @@ -1,7 +1,7 @@ use datafusion::{ - common::DFSchema, + common::{tree_node::TreeNodeRewriter, DFSchema}, error::DataFusionError, - logical_expr::{expr_rewriter::ExprRewriter, utils::from_plan, LogicalPlan, Operator}, + logical_expr::{utils::from_plan, LogicalPlan, Operator}, optimizer::{utils::rewrite_preserving_name, OptimizerConfig, OptimizerRule}, prelude::{binary_expr, lit, Expr}, scalar::ScalarValue, @@ -67,7 +67,9 @@ fn optimize(plan: &LogicalPlan) -> Result<LogicalPlan, DataFusionError> { from_plan(plan, new_exprs.as_slice(), new_inputs.as_slice()) } -impl ExprRewriter for InfluxRegexToDataFusionRegex { +impl TreeNodeRewriter for InfluxRegexToDataFusionRegex { + type N = Expr; + fn mutate(&mut self, expr: Expr) -> Result<Expr, DataFusionError> { match expr { Expr::ScalarUDF { fun, mut args } => { diff --git a/iox_query/src/physical_optimizer/chunk_extraction.rs b/iox_query/src/physical_optimizer/chunk_extraction.rs index 1279c79392..a43b088611 100644 --- a/iox_query/src/physical_optimizer/chunk_extraction.rs +++ b/iox_query/src/physical_optimizer/chunk_extraction.rs @@ -172,7 +172,8 @@ mod tests { use arrow::datatypes::{DataType, Field, Schema as ArrowSchema}; use data_types::ChunkId; use datafusion::{ - physical_plan::{expressions::Literal, filter::FilterExec, tree_node::TreeNodeRewritable}, + common::tree_node::{Transformed, TreeNode}, + physical_plan::{expressions::Literal, filter::FilterExec}, prelude::{col, lit}, scalar::ScalarValue, }; @@ -347,9 +348,9 @@ mod tests { Some(Arc::new(Literal::new(ScalarValue::from(false)))), None, ); - return Ok(Some(Arc::new(exec))); + return Ok(Transformed::Yes(Arc::new(exec))); } - Ok(None) + Ok(Transformed::No(plan)) }) .unwrap(); assert!(extract_chunks(plan.as_ref()).is_none()); diff --git a/iox_query/src/physical_optimizer/combine_chunks.rs b/iox_query/src/physical_optimizer/combine_chunks.rs index bc06e2d2f2..2840250865 100644 --- a/iox_query/src/physical_optimizer/combine_chunks.rs +++ b/iox_query/src/physical_optimizer/combine_chunks.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, ExecutionPlan}, + physical_plan::ExecutionPlan, }; use predicate::Predicate; @@ -35,7 +36,7 @@ impl PhysicalOptimizerRule for CombineChunks { ) -> Result<Arc<dyn ExecutionPlan>> { plan.transform_up(&|plan| { if let Some((schema, chunks, output_sort_key)) = extract_chunks(plan.as_ref()) { - return Ok(Some(chunks_to_physical_nodes( + return Ok(Transformed::Yes(chunks_to_physical_nodes( &schema, output_sort_key.as_ref(), chunks, @@ -44,7 +45,7 @@ impl PhysicalOptimizerRule for CombineChunks { ))); } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs index 3d36ed7819..609eb6272c 100644 --- a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs +++ b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs @@ -1,10 +1,11 @@ use std::{collections::HashSet, 
sync::Arc}; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, ExecutionPlan}, + physical_plan::ExecutionPlan, }; use predicate::Predicate; use schema::{sort::SortKeyBuilder, TIME_COLUMN_NAME}; @@ -41,7 +42,7 @@ impl PhysicalOptimizerRule for DedupNullColumns { assert_eq!(children.len(), 1); let child = children.remove(0); let Some((schema, chunks, _output_sort_key)) = extract_chunks(child.as_ref()) else { - return Ok(None); + return Ok(Transformed::No(plan)); }; let pk_cols = dedup_exec.sort_columns(); @@ -73,14 +74,14 @@ impl PhysicalOptimizerRule for DedupNullColumns { ); let sort_exprs = arrow_sort_key_exprs(&sort_key, &schema); - return Ok(Some(Arc::new(DeduplicateExec::new( + return Ok(Transformed::Yes(Arc::new(DeduplicateExec::new( child, sort_exprs, dedup_exec.use_chunk_order_col(), )))); } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs index 87f713b0ba..cc42e7ef43 100644 --- a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs +++ b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs @@ -2,10 +2,11 @@ use std::{cmp::Reverse, sync::Arc}; use arrow::compute::SortOptions; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, ExecutionPlan}, + physical_plan::ExecutionPlan, }; use indexmap::IndexSet; use predicate::Predicate; @@ -58,7 +59,7 @@ impl PhysicalOptimizerRule for DedupSortOrder { assert_eq!(children.len(), 1); let child = children.remove(0); let Some((schema, chunks, _output_sort_key)) = extract_chunks(child.as_ref()) else { - return Ok(None); + return Ok(Transformed::No(plan)) }; let mut chunk_sort_keys: Vec<IndexSet<_>> = chunks @@ -135,14 +136,14 @@ impl PhysicalOptimizerRule for DedupSortOrder { ); let sort_exprs = arrow_sort_key_exprs(&quorum_sort_key, &schema); - return Ok(Some(Arc::new(DeduplicateExec::new( + return Ok(Transformed::Yes(Arc::new(DeduplicateExec::new( child, sort_exprs, dedup_exec.use_chunk_order_col(), )))); } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/dedup/partition_split.rs b/iox_query/src/physical_optimizer/dedup/partition_split.rs index cbc562e6f0..b4dd530803 100644 --- a/iox_query/src/physical_optimizer/dedup/partition_split.rs +++ b/iox_query/src/physical_optimizer/dedup/partition_split.rs @@ -2,10 +2,11 @@ use std::sync::Arc; use data_types::PartitionId; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan}, + physical_plan::{union::UnionExec, ExecutionPlan}, }; use hashbrown::HashMap; use observability_deps::tracing::warn; @@ -38,7 +39,7 @@ impl PhysicalOptimizerRule for PartitionSplit { assert_eq!(children.len(), 1); let child = children.remove(0); let Some((schema, chunks, output_sort_key)) = extract_chunks(child.as_ref()) else { - return Ok(None); + return Ok(Transformed::No(plan)); }; let mut chunks_by_partition: HashMap<PartitionId, Vec<Arc<dyn QueryChunk>>> = @@ -53,7 +54,7 @@ impl PhysicalOptimizerRule for PartitionSplit { // If there not multiple partitions (0 or 1), then this optimizer is a no-op. 
Signal that to the // optimizer framework. if chunks_by_partition.len() < 2 { - return Ok(None); + return Ok(Transformed::No(plan)); } // Protect against degenerative plans @@ -69,7 +70,7 @@ impl PhysicalOptimizerRule for PartitionSplit { max_dedup_partition_split, "cannot split dedup operation based on partition, too many partitions" ); - return Ok(None); + return Ok(Transformed::No(plan)); } // ensure deterministic order @@ -94,10 +95,10 @@ impl PhysicalOptimizerRule for PartitionSplit { }) .collect(), ); - return Ok(Some(Arc::new(out))); + return Ok(Transformed::Yes(Arc::new(out))); } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs index 60bd73b515..1ac473f7ee 100644 --- a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs +++ b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, ExecutionPlan}, + physical_plan::ExecutionPlan, }; use predicate::Predicate; @@ -31,11 +32,11 @@ impl PhysicalOptimizerRule for RemoveDedup { assert_eq!(children.len(), 1); let child = children.remove(0); let Some((schema, chunks, output_sort_key)) = extract_chunks(child.as_ref()) else { - return Ok(None); + return Ok(Transformed::No(plan)); }; if (chunks.len() < 2) && chunks.iter().all(|c| !c.may_contain_pk_duplicates()) { - return Ok(Some(chunks_to_physical_nodes( + return Ok(Transformed::Yes(chunks_to_physical_nodes( &schema, output_sort_key.as_ref(), chunks, @@ -45,7 +46,7 @@ impl PhysicalOptimizerRule for RemoveDedup { } } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/dedup/time_split.rs b/iox_query/src/physical_optimizer/dedup/time_split.rs index 61f9aa4022..8f92899f20 100644 --- a/iox_query/src/physical_optimizer/dedup/time_split.rs +++ b/iox_query/src/physical_optimizer/dedup/time_split.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan}, + physical_plan::{union::UnionExec, ExecutionPlan}, }; use observability_deps::tracing::warn; use predicate::Predicate; @@ -37,14 +38,14 @@ impl PhysicalOptimizerRule for TimeSplit { assert_eq!(children.len(), 1); let child = children.remove(0); let Some((schema, chunks, output_sort_key)) = extract_chunks(child.as_ref()) else { - return Ok(None); + return Ok(Transformed::No(plan)); }; let groups = group_potential_duplicates(chunks); // if there are no chunks or there is only one group, we don't need to split if groups.len() < 2 { - return Ok(None); + return Ok(Transformed::No(plan)); } // Protect against degenerative plans @@ -60,7 +61,7 @@ impl PhysicalOptimizerRule for TimeSplit { max_dedup_time_split, "cannot split dedup operation based on time overlaps, too many groups" ); - return Ok(None); + return Ok(Transformed::No(plan)); } let out = UnionExec::new( @@ -81,10 +82,10 @@ impl PhysicalOptimizerRule for TimeSplit { }) .collect(), ); - return Ok(Some(Arc::new(out))); + return Ok(Transformed::Yes(Arc::new(out))); } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/predicate_pushdown.rs 
b/iox_query/src/physical_optimizer/predicate_pushdown.rs index 0386a8f337..2da2ec9869 100644 --- a/iox_query/src/physical_optimizer/predicate_pushdown.rs +++ b/iox_query/src/physical_optimizer/predicate_pushdown.rs @@ -1,21 +1,17 @@ use std::{collections::HashSet, sync::Arc}; use datafusion::{ + common::tree_node::{RewriteRecursion, Transformed, TreeNode, TreeNodeRewriter}, config::ConfigOptions, error::{DataFusionError, Result}, logical_expr::Operator, - physical_expr::{ - rewrite::{RewriteRecursion, TreeNodeRewriter}, - split_conjunction, - utils::collect_columns, - }, + physical_expr::{split_conjunction, utils::collect_columns}, physical_optimizer::PhysicalOptimizerRule, physical_plan::{ empty::EmptyExec, expressions::{BinaryExpr, Column}, file_format::ParquetExec, filter::FilterExec, - tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan, PhysicalExpr, }, @@ -44,7 +40,7 @@ impl PhysicalOptimizerRule for PredicatePushdown { let child_any = child.as_any(); if let Some(child_empty) = child_any.downcast_ref::<EmptyExec>() { if !child_empty.produce_one_row() { - return Ok(Some(child)); + return Ok(Transformed::Yes(child)); } } else if let Some(child_union) = child_any.downcast_ref::<UnionExec>() { let new_inputs = child_union @@ -59,7 +55,7 @@ impl PhysicalOptimizerRule for PredicatePushdown { }) .collect::<Result<Vec<_>>>()?; let new_union = UnionExec::new(new_inputs); - return Ok(Some(Arc::new(new_union))); + return Ok(Transformed::Yes(Arc::new(new_union))); } else if let Some(child_parquet) = child_any.downcast_ref::<ParquetExec>() { let existing = child_parquet .predicate() @@ -80,7 +76,7 @@ impl PhysicalOptimizerRule for PredicatePushdown { None, )), )?); - return Ok(Some(new_node)); + return Ok(Transformed::Yes(new_node)); } else if let Some(child_dedup) = child_any.downcast_ref::<DeduplicateExec>() { let dedup_cols = child_dedup.sort_columns(); let (pushdown, no_pushdown): (Vec<_>, Vec<_>) = @@ -112,12 +108,12 @@ impl PhysicalOptimizerRule for PredicatePushdown { new_node, )?); } - return Ok(Some(new_node)); + return Ok(Transformed::Yes(new_node)); } } } - Ok(None) + Ok(Transformed::No(plan)) }) } @@ -135,7 +131,9 @@ struct ColumnCollector { cols: HashSet<Column>, } -impl TreeNodeRewriter<Arc<dyn PhysicalExpr>> for ColumnCollector { +impl TreeNodeRewriter for ColumnCollector { + type N = Arc<dyn PhysicalExpr>; + fn pre_visit( &mut self, node: &Arc<dyn PhysicalExpr>, diff --git a/iox_query/src/physical_optimizer/projection_pushdown.rs b/iox_query/src/physical_optimizer/projection_pushdown.rs index a465d6af05..04c94589ad 100644 --- a/iox_query/src/physical_optimizer/projection_pushdown.rs +++ b/iox_query/src/physical_optimizer/projection_pushdown.rs @@ -5,6 +5,7 @@ use std::{ use arrow::datatypes::SchemaRef; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::{DataFusionError, Result}, physical_expr::{ @@ -19,7 +20,6 @@ use datafusion::{ filter::FilterExec, projection::ProjectionExec, sorts::{sort::SortExec, sort_preserving_merge::SortPreservingMergeExec}, - tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan, PhysicalExpr, }, @@ -53,11 +53,11 @@ impl PhysicalOptimizerRule for ProjectionPushdown { column_names.push(output_name.as_str()); } else { // don't bother w/ renames - return Ok(None); + return Ok(Transformed::No(plan)); } } else { // don't bother to deal w/ calculation within projection nodes - return Ok(None); + return Ok(Transformed::No(plan)); } } @@ -67,7 +67,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown 
{ child_empty.produce_one_row(), Arc::new(child_empty.schema().project(&column_indices)?), ); - return Ok(Some(Arc::new(new_child))); + return Ok(Transformed::Yes(Arc::new(new_child))); } else if let Some(child_union) = child_any.downcast_ref::<UnionExec>() { let new_inputs = child_union .inputs() @@ -81,7 +81,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { }) .collect::<Result<Vec<_>>>()?; let new_union = UnionExec::new(new_inputs); - return Ok(Some(Arc::new(new_union))); + return Ok(Transformed::Yes(Arc::new(new_union))); } else if let Some(child_parquet) = child_any.downcast_ref::<ParquetExec>() { let projection = match child_parquet.base_config().projection.as_ref() { Some(projection) => column_indices @@ -100,7 +100,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { }; let new_child = ParquetExec::new(base_config, child_parquet.predicate().cloned(), None); - return Ok(Some(Arc::new(new_child))); + return Ok(Transformed::Yes(Arc::new(new_child))); } else if let Some(child_filter) = child_any.downcast_ref::<FilterExec>() { let filter_required_cols = collect_columns(child_filter.predicate()); let filter_required_cols = filter_required_cols @@ -124,7 +124,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { }, )?; - return Ok(Some(plan)); + return Ok(Transformed::Yes(plan)); } else if let Some(child_sort) = child_any.downcast_ref::<SortExec>() { let sort_required_cols = child_sort .expr() @@ -150,7 +150,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { }, )?; - return Ok(Some(plan)); + return Ok(Transformed::Yes(plan)); } else if let Some(child_sort) = child_any.downcast_ref::<SortPreservingMergeExec>() { let sort_required_cols = child_sort @@ -176,7 +176,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { }, )?; - return Ok(Some(plan)); + return Ok(Transformed::Yes(plan)); } else if let Some(child_proj) = child_any.downcast_ref::<ProjectionExec>() { let expr = column_indices .iter() @@ -191,7 +191,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { // and miss the optimization of that particular new ProjectionExec let plan = self.optimize(plan, config)?; - return Ok(Some(plan)); + return Ok(Transformed::Yes(plan)); } else if let Some(child_dedup) = child_any.downcast_ref::<DeduplicateExec>() { let dedup_required_cols = child_dedup.sort_columns(); @@ -216,7 +216,7 @@ impl PhysicalOptimizerRule for ProjectionPushdown { }, )?; - return Ok(Some(plan)); + return Ok(Transformed::Yes(plan)); } else if let Some(child_recordbatches) = child_any.downcast_ref::<RecordBatchesExec>() { @@ -225,11 +225,11 @@ impl PhysicalOptimizerRule for ProjectionPushdown { Arc::new(child_recordbatches.schema().project(&column_indices)?), child_recordbatches.output_sort_key_memo().cloned(), ); - return Ok(Some(Arc::new(new_child))); + return Ok(Transformed::Yes(Arc::new(new_child))); } } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/sort/redundant_sort.rs b/iox_query/src/physical_optimizer/sort/redundant_sort.rs index 01eb7a5578..4edefebb72 100644 --- a/iox_query/src/physical_optimizer/sort/redundant_sort.rs +++ b/iox_query/src/physical_optimizer/sort/redundant_sort.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{sorts::sort::SortExec, tree_node::TreeNodeRewritable, ExecutionPlan}, + physical_plan::{sorts::sort::SortExec, ExecutionPlan}, }; /// Removes [`SortExec`] if it is no longer 
needed. @@ -24,11 +25,11 @@ impl PhysicalOptimizerRule for RedundantSort { let child = sort_exec.input(); if child.output_ordering() == Some(sort_exec.expr()) { - return Ok(Some(Arc::clone(child))); + return Ok(Transformed::Yes(Arc::clone(child))); } } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/sort/sort_pushdown.rs b/iox_query/src/physical_optimizer/sort/sort_pushdown.rs index f1fef8328d..7f820d7740 100644 --- a/iox_query/src/physical_optimizer/sort/sort_pushdown.rs +++ b/iox_query/src/physical_optimizer/sort/sort_pushdown.rs @@ -1,12 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{ - sorts::sort::SortExec, tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan, - }, + physical_plan::{sorts::sort::SortExec, union::UnionExec, ExecutionPlan}, }; /// Pushes [`SortExec`] closer to the data source. @@ -44,11 +43,11 @@ impl PhysicalOptimizerRule for SortPushdown { }) .collect::<Result<Vec<_>>>()?, ); - return Ok(Some(Arc::new(new_union))); + return Ok(Transformed::Yes(Arc::new(new_union))); } } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/union/nested_union.rs b/iox_query/src/physical_optimizer/union/nested_union.rs index 97961217fa..4b12cdf49a 100644 --- a/iox_query/src/physical_optimizer/union/nested_union.rs +++ b/iox_query/src/physical_optimizer/union/nested_union.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan}, + physical_plan::{union::UnionExec, ExecutionPlan}, }; /// Optimizer that replaces nested [`UnionExec`]s with a single level. @@ -51,11 +52,11 @@ impl PhysicalOptimizerRule for NestedUnion { } if found_union { - return Ok(Some(Arc::new(UnionExec::new(children_new)))); + return Ok(Transformed::Yes(Arc::new(UnionExec::new(children_new)))); } } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/physical_optimizer/union/one_union.rs b/iox_query/src/physical_optimizer/union/one_union.rs index fc52130704..311ab1e831 100644 --- a/iox_query/src/physical_optimizer/union/one_union.rs +++ b/iox_query/src/physical_optimizer/union/one_union.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use datafusion::{ + common::tree_node::{Transformed, TreeNode}, config::ConfigOptions, error::Result, physical_optimizer::PhysicalOptimizerRule, - physical_plan::{tree_node::TreeNodeRewritable, union::UnionExec, ExecutionPlan}, + physical_plan::{union::UnionExec, ExecutionPlan}, }; /// Optimizer that replaces [`UnionExec`] with a single child node w/ the child note itself. 
@@ -33,11 +34,11 @@ impl PhysicalOptimizerRule for OneUnion { if let Some(union_exec) = plan_any.downcast_ref::<UnionExec>() { let mut children = union_exec.children(); if children.len() == 1 { - return Ok(Some(children.remove(0))); + return Ok(Transformed::Yes(children.remove(0))); } } - Ok(None) + Ok(Transformed::No(plan)) }) } diff --git a/iox_query/src/util.rs b/iox_query/src/util.rs index 3167cdacdf..2996026876 100644 --- a/iox_query/src/util.rs +++ b/iox_query/src/util.rs @@ -18,13 +18,11 @@ use data_types::{ }; use datafusion::{ self, - common::{DFSchema, ToDFSchema}, + common::{tree_node::TreeNodeRewriter, DFSchema, ToDFSchema}, datasource::{provider_as_source, MemTable}, error::{DataFusionError, Result as DatafusionResult}, execution::context::ExecutionProps, - logical_expr::{ - expr_rewriter::ExprRewriter, BinaryExpr, ExprSchemable, LogicalPlan, LogicalPlanBuilder, - }, + logical_expr::{BinaryExpr, ExprSchemable, LogicalPlan, LogicalPlanBuilder}, optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext}, physical_expr::create_physical_expr, physical_plan::{ @@ -205,7 +203,9 @@ impl<'a> MissingColumnsToNull<'a> { } } -impl<'a> ExprRewriter for MissingColumnsToNull<'a> { +impl<'a> TreeNodeRewriter for MissingColumnsToNull<'a> { + type N = Expr; + fn mutate(&mut self, expr: Expr) -> DatafusionResult<Expr> { // Ideally this would simply find all Expr::Columns and // replace them with a constant NULL value. However, doing do @@ -358,7 +358,7 @@ pub fn create_basic_summary( mod tests { use arrow::datatypes::DataType; use datafusion::{ - logical_expr::expr_rewriter::ExprRewritable, + common::tree_node::TreeNode, prelude::{col, lit}, scalar::ScalarValue, }; diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs index e54137369a..2158e78d4b 100644 --- a/iox_query_influxql/src/plan/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -14,8 +14,9 @@ use crate::plan::var_ref::{column_type_to_var_ref_data_type, var_ref_data_type_t use arrow::datatypes::DataType; use chrono_tz::Tz; use datafusion::catalog::TableReference; +use datafusion::common::tree_node::{TreeNode, TreeNodeRewriter}; use datafusion::common::{DFSchema, DFSchemaRef, DataFusionError, Result, ScalarValue, ToDFSchema}; -use datafusion::logical_expr::expr_rewriter::{normalize_col, ExprRewritable, ExprRewriter}; +use datafusion::logical_expr::expr_rewriter::normalize_col; use datafusion::logical_expr::logical_plan::builder::project; use datafusion::logical_expr::logical_plan::Analyze; use datafusion::logical_expr::utils::{expr_as_column_expr, find_aggregate_exprs}; @@ -1049,7 +1050,9 @@ struct FixRegularExpressions<'a> { schemas: &'a Schemas, } -impl<'a> ExprRewriter for FixRegularExpressions<'a> { +impl<'a> TreeNodeRewriter for FixRegularExpressions<'a> { + type N = Expr; + fn mutate(&mut self, expr: Expr) -> Result<Expr> { match expr { // InfluxQL evaluates regular expression conditions to false if the column is numeric diff --git a/iox_query_influxql/src/plan/planner_rewrite_expression.rs b/iox_query_influxql/src/plan/planner_rewrite_expression.rs index 1e3bf8f1e6..f4128f8789 100644 --- a/iox_query_influxql/src/plan/planner_rewrite_expression.rs +++ b/iox_query_influxql/src/plan/planner_rewrite_expression.rs @@ -123,43 +123,46 @@ //! 
[`Eval`]: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4137 use crate::plan::util::Schemas; use arrow::datatypes::DataType; +use datafusion::common::tree_node::{Transformed, TreeNode}; use datafusion::common::{Result, ScalarValue}; -use datafusion::logical_expr::expr_rewriter::{ExprRewritable, ExprRewriter}; use datafusion::logical_expr::{ binary_expr, cast, coalesce, lit, BinaryExpr, Expr, ExprSchemable, Operator, }; /// Rewrite the expression tree and return a boolean result. pub(in crate::plan) fn rewrite_conditional(expr: Expr, schemas: &Schemas) -> Result<Expr> { - let expr = expr.rewrite(&mut RewriteAndCoerce { schemas })?; + let expr = rewrite_expr(expr, schemas)?; Ok(match expr { Expr::Literal(ScalarValue::Null) => lit(false), _ => expr, }) } -/// Rewrite the expression tree and return a result or `NULL` if some of the operands are -/// incompatible. -pub(in crate::plan) fn rewrite_expr(expr: Expr, schemas: &Schemas) -> Result<Expr> { - expr.rewrite(&mut RewriteAndCoerce { schemas }) +/// The expression was rewritten +fn yes(expr: Expr) -> Result<Transformed<Expr>> { + Ok(Transformed::Yes(expr)) } -/// Rewrite and coerce the expression tree to model the behavior -/// of an InfluxQL query. -struct RewriteAndCoerce<'a> { - schemas: &'a Schemas, +/// The expression was not rewritten +fn no(expr: Expr) -> Result<Transformed<Expr>> { + Ok(Transformed::No(expr)) } -impl<'a> ExprRewriter for RewriteAndCoerce<'a> { - fn mutate(&mut self, expr: Expr) -> Result<Expr> { +/// Rewrite the expression tree and return a result or `NULL` if some of the operands are +/// incompatible. +/// +/// Rewrite and coerce the expression tree to model the behavior +/// of an InfluxQL query. +pub(in crate::plan) fn rewrite_expr(expr: Expr, schemas: &Schemas) -> Result<Expr> { + expr.transform(&|expr| { match expr { Expr::BinaryExpr(BinaryExpr { ref left, op, ref right, }) => { - let lhs_type = left.get_type(&self.schemas.df_schema)?; - let rhs_type = right.get_type(&self.schemas.df_schema)?; + let lhs_type = left.get_type(&schemas.df_schema)?; + let rhs_type = right.get_type(&schemas.df_schema)?; match (lhs_type, op, rhs_type) { // @@ -183,7 +186,7 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { DataType::Null, _, DataType::Null - ) => Ok(lit(ScalarValue::Null)), + ) => yes(lit(ScalarValue::Null)), // NULL using AND or OR is rewritten as `false`, which the optimiser // may short circuit. @@ -191,12 +194,12 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { DataType::Null, Operator::Or | Operator::And, _ - ) => Ok(binary_expr(lit(false), op, (**right).clone())), + ) => yes(binary_expr(lit(false), op, (**right).clone())), ( _, Operator::Or | Operator::And, DataType::Null - ) => Ok(binary_expr((**left).clone(), op, lit(false))), + ) => yes(binary_expr((**left).clone(), op, lit(false))), // NULL with other operators is passed through to DataFusion, which is expected // evaluate to false. @@ -209,10 +212,10 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { _, Operator::Eq | Operator::NotEq | Operator::Gt | Operator::Lt | Operator::GtEq | Operator::LtEq, DataType::Null - ) => Ok(expr), + ) => no(expr), // Any other operations with NULL should return false - (DataType::Null, ..) | (.., DataType::Null) => Ok(lit(false)), + (DataType::Null, ..) 
| (.., DataType::Null) => yes(lit(false)), // // Boolean types @@ -222,7 +225,7 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { DataType::Boolean, Operator::And | Operator::Or | Operator::Eq | Operator::NotEq | Operator::BitwiseAnd | Operator::BitwiseXor | Operator::BitwiseOr, DataType::Boolean, - ) => Ok(rewrite_boolean((**left).clone(), op, (**right).clone())), + ) => yes(rewrite_boolean((**left).clone(), op, (**right).clone())), // // Numeric types @@ -247,16 +250,16 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { // implementations, however, InfluxQL coalesces the result to `0`. // See: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4268-L4270 - Operator::Divide => Ok(coalesce(vec![expr, lit(0_f64)])), - _ => Ok(expr), + Operator::Divide => yes(coalesce(vec![expr, lit(0_f64)])), + _ => no(expr), }, // // If either of the types UInt64 and the other is UInt64 or Int64 // (DataType::UInt64, ..) | (.., DataType::UInt64) => match op { - Operator::Divide => Ok(coalesce(vec![expr, lit(0_u64)])), - _ => Ok(expr), + Operator::Divide => yes(coalesce(vec![expr, lit(0_u64)])), + _ => no(expr), } // // Finally, if both sides are Int64 @@ -269,8 +272,8 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { // Like Float64, dividing by zero should return 0 for InfluxQL // // See: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4338-L4340 - Operator::Divide => Ok(coalesce(vec![expr, lit(0_i64)])), - _ => Ok(expr), + Operator::Divide => yes(coalesce(vec![expr, lit(0_i64)])), + _ => no(expr), }, // @@ -282,13 +285,13 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { DataType::Utf8, Operator::Eq | Operator::NotEq | Operator::RegexMatch | Operator::RegexNotMatch | Operator::StringConcat, DataType::Utf8 - ) => Ok(expr), + ) => no(expr), // Rewrite the + operator to the string-concatenation operator ( DataType::Utf8, Operator::Plus, DataType::Utf8 - ) => Ok(binary_expr((**left).clone(), Operator::StringConcat, (**right).clone())), + ) => yes(binary_expr((**left).clone(), Operator::StringConcat, (**right).clone())), // // Dictionary (tag column) is treated the same as Utf8 @@ -302,7 +305,7 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { DataType::Utf8, Operator::Eq | Operator::NotEq | Operator::RegexMatch | Operator::RegexNotMatch | Operator::StringConcat, DataType::Dictionary(..) - ) => Ok(expr), + ) => no(expr), ( DataType::Dictionary(..), Operator::Plus, @@ -312,13 +315,13 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { DataType::Utf8, Operator::Plus, DataType::Dictionary(..) - ) => Ok(expr), + ) => no(expr), // // Timestamp (time-range) expressions should pass through to DataFusion. // - (DataType::Timestamp(..), ..) => Ok(expr), - (.., DataType::Timestamp(..)) => Ok(expr), + (DataType::Timestamp(..), ..) => no(expr), + (.., DataType::Timestamp(..)) => no(expr), // // Unhandled binary expressions with conditional operators @@ -337,13 +340,13 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { | Operator::And | Operator::Or, _ - ) => Ok(lit(false)), + ) => yes(lit(false)), // // Everything else should result in `NULL`. 
// // See: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4558 - _ => Ok(lit(ScalarValue::Null)), + _ => yes(lit(ScalarValue::Null)), } } // @@ -351,9 +354,9 @@ impl<'a> ExprRewriter for RewriteAndCoerce<'a> { // as it will handle evaluating function calls, etc // // See: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4638-L4647 - _ => Ok(expr), + _ => no(expr), } - } + }) } /// Rewrite conditional operators to `false` and any diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs index 824a9b8660..171232b708 100644 --- a/predicate/src/lib.rs +++ b/predicate/src/lib.rs @@ -22,13 +22,9 @@ use arrow::{ }; use data_types::{InfluxDbType, TableSummary, TimestampRange}; use datafusion::{ + common::tree_node::{TreeNode, TreeNodeVisitor, VisitRecursion}, error::DataFusionError, - logical_expr::{ - binary_expr, - expr_visitor::{ExprVisitable, ExpressionVisitor, Recursion}, - utils::expr_to_columns, - BinaryExpr, - }, + logical_expr::{binary_expr, utils::expr_to_columns, BinaryExpr}, optimizer::utils::split_conjunction, physical_expr::execution_props::ExecutionProps, physical_optimizer::pruning::PruningStatistics, @@ -535,9 +531,10 @@ impl Predicate { return false; } - expr.accept(RowBasedVisitor::default()) - .expect("never fails") - .row_based + let mut visitor = RowBasedVisitor::default(); + expr.visit(&mut visitor).expect("never fails"); + + visitor.row_based }) .cloned() .collect(); @@ -622,8 +619,10 @@ impl Default for RowBasedVisitor { } } -impl ExpressionVisitor for RowBasedVisitor { - fn pre_visit(mut self, expr: &Expr) -> Result<Recursion<Self>, DataFusionError> { +impl TreeNodeVisitor for RowBasedVisitor { + type N = Expr; + + fn pre_visit(&mut self, expr: &Expr) -> Result<VisitRecursion, DataFusionError> { match expr { Expr::Alias(_, _) | Expr::Between { .. } @@ -658,13 +657,13 @@ impl ExpressionVisitor for RowBasedVisitor { | Expr::SimilarTo { .. } | Expr::Sort { .. } | Expr::TryCast { .. } - | Expr::Wildcard => Ok(Recursion::Continue(self)), + | Expr::Wildcard => Ok(VisitRecursion::Continue), Expr::AggregateFunction { .. } | Expr::AggregateUDF { .. } | Expr::GroupingSet(_) | Expr::WindowFunction { .. 
} => { self.row_based = false; - Ok(Recursion::Stop(self)) + Ok(VisitRecursion::Stop) } } } diff --git a/predicate/src/rpc_predicate.rs b/predicate/src/rpc_predicate.rs index e6ac8639e9..ce346fbe51 100644 --- a/predicate/src/rpc_predicate.rs +++ b/predicate/src/rpc_predicate.rs @@ -4,12 +4,13 @@ mod measurement_rewrite; mod rewrite; mod value_rewrite; +use crate::rpc_predicate::column_rewrite::missing_tag_to_null; use crate::Predicate; +use datafusion::common::tree_node::TreeNode; use datafusion::common::ToDFSchema; use datafusion::error::Result as DataFusionResult; use datafusion::execution::context::ExecutionProps; -use datafusion::logical_expr::expr_rewriter::ExprRewritable; use datafusion::optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext}; use datafusion::prelude::{lit, Expr}; use observability_deps::tracing::{debug, trace}; @@ -17,7 +18,6 @@ use schema::Schema; use std::collections::BTreeSet; use std::sync::Arc; -use self::column_rewrite::MissingTagColumnRewriter; use self::field_rewrite::FieldProjectionRewriter; use self::measurement_rewrite::rewrite_measurement_references; use self::value_rewrite::rewrite_field_value_references; @@ -211,7 +211,6 @@ fn normalize_predicate( let mut predicate = predicate.clone(); let mut field_projections = FieldProjectionRewriter::new(schema.clone()); - let mut missing_tag_columns = MissingTagColumnRewriter::new(schema.clone()); let mut field_value_exprs = vec![]; @@ -226,7 +225,8 @@ fn normalize_predicate( .map(|e| { debug!(?e, "rewriting expr"); - let e = rewrite_measurement_references(table_name, e) + let e = e + .transform(&|e| rewrite_measurement_references(table_name, e)) .map(|e| log_rewrite(e, "rewrite_measurement_references")) // Rewrite any references to `_value = some_value` to literal true values. // Keeps track of these expressions, which can then be used to @@ -242,10 +242,10 @@ fn normalize_predicate( // in the table's schema as tags. Replace any column references that // do not exist, or that are not tags, with NULL. // Field values always use `_value` as a name and are handled above. - .and_then(|e| e.rewrite(&mut missing_tag_columns)) + .and_then(|e| e.transform(&|e| missing_tag_to_null(&schema, e))) .map(|e| log_rewrite(e, "missing_columums")) // apply IOx specific rewrites (that unlock other simplifications) - .and_then(rewrite::rewrite) + .and_then(rewrite::iox_expr_rewrite) .map(|e| log_rewrite(e, "rewrite")) // apply type_coercing so datafuson simplification can deal with this .and_then(|e| simplifier.coerce(e, Arc::clone(&df_schema))) diff --git a/predicate/src/rpc_predicate/column_rewrite.rs b/predicate/src/rpc_predicate/column_rewrite.rs index eeed0202bc..c58914fa95 100644 --- a/predicate/src/rpc_predicate/column_rewrite.rs +++ b/predicate/src/rpc_predicate/column_rewrite.rs @@ -1,53 +1,37 @@ use datafusion::{ - error::Result as DataFusionResult, logical_expr::expr_rewriter::ExprRewriter, prelude::*, + common::tree_node::Transformed, error::Result as DataFusionResult, prelude::*, scalar::ScalarValue, }; use schema::{InfluxColumnType, Schema}; /// Logic for rewriting expressions from influxrpc that reference non /// existent columns, or columns that are not tags, to NULL. -#[derive(Debug)] -pub(crate) struct MissingTagColumnRewriter { - /// The input schema - schema: Schema, +pub fn missing_tag_to_null(schema: &Schema, expr: Expr) -> DataFusionResult<Transformed<Expr>> { + Ok(match expr { + Expr::Column(col) if !tag_column_exists(schema, &col)? 
=> Transformed::Yes(lit_null()), + expr => Transformed::No(expr), + }) } -impl MissingTagColumnRewriter { - /// Create a new [`MissingTagColumnRewriter`] targeting the given schema - pub(crate) fn new(schema: Schema) -> Self { - Self { schema } - } - - fn tag_column_exists(&self, col: &Column) -> DataFusionResult<bool> { - // todo a real error here (rpc_predicates shouldn't have table/relation qualifiers) - assert!(col.relation.is_none()); +fn tag_column_exists(schema: &Schema, col: &Column) -> DataFusionResult<bool> { + // todo a real error here (rpc_predicates shouldn't have table/relation qualifiers) + assert!(col.relation.is_none()); - let exists = self - .schema - .find_index_of(&col.name) - .map(|i| self.schema.field(i).0) - .map(|influx_column_type| influx_column_type == InfluxColumnType::Tag) - .unwrap_or(false); - Ok(exists) - } + let exists = schema + .find_index_of(&col.name) + .map(|i| schema.field(i).0) + .map(|influx_column_type| influx_column_type == InfluxColumnType::Tag) + .unwrap_or(false); + Ok(exists) } fn lit_null() -> Expr { lit(ScalarValue::Utf8(None)) } -impl ExprRewriter for MissingTagColumnRewriter { - fn mutate(&mut self, expr: Expr) -> DataFusionResult<Expr> { - Ok(match expr { - Expr::Column(col) if !self.tag_column_exists(&col)? => lit_null(), - expr => expr, - }) - } -} - #[cfg(test)] mod tests { - use datafusion::{arrow::datatypes::DataType, logical_expr::expr_rewriter::ExprRewritable}; + use datafusion::{arrow::datatypes::DataType, common::tree_node::TreeNode}; use schema::SchemaBuilder; use super::*; @@ -103,7 +87,7 @@ mod tests { .build() .unwrap(); - let mut rewriter = MissingTagColumnRewriter::new(schema); - expr.rewrite(&mut rewriter).unwrap() + expr.transform(&|expr| missing_tag_to_null(&schema, expr)) + .unwrap() } } diff --git a/predicate/src/rpc_predicate/field_rewrite.rs b/predicate/src/rpc_predicate/field_rewrite.rs index 0d26c27938..eb8de42e98 100644 --- a/predicate/src/rpc_predicate/field_rewrite.rs +++ b/predicate/src/rpc_predicate/field_rewrite.rs @@ -4,9 +4,9 @@ use super::FIELD_COLUMN_NAME; use arrow::array::{as_boolean_array, as_string_array, ArrayRef, StringArray}; use arrow::compute::kernels; use arrow::record_batch::RecordBatch; +use datafusion::common::tree_node::{TreeNode, TreeNodeVisitor, VisitRecursion}; use datafusion::common::DFSchema; use datafusion::error::{DataFusionError, Result as DataFusionResult}; -use datafusion::logical_expr::expr_visitor::{ExprVisitable, ExpressionVisitor, Recursion}; use datafusion::optimizer::utils::split_conjunction_owned; use datafusion::physical_expr::create_physical_expr; use datafusion::physical_expr::execution_props::ExecutionProps; @@ -78,7 +78,8 @@ impl FieldProjectionRewriter { // Rewrites a single predicate. 
Does not handle AND specially fn rewrite_single_conjunct(&mut self, expr: Expr) -> DataFusionResult<Expr> { - let finder = expr.accept(ColumnReferencesFinder::default())?; + let mut finder = ColumnReferencesFinder::default(); + expr.visit(&mut finder)?; // rewrite any expression that only references _field to `true` match (finder.saw_field_reference, finder.saw_non_field_reference) { @@ -217,8 +218,9 @@ struct ColumnReferencesFinder { saw_non_field_reference: bool, } -impl ExpressionVisitor for ColumnReferencesFinder { - fn pre_visit(mut self, expr: &Expr) -> DataFusionResult<Recursion<Self>> { +impl TreeNodeVisitor for ColumnReferencesFinder { + type N = Expr; + fn pre_visit(&mut self, expr: &Expr) -> DataFusionResult<VisitRecursion> { if let Expr::Column(col) = expr { if col.name == FIELD_COLUMN_NAME { self.saw_field_reference = true; @@ -229,9 +231,9 @@ impl ExpressionVisitor for ColumnReferencesFinder { // terminate early if we have already found both if self.saw_field_reference && self.saw_non_field_reference { - Ok(Recursion::Stop(self)) + Ok(VisitRecursion::Stop) } else { - Ok(Recursion::Continue(self)) + Ok(VisitRecursion::Continue) } } } diff --git a/predicate/src/rpc_predicate/measurement_rewrite.rs b/predicate/src/rpc_predicate/measurement_rewrite.rs index e5fcf2a0a6..ea367efaae 100644 --- a/predicate/src/rpc_predicate/measurement_rewrite.rs +++ b/predicate/src/rpc_predicate/measurement_rewrite.rs @@ -1,5 +1,5 @@ +use datafusion::common::tree_node::Transformed; use datafusion::error::Result as DataFusionResult; -use datafusion::logical_expr::expr_rewriter::{ExprRewritable, ExprRewriter}; use datafusion::prelude::{lit, Column, Expr}; use super::MEASUREMENT_COLUMN_NAME; @@ -9,27 +9,16 @@ use super::MEASUREMENT_COLUMN_NAME; pub(crate) fn rewrite_measurement_references( table_name: &str, expr: Expr, -) -> DataFusionResult<Expr> { - let mut rewriter = MeasurementRewriter { table_name }; - expr.rewrite(&mut rewriter) -} - -struct MeasurementRewriter<'a> { - table_name: &'a str, -} - -impl ExprRewriter for MeasurementRewriter<'_> { - fn mutate(&mut self, expr: Expr) -> DataFusionResult<Expr> { - Ok(match expr { - // rewrite col("_measurement") --> "table_name" - Expr::Column(Column { relation, name }) if name == MEASUREMENT_COLUMN_NAME => { - // should not have a qualified foo._measurement - // reference - assert!(relation.is_none()); - lit(self.table_name) - } - // no rewrite needed - _ => expr, - }) - } +) -> DataFusionResult<Transformed<Expr>> { + Ok(match expr { + // rewrite col("_measurement") --> "table_name" + Expr::Column(Column { relation, name }) if name == MEASUREMENT_COLUMN_NAME => { + // should not have a qualified foo._measurement + // reference + assert!(relation.is_none()); + Transformed::Yes(lit(table_name)) + } + // no rewrite needed + _ => Transformed::No(expr), + }) } diff --git a/predicate/src/rpc_predicate/rewrite.rs b/predicate/src/rpc_predicate/rewrite.rs index 4488e86a5a..2c65a0571c 100644 --- a/predicate/src/rpc_predicate/rewrite.rs +++ b/predicate/src/rpc_predicate/rewrite.rs @@ -1,11 +1,7 @@ use datafusion::{ + common::tree_node::{Transformed, TreeNode}, error::Result, - logical_expr::{ - binary_expr, - expr::Case, - expr_rewriter::{ExprRewritable, ExprRewriter}, - BinaryExpr, Operator, - }, + logical_expr::{binary_expr, expr::Case, BinaryExpr, Operator}, prelude::Expr, }; @@ -37,8 +33,22 @@ use datafusion::{ /// ELSE tag_col = 'cpu' /// END /// ``` -pub fn rewrite(expr: Expr) -> Result<Expr> { - expr.rewrite(&mut IOxExprRewriter::new()) +pub fn 
iox_expr_rewrite(expr: Expr) -> Result<Expr> { + expr.transform(&iox_expr_rewrite_inner) +} + +fn iox_expr_rewrite_inner(expr: Expr) -> Result<Transformed<Expr>> { + Ok(match expr { + Expr::BinaryExpr(BinaryExpr { left, op, right }) if is_case(&left) && is_comparison(op) => { + Transformed::Yes(inline_case(true, *left, *right, op)) + } + Expr::BinaryExpr(BinaryExpr { left, op, right }) + if is_case(&right) && is_comparison(op) => + { + Transformed::Yes(inline_case(false, *left, *right, op)) + } + expr => Transformed::No(expr), + }) } /// Special purpose `Expr` rewrite rules for an Expr that is used as a predicate. @@ -58,15 +68,58 @@ pub fn rewrite(expr: Expr) -> Result<Expr> { /// Currently it is special cases, but it would be great to generalize /// it and contribute it back to DataFusion pub fn simplify_predicate(expr: Expr) -> Result<Expr> { - expr.rewrite(&mut IOxPredicateRewriter::new()) + expr.transform(&simplify_predicate_inner) } -/// see docs on [rewrite] -struct IOxExprRewriter {} +fn simplify_predicate_inner(expr: Expr) -> Result<Transformed<Expr>> { + // look for this structure: + // + // NOT(col IS NULL) AND col = 'foo' + // + // and replace it with + // + // col = 'foo' + // + // Proof: + // Case 1: col is NULL + // + // not (NULL IS NULL) AND col = 'foo' + // not (true) AND NULL = 'foo' + // NULL + // + // Case 2: col is not NULL and not equal to 'foo' + // not (false) AND false + // true AND false + // false + // + // Case 3: col is not NULL and equal to 'foo' + // not (false) AND true + // true AND true + // true + match expr { + Expr::BinaryExpr(BinaryExpr { + left, + op: Operator::And, + right, + }) => { + if let (Some(coll), Some(colr)) = (is_col_not_null(&left), is_col_op_lit(&right)) { + if colr == coll { + return Ok(Transformed::Yes(*right)); + } + } else if let (Some(coll), Some(colr)) = (is_col_op_lit(&left), is_col_not_null(&right)) + { + if colr == coll { + return Ok(Transformed::Yes(*left)); + } + }; -impl IOxExprRewriter { - fn new() -> Self { - Self {} + Ok(Transformed::No(Expr::BinaryExpr(BinaryExpr { + left, + op: Operator::And, + right, + }))) + } + expr => Ok(Transformed::No(expr)), } } @@ -109,24 +162,6 @@ fn is_comparison(op: Operator) -> bool { } } -impl ExprRewriter for IOxExprRewriter { - fn mutate(&mut self, expr: Expr) -> Result<Expr> { - match expr { - Expr::BinaryExpr(BinaryExpr { left, op, right }) - if is_case(&left) && is_comparison(op) => - { - Ok(inline_case(true, *left, *right, op)) - } - Expr::BinaryExpr(BinaryExpr { left, op, right }) - if is_case(&right) && is_comparison(op) => - { - Ok(inline_case(false, *left, *right, op)) - } - expr => Ok(expr), - } - } -} - fn inline_case(case_on_left: bool, left: Expr, right: Expr, op: Operator) -> Expr { let (when_then_expr, else_expr, other) = match (case_on_left, left, right) { ( @@ -177,15 +212,6 @@ fn inline_case(case_on_left: bool, left: Expr, right: Expr, op: Operator) -> Exp }) } -/// see docs on [simplify_predicate] -struct IOxPredicateRewriter {} - -impl IOxPredicateRewriter { - fn new() -> Self { - Self {} - } -} - /// returns the column name for a column expression fn is_col(expr: &Expr) -> Option<&str> { if let Expr::Column(c) = &expr { @@ -226,61 +252,6 @@ fn is_col_op_lit(expr: &Expr) -> Option<&str> { } } -impl ExprRewriter for IOxPredicateRewriter { - fn mutate(&mut self, expr: Expr) -> Result<Expr> { - // look for this structure: - // - // NOT(col IS NULL) AND col = 'foo' - // - // and replace it with - // - // col = 'foo' - // - // Proof: - // Case 1: col is NULL - // - // not 
(NULL IS NULL) AND col = 'foo' - // not (true) AND NULL = 'foo' - // NULL - // - // Case 2: col is not NULL and not equal to 'foo' - // not (false) AND false - // true AND false - // false - // - // Case 3: col is not NULL and equal to 'foo' - // not (false) AND true - // true AND true - // true - match expr { - Expr::BinaryExpr(BinaryExpr { - left, - op: Operator::And, - right, - }) => { - if let (Some(coll), Some(colr)) = (is_col_not_null(&left), is_col_op_lit(&right)) { - if colr == coll { - return Ok(*right); - } - } else if let (Some(coll), Some(colr)) = - (is_col_op_lit(&left), is_col_not_null(&right)) - { - if colr == coll { - return Ok(*left); - } - }; - - Ok(Expr::BinaryExpr(BinaryExpr { - left, - op: Operator::And, - right, - })) - } - expr => Ok(expr), - } - } -} - #[cfg(test)] mod tests { use std::ops::Add; @@ -299,7 +270,7 @@ mod tests { .eq(lit("case2")); let expected = expr.clone(); - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } #[test] @@ -314,7 +285,7 @@ mod tests { col("tag").eq(lit("bar")), ); - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } #[test] @@ -331,7 +302,7 @@ mod tests { lit("bar").eq(col("tag")), ); - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } #[test] @@ -358,7 +329,7 @@ mod tests { )), ); - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } #[test] @@ -404,7 +375,7 @@ mod tests { expr.clone() }; - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } #[test] @@ -434,7 +405,7 @@ mod tests { .otherwise(lit("WTF?").eq(lit("is null"))) .unwrap(); - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } #[test] @@ -450,7 +421,7 @@ mod tests { .add(lit(1)); let expected = expr.clone(); - assert_eq!(expected, rewrite(expr).unwrap()); + assert_eq!(expected, iox_expr_rewrite(expr).unwrap()); } fn make_case(when_expr: Expr, then_expr: Expr, otherwise_expr: Expr) -> Expr { diff --git a/predicate/src/rpc_predicate/value_rewrite.rs b/predicate/src/rpc_predicate/value_rewrite.rs index ea2bc6b582..d91946ae4c 100644 --- a/predicate/src/rpc_predicate/value_rewrite.rs +++ b/predicate/src/rpc_predicate/value_rewrite.rs @@ -1,5 +1,5 @@ +use datafusion::common::tree_node::{TreeNode, TreeNodeRewriter}; use datafusion::error::Result as DataFusionResult; -use datafusion::logical_expr::expr_rewriter::{ExprRewritable, ExprRewriter}; use datafusion::prelude::{lit, Expr}; use crate::ValueExpr; @@ -19,7 +19,9 @@ struct FieldValueRewriter<'a> { value_exprs: &'a mut Vec<ValueExpr>, } -impl<'a> ExprRewriter for FieldValueRewriter<'a> { +impl<'a> TreeNodeRewriter for FieldValueRewriter<'a> { + type N = Expr; + fn mutate(&mut self, expr: Expr) -> DataFusionResult<Expr> { // try and convert Expr into a ValueExpr match expr.try_into() { diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index b740605c59..4e182cd5be 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -18,6 +18,7 @@ license.workspace = true [dependencies] ahash = { version = "0.8", default-features = false, features = ["runtime-rng"] } arrow = { version = "34", features = ["dyn_cmp_dict", "prettyprint"] } +arrow-array = { version = "34", default-features = false, features = ["chrono-tz"] } arrow-flight = { version = "34", features = ["flight-sql-experimental"] } 
arrow-ord = { version = "34", default-features = false, features = ["dyn_cmp_dict"] } arrow-string = { version = "34", default-features = false, features = ["dyn_cmp_dict"] } @@ -29,9 +30,9 @@ bytes = { version = "1" } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] } crossbeam-utils = { version = "0.8" } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74c3955db48f7ef6458125100eed3999512a56ba" } -datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74c3955db48f7ef6458125100eed3999512a56ba", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } -datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "74c3955db48f7ef6458125100eed3999512a56ba", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f30671760285f242950437c3c0f520ef418c1068" } +datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f30671760285f242950437c3c0f520ef418c1068", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f30671760285f242950437c3c0f520ef418c1068", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } digest = { version = "0.10", features = ["mac", "std"] } either = { version = "1" } fixedbitset = { version = "0.4" }
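The diff above migrates several IOx visitors and rewriters from DataFusion's removed ExprRewriter / ExpressionVisitor / PlanVisitor / TreeNodeRewritable traits to the unified TreeNode API, whose apply and transform methods take closures and signal control flow with VisitRecursion and Transformed. The following is a minimal, self-contained sketch of that closure-driven pattern on a toy expression type; the Expr, VisitRecursion, and Transformed definitions here are simplified stand-ins written for this sketch, not the real DataFusion types or the IOx code.

// Minimal stand-ins for the tree-node control-flow types (illustrative only).
#[allow(dead_code)]
enum VisitRecursion { Continue, Stop }
enum Transformed<T> { Yes(T), No(T) }

// A toy expression tree standing in for a real planner Expr type.
#[derive(Debug, Clone)]
enum Expr {
    Literal(i64),
    Udf { name: String, args: Vec<Expr> },
}

impl Expr {
    // Pre-order traversal driven by a closure, analogous to TreeNode::apply.
    fn apply<F: FnMut(&Expr) -> VisitRecursion>(&self, f: &mut F) {
        if let VisitRecursion::Stop = f(self) {
            return;
        }
        if let Expr::Udf { args, .. } = self {
            for a in args {
                a.apply(f);
            }
        }
    }

    // Bottom-up rewrite driven by a closure, analogous to TreeNode::transform.
    fn transform<F: Fn(Expr) -> Transformed<Expr>>(self, f: &F) -> Expr {
        let node = match self {
            Expr::Udf { name, args } => Expr::Udf {
                name,
                args: args.into_iter().map(|a| a.transform(f)).collect(),
            },
            other => other,
        };
        match f(node) {
            Transformed::Yes(e) | Transformed::No(e) => e,
        }
    }
}

fn main() {
    let expr = Expr::Udf {
        name: "date_bin_gapfill".into(),
        args: vec![Expr::Literal(1), Expr::Literal(2)],
    };

    // Count matching nodes with a captured counter, mirroring the shape of
    // count_date_bin_gapfill after the migration: no visitor struct needed.
    let mut count = 0;
    expr.apply(&mut |e| {
        if matches!(e, Expr::Udf { name, .. } if name == "date_bin_gapfill") {
            count += 1;
        }
        VisitRecursion::Continue
    });
    assert_eq!(count, 1);

    // Rewrite literals in place, signalling per node whether anything changed.
    let rewritten = expr.transform(&|e| match e {
        Expr::Literal(n) => Transformed::Yes(Expr::Literal(n * 10)),
        other => Transformed::No(other),
    });
    println!("{rewritten:?}");
}

The design point carried over from the diff is that traversal state (such as the counter) lives in the enclosing scope and is captured by the closure, instead of being threaded through a dedicated visitor type.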
1b882e00626c7f319bbdc1496f83b6161d3a6e65
Andrew Lamb
2023-01-23 23:23:34
`error arrow/ipc: could not read message schema: EOF` (#6668)
* chore: Test for schema from query * fix: Send schema even for no RecordBatches * fix: docs
null
fix: `error arrow/ipc: could not read message schema: EOF` (#6668) * chore: Test for schema from query * fix: Send schema even for no RecordBatches * fix: docs
diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs index 9df2c3c8a3..2280561945 100644 --- a/influxdb_iox/tests/end_to_end_cases/querier.rs +++ b/influxdb_iox/tests/end_to_end_cases/querier.rs @@ -5,7 +5,7 @@ use std::time::Duration; use arrow_util::assert_batches_sorted_eq; use assert_cmd::{assert::Assert, Command}; -use futures::{FutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use generated_types::{ aggregate::AggregateType, read_group_request::Group, read_response::frame::Data, }; @@ -61,6 +61,57 @@ mod with_kafka { .await } + #[tokio::test] + async fn basic_empty() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let table_name = "the_table"; + + // Set up the cluster ==================================== + let mut cluster = MiniCluster::create_shared(database_url).await; + + StepTest::new( + &mut cluster, + vec![ + Step::WriteLineProtocol(format!( + "{},tag1=A,tag2=B val=42i 123456\n\ + {},tag1=A,tag2=C val=43i 123457", + table_name, table_name + )), + Step::WaitForReadable, + Step::AssertNotPersisted, + Step::Custom(Box::new(move |state: &mut StepTestState| { + async move { + // query returns no results + let sql = format!("select * from {} where time > '2023-01-12'", table_name); + let querier_connection = + state.cluster().querier().querier_grpc_connection(); + let namespace = state.cluster().namespace(); + + let mut client = + influxdb_iox_client::flight::Client::new(querier_connection); + + let result_stream = client.sql(namespace.into(), sql).await.unwrap(); + + let mut flight_stream = result_stream.into_inner(); + + // no data is returned + assert!(flight_stream.next().await.is_none()); + + // even though there are no results, we should have still got the schema + // otherwise other clients may complain + // https://github.com/influxdata/influxdb_iox/pull/6668 + assert!(flight_stream.got_schema()); + } + .boxed() + })), + ], + ) + .run() + .await + } + #[tokio::test] async fn basic_on_parquet() { test_helpers::maybe_start_logging(); @@ -965,6 +1016,71 @@ mod kafkaless_rpc_write { .await } + #[tokio::test] + async fn basic_empty() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let table_name = "the_table"; + + // Set up the cluster ==================================== + let ingester_config = TestConfig::new_ingester2(&database_url); + let router_config = TestConfig::new_router2(&ingester_config); + // specially create a querier2 config that is NOT connected to the ingester2 + let querier_config = TestConfig::new_querier2_without_ingester2(&ingester_config); + + let mut cluster = MiniCluster::new() + .with_ingester(ingester_config) + .await + .with_router(router_config) + .await + .with_querier(querier_config) + .await; + + StepTest::new( + &mut cluster, + vec![ + Step::RecordNumParquetFiles, + Step::WriteLineProtocol(format!( + "{},tag1=A,tag2=B val=42i 123456\n\ + {},tag1=A,tag2=C val=43i 123457", + table_name, table_name + )), + // Wait for data to be persisted to parquet + Step::WaitForPersisted2 { + expected_increase: 1, + }, + Step::Custom(Box::new(move |state: &mut StepTestState| { + async move { + // query returns no results + let sql = format!("select * from {} where time > '2023-01-12'", table_name); + let querier_connection = + state.cluster().querier().querier_grpc_connection(); + let namespace = state.cluster().namespace(); + + let mut client = + 
influxdb_iox_client::flight::Client::new(querier_connection); + + let result_stream = client.sql(namespace.into(), sql).await.unwrap(); + + let mut flight_stream = result_stream.into_inner(); + + // no data is returned + assert!(flight_stream.next().await.is_none()); + + // even though there are no results, we should have still got the schema + // otherwise other clients may complain + // https://github.com/influxdata/influxdb_iox/pull/6668 + assert!(flight_stream.got_schema()); + } + .boxed() + })), + ], + ) + .run() + .await + } + #[tokio::test] async fn basic_no_ingester_connection() { test_helpers::maybe_start_logging(); diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs index 02aef49c4f..9724f2a9fb 100644 --- a/service_grpc_flight/src/lib.rs +++ b/service_grpc_flight/src/lib.rs @@ -2,7 +2,10 @@ mod request; -use arrow::error::ArrowError; +use arrow::{ + datatypes::SchemaRef, error::ArrowError, ipc::writer::IpcWriteOptions, + record_batch::RecordBatch, +}; use arrow_flight::{ encode::{FlightDataEncoder, FlightDataEncoderBuilder}, error::FlightError, @@ -10,8 +13,9 @@ use arrow_flight::{ flight_service_server::{FlightService as Flight, FlightServiceServer as FlightServer}, sql::{CommandStatementQuery, ProstMessageExt}, Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightEndpoint, FlightInfo, - HandshakeRequest, HandshakeResponse, PutResult, SchemaResult, Ticket, + HandshakeRequest, HandshakeResponse, PutResult, SchemaAsIpc, SchemaResult, Ticket, }; +use bytes::Bytes; use data_types::NamespaceNameError; use datafusion::{error::DataFusionError, physical_plan::ExecutionPlan}; use futures::{ready, Stream, StreamExt, TryStreamExt}; @@ -463,7 +467,7 @@ where /// Wrapper over a FlightDataEncodeStream that adds IOx specfic /// metadata and records completion struct GetStream { - inner: FlightDataEncoder, + inner: IOxFlightDataEncoder, #[allow(dead_code)] permit: InstrumentedAsyncOwnedSemaphorePermit, query_completed_token: QueryCompletedToken, @@ -480,6 +484,8 @@ impl GetStream { ) -> Result<Self, tonic::Status> { let app_metadata = proto::AppMetadata {}; + let schema = physical_plan.schema(); + let query_results = ctx .execute_stream(Arc::clone(&physical_plan)) .await @@ -489,7 +495,7 @@ impl GetStream { .map_err(FlightError::from); // setup inner stream - let inner = FlightDataEncoderBuilder::new() + let inner = IOxFlightDataEncoderBuilder::new(schema) .with_metadata(app_metadata.encode_to_vec().into()) .build(query_results); @@ -502,6 +508,94 @@ impl GetStream { } } +/// workaround for <https://github.com/apache/arrow-rs/issues/3591> +/// +/// data encoder stream that always sends a Schema message even if the +/// underlying stream is empty +struct IOxFlightDataEncoder { + inner: FlightDataEncoder, + // The schema of the inner stream. Set to None when a schema + // message has been sent. 
+ schema: Option<SchemaRef>, + done: bool, +} + +impl IOxFlightDataEncoder { + fn new(inner: FlightDataEncoder, schema: SchemaRef) -> Self { + Self { + inner, + schema: Some(schema), + done: false, + } + } +} + +#[derive(Debug)] +struct IOxFlightDataEncoderBuilder { + inner: FlightDataEncoderBuilder, + schema: SchemaRef, +} + +impl IOxFlightDataEncoderBuilder { + fn new(schema: SchemaRef) -> Self { + Self { + inner: FlightDataEncoderBuilder::new(), + schema, + } + } + + pub fn with_metadata(mut self, app_metadata: Bytes) -> Self { + self.inner = self.inner.with_metadata(app_metadata); + self + } + + pub fn build<S>(self, input: S) -> IOxFlightDataEncoder + where + S: Stream<Item = arrow_flight::error::Result<RecordBatch>> + Send + 'static, + { + let Self { inner, schema } = self; + + IOxFlightDataEncoder::new(inner.build(input), schema) + } +} + +impl Stream for IOxFlightDataEncoder { + type Item = arrow_flight::error::Result<FlightData>; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll<Option<Self::Item>> { + loop { + if self.done { + return Poll::Ready(None); + } + + let res = ready!(self.inner.poll_next_unpin(cx)); + match res { + None => { + self.done = true; + // return a schema message if we haven't sent any data + if let Some(schema) = self.schema.take() { + let options = IpcWriteOptions::default(); + let data: FlightData = SchemaAsIpc::new(schema.as_ref(), &options).into(); + return Poll::Ready(Some(Ok(data))); + } + } + Some(Ok(data)) => { + // If any data is returned from the underlying stream no need to resend schema + self.schema = None; + return Poll::Ready(Some(Ok(data))); + } + Some(Err(e)) => { + self.done = true; + return Poll::Ready(Some(Err(e))); + } + } + } + } +} + impl Stream for GetStream { type Item = Result<FlightData, tonic::Status>; @@ -533,7 +627,7 @@ impl Stream for GetStream { } } -// TODO remove this after +// TODO remove this when this is released: // https://github.com/apache/arrow-rs/issues/3566 fn flight_error_to_status(e: FlightError) -> tonic::Status { // Special error code translation logic for finding root of chain:
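The change above works around arrow-rs issue 3591 by wrapping the Flight data encoder so that a schema message is still sent when a query yields no record batches. Below is a minimal synchronous sketch of the same wrapping idea, using a plain Iterator in place of the async Stream and simple strings in place of Arrow FlightData; the Msg and SchemaFirst names are invented for this sketch and are not arrow-flight APIs.

// Simplified analogue of the IOxFlightDataEncoder workaround: guarantee that a
// schema item is yielded even when the wrapped source yields nothing at all.
#[derive(Debug, PartialEq)]
enum Msg {
    Schema(String),
    Data(String),
}

struct SchemaFirst<I> {
    inner: I,
    // Held until either the inner source yields something or it finishes empty.
    schema: Option<String>,
    done: bool,
}

impl<I: Iterator<Item = Msg>> Iterator for SchemaFirst<I> {
    type Item = Msg;

    fn next(&mut self) -> Option<Msg> {
        if self.done {
            return None;
        }
        match self.inner.next() {
            Some(msg) => {
                // The inner source produced data, so it carries its own schema.
                self.schema = None;
                Some(msg)
            }
            None => {
                self.done = true;
                // Empty result set: still surface the schema exactly once.
                self.schema.take().map(Msg::Schema)
            }
        }
    }
}

fn main() {
    let empty = SchemaFirst {
        inner: std::iter::empty(),
        schema: Some("m0: f64,tag0".into()),
        done: false,
    };
    let out: Vec<Msg> = empty.collect();
    assert_eq!(out, vec![Msg::Schema("m0: f64,tag0".into())]);
}

As in the diff, the wrapper forgets the held schema as soon as real data flows through, so the fallback message only appears for genuinely empty results.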
63de1a3bc8f52c7005c87f03381a9816dc373847
Dom Dwyer
2023-05-17 14:03:31
use "tag" instead of "column"
I was going back and forth on this, but the MVP is tags only. If we expand it to be the more general "columns" in the future, we can change the proto to reflect the more generalised implementation and have a more descriptive field name now!
null
refactor(proto): use "tag" instead of "column"

I was going back and forth on this, but the MVP is tags only. If we expand it to the more general "columns" in the future, we can change the proto to reflect the more generalised implementation then; sticking with "tag" gives us a more descriptive field name now.
diff --git a/generated_types/protos/influxdata/iox/partition_template/v1/template.proto b/generated_types/protos/influxdata/iox/partition_template/v1/template.proto index 9487e8228e..61d7f89835 100644 --- a/generated_types/protos/influxdata/iox/partition_template/v1/template.proto +++ b/generated_types/protos/influxdata/iox/partition_template/v1/template.proto @@ -12,10 +12,7 @@ message PartitionTemplate { // // For example, given the following template: // - // [ - // TemplatePart::time_format("%Y.%j") - // TemplatePart::column_value("region") - // ] + // [ TemplatePart::time_format("%Y.%j") TemplatePart::tag_value("region") ] // // The below example rows would have the specified partition key derived: // @@ -29,12 +26,12 @@ message PartitionTemplate { // A sub-part of a PartitionTemplate. message TemplatePart { oneof part { - // A column value matcher extracts a string value from the column with the + // A tag value matcher extracts a string value from the tag with the // specified name. // - // If a row does not contain the specified column, the provided column name - // is rendered instead of the (missing) value. - string column_value = 1; + // If a row does not contain the specified tag, the provided tag name is + // rendered instead of the (missing) value. + string tag_value = 1; // A time format matcher accepts a "strftime"-like format string and // evaluates it against the "time" column.
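The proto comments above describe how a template made of `time_format` and `tag_value` parts is rendered against a row, with a missing tag rendering the tag name instead of a value. The following is an illustrative, hypothetical sketch of that rendering, not the actual IOx derivation code; the `partition_key` helper and the key separator are assumptions made purely for the example.

```rust
use std::collections::HashMap;

use chrono::{TimeZone, Utc};

/// The two template parts described by the proto above.
enum TemplatePart {
    TagValue(String),
    TimeFormat(String),
}

/// Render a partition key for one row. If a tag is missing, the tag *name*
/// is rendered in place of the value, matching the proto comment.
fn partition_key(parts: &[TemplatePart], tags: &HashMap<String, String>, time_ns: i64) -> String {
    parts
        .iter()
        .map(|part| match part {
            TemplatePart::TagValue(name) => {
                tags.get(name).cloned().unwrap_or_else(|| name.clone())
            }
            // "strftime"-like format evaluated against the row's time column.
            TemplatePart::TimeFormat(fmt) => {
                Utc.timestamp_nanos(time_ns).format(fmt).to_string()
            }
        })
        .collect::<Vec<_>>()
        // NOTE: the joining separator is an assumption for illustration only.
        .join("|")
}

fn main() {
    let template = [
        TemplatePart::TimeFormat("%Y.%j".to_string()),
        TemplatePart::TagValue("region".to_string()),
    ];
    let tags = HashMap::from([("region".to_string(), "us-west".to_string())]);
    // 2023-01-01T00:00:00Z in nanoseconds since the epoch.
    let key = partition_key(&template, &tags, 1_672_531_200_000_000_000);
    assert_eq!(key, "2023.001|us-west");
}
```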
2ed5758ddba42f0626e9f8d2b38d3d99e9aa4d78
Stuart Carnie
2023-02-27 11:50:06
InfluxQL planner learns how to project multiple measurements (#7063)
* feat: Planner learns how to project multiple measurements

  Closes #6896

* chore: Update test

* chore: PR feedback
null
feat: InfluxQL planner learns how to project multiple measurements (#7063)

* feat: Planner learns how to project multiple measurements

  Closes #6896

* chore: Update test

* chore: PR feedback
diff --git a/influxdb_iox/tests/end_to_end_cases/influxql.rs b/influxdb_iox/tests/end_to_end_cases/influxql.rs index 2345ec8ff1..f9f8da43ab 100644 --- a/influxdb_iox/tests/end_to_end_cases/influxql.rs +++ b/influxdb_iox/tests/end_to_end_cases/influxql.rs @@ -50,12 +50,12 @@ async fn influxql_select_returns_results() { Step::InfluxQLQuery { query: format!("select tag1, val from {table_name}"), expected: vec![ - "+--------------------------------+------+-----+", - "| time | tag1 | val |", - "+--------------------------------+------+-----+", - "| 1970-01-01T00:00:00.000123456Z | A | 42 |", - "| 1970-01-01T00:00:00.000123457Z | A | 43 |", - "+--------------------------------+------+-----+", + "+------------------+--------------------------------+------+-----+", + "| iox::measurement | time | tag1 | val |", + "+------------------+--------------------------------+------+-----+", + "| the_table | 1970-01-01T00:00:00.000123456Z | A | 42 |", + "| the_table | 1970-01-01T00:00:00.000123457Z | A | 43 |", + "+------------------+--------------------------------+------+-----+", ], }, ], diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql index 408b63661f..30a42f17de 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql @@ -10,6 +10,9 @@ -- IOX_COMPARE: sorted SELECT * FROM m0; +-- No matching measurement +SELECT * FROM non_existent; + -- Projection wildcard, only tags -- IOX_COMPARE: sorted SELECT *::tag, f64 FROM m0; @@ -250,3 +253,9 @@ SELECT tag0, i64, i64 * 0.5, i64 + f64::integer, i64 & 1 FROM m0 WHERE f64 > 19; -- non-existing column SELECT f64, non_existing, f64 + non_existing FROM m0 WHERE f64 > 19; + +-- +-- Multiple measurements in the FROM clause +-- + +SELECT usage_idle, bytes_used FROM cpu, disk; \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index 9818274054..b4198d805e 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -1,367 +1,370 @@ -- Test Setup: InfluxQLSelectSupport -- InfluxQL: SELECT * FROM m0; -- Results After Sorting -+----------------------+------+-----+-----+-------+-------+ -| time | f64 | i64 | str | tag0 | tag1 | -+----------------------+------+-----+-----+-------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 101 | hi | val00 | | -| 2022-10-31T02:00:00Z | 10.4 | 101 | lo | val02 | | -| 2022-10-31T02:00:00Z | 11.3 | 211 | lo | val01 | | -| 2022-10-31T02:00:10Z | 18.9 | 211 | lo | val00 | val10 | -| 2022-10-31T02:00:10Z | 21.2 | 211 | hi | val00 | | -| 2022-10-31T02:00:20Z | 11.2 | 191 | lo | val00 | | -| 2022-10-31T02:00:30Z | 19.2 | 392 | lo | val00 | | -+----------------------+------+-----+-----+-------+-------+ ++------------------+----------------------+------+-----+-----+-------+-------+ +| iox::measurement | time | f64 | i64 | str | tag0 | tag1 | ++------------------+----------------------+------+-----+-----+-------+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 101 | hi | val00 | | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 101 | lo | val02 | | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 211 | lo | val01 | | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 211 | lo | val00 | val10 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 211 | hi | val00 | | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 191 | 
lo | val00 | | +| m0 | 2022-10-31T02:00:30Z | 19.2 | 392 | lo | val00 | | ++------------------+----------------------+------+-----+-----+-------+-------+ +-- InfluxQL: SELECT * FROM non_existent; +++ +++ -- InfluxQL: SELECT *::tag, f64 FROM m0; -- Results After Sorting -+----------------------+-------+-------+------+ -| time | tag0 | tag1 | f64 | -+----------------------+-------+-------+------+ -| 2022-10-31T02:00:00Z | val00 | | 10.1 | -| 2022-10-31T02:00:00Z | val01 | | 11.3 | -| 2022-10-31T02:00:00Z | val02 | | 10.4 | -| 2022-10-31T02:00:10Z | val00 | val10 | 18.9 | -| 2022-10-31T02:00:10Z | val00 | | 21.2 | -| 2022-10-31T02:00:20Z | val00 | | 11.2 | -| 2022-10-31T02:00:30Z | val00 | | 19.2 | -+----------------------+-------+-------+------+ ++------------------+----------------------+-------+-------+------+ +| iox::measurement | time | tag0 | tag1 | f64 | ++------------------+----------------------+-------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | val00 | | 10.1 | +| m0 | 2022-10-31T02:00:00Z | val01 | | 11.3 | +| m0 | 2022-10-31T02:00:00Z | val02 | | 10.4 | +| m0 | 2022-10-31T02:00:10Z | val00 | val10 | 18.9 | +| m0 | 2022-10-31T02:00:10Z | val00 | | 21.2 | +| m0 | 2022-10-31T02:00:20Z | val00 | | 11.2 | +| m0 | 2022-10-31T02:00:30Z | val00 | | 19.2 | ++------------------+----------------------+-------+-------+------+ -- InfluxQL: SELECT *::field FROM m0; -- Results After Sorting -+----------------------+------+-----+-----+ -| time | f64 | i64 | str | -+----------------------+------+-----+-----+ -| 2022-10-31T02:00:00Z | 10.1 | 101 | hi | -| 2022-10-31T02:00:00Z | 10.4 | 101 | lo | -| 2022-10-31T02:00:00Z | 11.3 | 211 | lo | -| 2022-10-31T02:00:10Z | 18.9 | 211 | lo | -| 2022-10-31T02:00:10Z | 21.2 | 211 | hi | -| 2022-10-31T02:00:20Z | 11.2 | 191 | lo | -| 2022-10-31T02:00:30Z | 19.2 | 392 | lo | -+----------------------+------+-----+-----+ ++------------------+----------------------+------+-----+-----+ +| iox::measurement | time | f64 | i64 | str | ++------------------+----------------------+------+-----+-----+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 101 | hi | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 101 | lo | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 211 | lo | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 211 | lo | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 211 | hi | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 191 | lo | +| m0 | 2022-10-31T02:00:30Z | 19.2 | 392 | lo | ++------------------+----------------------+------+-----+-----+ -- InfluxQL: SELECT /64|tag0/ FROM m0; -- Results After Sorting -+----------------------+------+-----+-------+ -| time | f64 | i64 | tag0 | -+----------------------+------+-----+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 101 | val00 | -| 2022-10-31T02:00:00Z | 10.4 | 101 | val02 | -| 2022-10-31T02:00:00Z | 11.3 | 211 | val01 | -| 2022-10-31T02:00:10Z | 18.9 | 211 | val00 | -| 2022-10-31T02:00:10Z | 21.2 | 211 | val00 | -| 2022-10-31T02:00:20Z | 11.2 | 191 | val00 | -| 2022-10-31T02:00:30Z | 19.2 | 392 | val00 | -+----------------------+------+-----+-------+ ++------------------+----------------------+------+-----+-------+ +| iox::measurement | time | f64 | i64 | tag0 | ++------------------+----------------------+------+-----+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 101 | val00 | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 101 | val02 | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 211 | val01 | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 211 | val00 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 211 | val00 | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 191 | val00 | +| m0 | 2022-10-31T02:00:30Z | 
19.2 | 392 | val00 | ++------------------+----------------------+------+-----+-------+ -- InfluxQL: SELECT f64, tag0 FROM m0; -- Results After Sorting -+----------------------+------+-------+ -| time | f64 | tag0 | -+----------------------+------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | val00 | -| 2022-10-31T02:00:00Z | 10.4 | val02 | -| 2022-10-31T02:00:00Z | 11.3 | val01 | -| 2022-10-31T02:00:10Z | 18.9 | val00 | -| 2022-10-31T02:00:10Z | 21.2 | val00 | -| 2022-10-31T02:00:20Z | 11.2 | val00 | -| 2022-10-31T02:00:30Z | 19.2 | val00 | -+----------------------+------+-------+ ++------------------+----------------------+------+-------+ +| iox::measurement | time | f64 | tag0 | ++------------------+----------------------+------+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | val00 | +| m0 | 2022-10-31T02:00:00Z | 10.4 | val02 | +| m0 | 2022-10-31T02:00:00Z | 11.3 | val01 | +| m0 | 2022-10-31T02:00:10Z | 18.9 | val00 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | val00 | +| m0 | 2022-10-31T02:00:20Z | 11.2 | val00 | +| m0 | 2022-10-31T02:00:30Z | 19.2 | val00 | ++------------------+----------------------+------+-------+ -- InfluxQL: SELECT f64, tag0, time FROM m0; -- Results After Sorting -+------+-------+----------------------+ -| f64 | tag0 | time | -+------+-------+----------------------+ -| 10.1 | val00 | 2022-10-31T02:00:00Z | -| 10.4 | val02 | 2022-10-31T02:00:00Z | -| 11.2 | val00 | 2022-10-31T02:00:20Z | -| 11.3 | val01 | 2022-10-31T02:00:00Z | -| 18.9 | val00 | 2022-10-31T02:00:10Z | -| 19.2 | val00 | 2022-10-31T02:00:30Z | -| 21.2 | val00 | 2022-10-31T02:00:10Z | -+------+-------+----------------------+ ++------------------+------+-------+----------------------+ +| iox::measurement | f64 | tag0 | time | ++------------------+------+-------+----------------------+ +| m0 | 10.1 | val00 | 2022-10-31T02:00:00Z | +| m0 | 10.4 | val02 | 2022-10-31T02:00:00Z | +| m0 | 11.2 | val00 | 2022-10-31T02:00:20Z | +| m0 | 11.3 | val01 | 2022-10-31T02:00:00Z | +| m0 | 18.9 | val00 | 2022-10-31T02:00:10Z | +| m0 | 19.2 | val00 | 2022-10-31T02:00:30Z | +| m0 | 21.2 | val00 | 2022-10-31T02:00:10Z | ++------------------+------+-------+----------------------+ -- InfluxQL: SELECT f64, f64 * 2, i64, i64 + i64 FROM m0; -- Results After Sorting -+----------------------+------+-------+-----+---------+ -| time | f64 | f64_1 | i64 | i64_i64 | -+----------------------+------+-------+-----+---------+ -| 2022-10-31T02:00:00Z | 10.1 | 20.2 | 101 | 202 | -| 2022-10-31T02:00:00Z | 10.4 | 20.8 | 101 | 202 | -| 2022-10-31T02:00:00Z | 11.3 | 22.6 | 211 | 422 | -| 2022-10-31T02:00:10Z | 18.9 | 37.8 | 211 | 422 | -| 2022-10-31T02:00:10Z | 21.2 | 42.4 | 211 | 422 | -| 2022-10-31T02:00:20Z | 11.2 | 22.4 | 191 | 382 | -| 2022-10-31T02:00:30Z | 19.2 | 38.4 | 392 | 784 | -+----------------------+------+-------+-----+---------+ ++------------------+----------------------+------+-------+-----+---------+ +| iox::measurement | time | f64 | f64_1 | i64 | i64_i64 | ++------------------+----------------------+------+-------+-----+---------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 20.2 | 101 | 202 | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 20.8 | 101 | 202 | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 22.6 | 211 | 422 | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 37.8 | 211 | 422 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 42.4 | 211 | 422 | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 22.4 | 191 | 382 | +| m0 | 2022-10-31T02:00:30Z | 19.2 | 38.4 | 392 | 784 | ++------------------+----------------------+------+-------+-----+---------+ -- InfluxQL: SELECT i64, i64 & 1 
FROM m0; -- Results After Sorting -+----------------------+-----+-------+ -| time | i64 | i64_1 | -+----------------------+-----+-------+ -| 2022-10-31T02:00:00Z | 101 | 1 | -| 2022-10-31T02:00:00Z | 101 | 1 | -| 2022-10-31T02:00:00Z | 211 | 1 | -| 2022-10-31T02:00:10Z | 211 | 1 | -| 2022-10-31T02:00:10Z | 211 | 1 | -| 2022-10-31T02:00:20Z | 191 | 1 | -| 2022-10-31T02:00:30Z | 392 | 0 | -+----------------------+-----+-------+ ++------------------+----------------------+-----+-------+ +| iox::measurement | time | i64 | i64_1 | ++------------------+----------------------+-----+-------+ +| m0 | 2022-10-31T02:00:00Z | 101 | 1 | +| m0 | 2022-10-31T02:00:00Z | 101 | 1 | +| m0 | 2022-10-31T02:00:00Z | 211 | 1 | +| m0 | 2022-10-31T02:00:10Z | 211 | 1 | +| m0 | 2022-10-31T02:00:10Z | 211 | 1 | +| m0 | 2022-10-31T02:00:20Z | 191 | 1 | +| m0 | 2022-10-31T02:00:30Z | 392 | 0 | ++------------------+----------------------+-----+-------+ -- InfluxQL: SELECT f64 + i64 FROM m0; -- Results After Sorting -+----------------------+---------+ -| time | f64_i64 | -+----------------------+---------+ -| 2022-10-31T02:00:00Z | 111.1 | -| 2022-10-31T02:00:00Z | 111.4 | -| 2022-10-31T02:00:00Z | 222.3 | -| 2022-10-31T02:00:10Z | 229.9 | -| 2022-10-31T02:00:10Z | 232.2 | -| 2022-10-31T02:00:20Z | 202.2 | -| 2022-10-31T02:00:30Z | 411.2 | -+----------------------+---------+ ++------------------+----------------------+---------+ +| iox::measurement | time | f64_i64 | ++------------------+----------------------+---------+ +| m0 | 2022-10-31T02:00:00Z | 111.1 | +| m0 | 2022-10-31T02:00:00Z | 111.4 | +| m0 | 2022-10-31T02:00:00Z | 222.3 | +| m0 | 2022-10-31T02:00:10Z | 229.9 | +| m0 | 2022-10-31T02:00:10Z | 232.2 | +| m0 | 2022-10-31T02:00:20Z | 202.2 | +| m0 | 2022-10-31T02:00:30Z | 411.2 | ++------------------+----------------------+---------+ -- InfluxQL: SELECT f64, f64::integer FROM m0; -+----------------------+------+-------+ -| time | f64 | f64_1 | -+----------------------+------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 10 | -| 2022-10-31T02:00:00Z | 11.3 | 11 | -| 2022-10-31T02:00:00Z | 10.4 | 10 | -| 2022-10-31T02:00:10Z | 21.2 | 21 | -| 2022-10-31T02:00:10Z | 18.9 | 18 | -| 2022-10-31T02:00:20Z | 11.2 | 11 | -| 2022-10-31T02:00:30Z | 19.2 | 19 | -+----------------------+------+-------+ ++------------------+----------------------+------+-------+ +| iox::measurement | time | f64 | f64_1 | ++------------------+----------------------+------+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 10 | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 11 | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 10 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 21 | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 18 | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 11 | +| m0 | 2022-10-31T02:00:30Z | 19.2 | 19 | ++------------------+----------------------+------+-------+ -- InfluxQL: SELECT f64 AS f64_2, f64, f64, f64 FROM m0 LIMIT 1; -- Results After Sorting -+----------------------+-------+------+-------+-------+ -| time | f64_2 | f64 | f64_1 | f64_3 | -+----------------------+-------+------+-------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 10.1 | 10.1 | 10.1 | -+----------------------+-------+------+-------+-------+ ++------------------+----------------------+-------+------+-------+-------+ +| iox::measurement | time | f64_2 | f64 | f64_1 | f64_3 | ++------------------+----------------------+-------+------+-------+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 10.1 | 10.1 | 10.1 | ++------------------+----------------------+-------+------+-------+-------+ -- InfluxQL: SELECT tag0, 
f64 FROM m0 WHERE tag0 = 'val00'; -- Results After Sorting -+----------------------+-------+------+ -| time | tag0 | f64 | -+----------------------+-------+------+ -| 2022-10-31T02:00:00Z | val00 | 10.1 | -| 2022-10-31T02:00:10Z | val00 | 18.9 | -| 2022-10-31T02:00:10Z | val00 | 21.2 | -| 2022-10-31T02:00:20Z | val00 | 11.2 | -| 2022-10-31T02:00:30Z | val00 | 19.2 | -+----------------------+-------+------+ ++------------------+----------------------+-------+------+ +| iox::measurement | time | tag0 | f64 | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | val00 | 10.1 | +| m0 | 2022-10-31T02:00:10Z | val00 | 18.9 | +| m0 | 2022-10-31T02:00:10Z | val00 | 21.2 | +| m0 | 2022-10-31T02:00:20Z | val00 | 11.2 | +| m0 | 2022-10-31T02:00:30Z | val00 | 19.2 | ++------------------+----------------------+-------+------+ -- InfluxQL: SELECT tag0, f64 FROM m0 WHERE tag0 =~ /^val0(1|2)/; -- Results After Sorting -+----------------------+-------+------+ -| time | tag0 | f64 | -+----------------------+-------+------+ -| 2022-10-31T02:00:00Z | val01 | 11.3 | -| 2022-10-31T02:00:00Z | val02 | 10.4 | -+----------------------+-------+------+ ++------------------+----------------------+-------+------+ +| iox::measurement | time | tag0 | f64 | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | val01 | 11.3 | +| m0 | 2022-10-31T02:00:00Z | val02 | 10.4 | ++------------------+----------------------+-------+------+ -- InfluxQL: SELECT /tag(0|1)/, f64 FROM m0 WHERE tag0 = 'val00' AND tag1 = 'val10'; -- Results After Sorting -+----------------------+-------+-------+------+ -| time | tag0 | tag1 | f64 | -+----------------------+-------+-------+------+ -| 2022-10-31T02:00:10Z | val00 | val10 | 18.9 | -+----------------------+-------+-------+------+ ++------------------+----------------------+-------+-------+------+ +| iox::measurement | time | tag0 | tag1 | f64 | ++------------------+----------------------+-------+-------+------+ +| m0 | 2022-10-31T02:00:10Z | val00 | val10 | 18.9 | ++------------------+----------------------+-------+-------+------+ -- InfluxQL: SELECT /tag(0|1)/, f64 FROM m0 WHERE tag0 = 'val00' OR tag1 = 'val10'; -- Results After Sorting -+----------------------+-------+-------+------+ -| time | tag0 | tag1 | f64 | -+----------------------+-------+-------+------+ -| 2022-10-31T02:00:00Z | val00 | | 10.1 | -| 2022-10-31T02:00:10Z | val00 | val10 | 18.9 | -| 2022-10-31T02:00:10Z | val00 | | 21.2 | -| 2022-10-31T02:00:20Z | val00 | | 11.2 | -| 2022-10-31T02:00:30Z | val00 | | 19.2 | -+----------------------+-------+-------+------+ ++------------------+----------------------+-------+-------+------+ +| iox::measurement | time | tag0 | tag1 | f64 | ++------------------+----------------------+-------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | val00 | | 10.1 | +| m0 | 2022-10-31T02:00:10Z | val00 | val10 | 18.9 | +| m0 | 2022-10-31T02:00:10Z | val00 | | 21.2 | +| m0 | 2022-10-31T02:00:20Z | val00 | | 11.2 | +| m0 | 2022-10-31T02:00:30Z | val00 | | 19.2 | ++------------------+----------------------+-------+-------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 > 10 + 10; -- Results After Sorting -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | 
++------------------+----------------------+------+ -- InfluxQL: SELECT i64 FROM m0 WHERE i64 & 1 = 0; -- Results After Sorting -+----------------------+-----+ -| time | i64 | -+----------------------+-----+ -| 2022-10-31T02:00:30Z | 392 | -+----------------------+-----+ ++------------------+----------------------+-----+ +| iox::measurement | time | i64 | ++------------------+----------------------+-----+ +| m0 | 2022-10-31T02:00:30Z | 392 | ++------------------+----------------------+-----+ -- InfluxQL: SELECT i64 FROM m0 WHERE time > '2022-10-31'; -- Results After Sorting -+----------------------+-----+ -| time | i64 | -+----------------------+-----+ -| 2022-10-31T02:00:00Z | 101 | -| 2022-10-31T02:00:00Z | 101 | -| 2022-10-31T02:00:00Z | 211 | -| 2022-10-31T02:00:10Z | 211 | -| 2022-10-31T02:00:10Z | 211 | -| 2022-10-31T02:00:20Z | 191 | -| 2022-10-31T02:00:30Z | 392 | -+----------------------+-----+ ++------------------+----------------------+-----+ +| iox::measurement | time | i64 | ++------------------+----------------------+-----+ +| m0 | 2022-10-31T02:00:00Z | 101 | +| m0 | 2022-10-31T02:00:00Z | 101 | +| m0 | 2022-10-31T02:00:00Z | 211 | +| m0 | 2022-10-31T02:00:10Z | 211 | +| m0 | 2022-10-31T02:00:10Z | 211 | +| m0 | 2022-10-31T02:00:20Z | 191 | +| m0 | 2022-10-31T02:00:30Z | 392 | ++------------------+----------------------+-----+ -- InfluxQL: SELECT i64 FROM m0 WHERE time > '2022-10-31 02:00:10'; -- Results After Sorting -+----------------------+-----+ -| time | i64 | -+----------------------+-----+ -| 2022-10-31T02:00:20Z | 191 | -| 2022-10-31T02:00:30Z | 392 | -+----------------------+-----+ ++------------------+----------------------+-----+ +| iox::measurement | time | i64 | ++------------------+----------------------+-----+ +| m0 | 2022-10-31T02:00:20Z | 191 | +| m0 | 2022-10-31T02:00:30Z | 392 | ++------------------+----------------------+-----+ -- InfluxQL: SELECT i64 FROM m0 WHERE time > now() - 100000d; -- Results After Sorting -+----------------------+-----+ -| time | i64 | -+----------------------+-----+ -| 2022-10-31T02:00:00Z | 101 | -| 2022-10-31T02:00:00Z | 101 | -| 2022-10-31T02:00:00Z | 211 | -| 2022-10-31T02:00:10Z | 211 | -| 2022-10-31T02:00:10Z | 211 | -| 2022-10-31T02:00:20Z | 191 | -| 2022-10-31T02:00:30Z | 392 | -+----------------------+-----+ ++------------------+----------------------+-----+ +| iox::measurement | time | i64 | ++------------------+----------------------+-----+ +| m0 | 2022-10-31T02:00:00Z | 101 | +| m0 | 2022-10-31T02:00:00Z | 101 | +| m0 | 2022-10-31T02:00:00Z | 211 | +| m0 | 2022-10-31T02:00:10Z | 211 | +| m0 | 2022-10-31T02:00:10Z | 211 | +| m0 | 2022-10-31T02:00:20Z | 191 | +| m0 | 2022-10-31T02:00:30Z | 392 | ++------------------+----------------------+-----+ -- InfluxQL: SELECT tag1, f64 FROM m0 WHERE tag1 != ''; -- Results After Sorting -+----------------------+-------+------+ -| time | tag1 | f64 | -+----------------------+-------+------+ -| 2022-10-31T02:00:10Z | val10 | 18.9 | -+----------------------+-------+------+ ++------------------+----------------------+-------+------+ +| iox::measurement | time | tag1 | f64 | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:10Z | val10 | 18.9 | ++------------------+----------------------+-------+------+ -- InfluxQL: SELECT tag0, f64 FROM m0 LIMIT 1; -+----------------------+-------+------+ -| time | tag0 | f64 | -+----------------------+-------+------+ -| 2022-10-31T02:00:00Z | val00 | 10.1 | -+----------------------+-------+------+ 
++------------------+----------------------+-------+------+ +| iox::measurement | time | tag0 | f64 | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | val00 | 10.1 | ++------------------+----------------------+-------+------+ -- InfluxQL: SELECT tag0, f64 FROM m0 WHERE tag0 = 'val00' LIMIT 2 OFFSET 1; -+----------------------+-------+------+ -| time | tag0 | f64 | -+----------------------+-------+------+ -| 2022-10-31T02:00:10Z | val00 | 21.2 | -| 2022-10-31T02:00:10Z | val00 | 18.9 | -+----------------------+-------+------+ ++------------------+----------------------+-------+------+ +| iox::measurement | time | tag0 | f64 | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:10Z | val00 | 21.2 | +| m0 | 2022-10-31T02:00:10Z | val00 | 18.9 | ++------------------+----------------------+-------+------+ -- InfluxQL: SELECT tag0, f64 FROM m0 LIMIT 1 OFFSET 1; -+----------------------+-------+------+ -| time | tag0 | f64 | -+----------------------+-------+------+ -| 2022-10-31T02:00:00Z | val01 | 11.3 | -+----------------------+-------+------+ ++------------------+----------------------+-------+------+ +| iox::measurement | time | tag0 | f64 | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | val01 | 11.3 | ++------------------+----------------------+-------+------+ -- InfluxQL: SELECT * FROM m0; -+----------------------+------+-----+-----+-------+-------+ -| time | f64 | i64 | str | tag0 | tag1 | -+----------------------+------+-----+-----+-------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 101 | hi | val00 | | -| 2022-10-31T02:00:00Z | 11.3 | 211 | lo | val01 | | -| 2022-10-31T02:00:00Z | 10.4 | 101 | lo | val02 | | -| 2022-10-31T02:00:10Z | 21.2 | 211 | hi | val00 | | -| 2022-10-31T02:00:10Z | 18.9 | 211 | lo | val00 | val10 | -| 2022-10-31T02:00:20Z | 11.2 | 191 | lo | val00 | | -| 2022-10-31T02:00:30Z | 19.2 | 392 | lo | val00 | | -+----------------------+------+-----+-----+-------+-------+ ++------------------+----------------------+------+-----+-----+-------+-------+ +| iox::measurement | time | f64 | i64 | str | tag0 | tag1 | ++------------------+----------------------+------+-----+-----+-------+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 101 | hi | val00 | | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 211 | lo | val01 | | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 101 | lo | val02 | | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 211 | lo | val00 | val10 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 211 | hi | val00 | | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 191 | lo | val00 | | +| m0 | 2022-10-31T02:00:30Z | 19.2 | 392 | lo | val00 | | ++------------------+----------------------+------+-----+-----+-------+-------+ -- InfluxQL: SELECT * FROM m0 ORDER BY time DESC; -+----------------------+------+-----+-----+-------+-------+ -| time | f64 | i64 | str | tag0 | tag1 | -+----------------------+------+-----+-----+-------+-------+ -| 2022-10-31T02:00:30Z | 19.2 | 392 | lo | val00 | | -| 2022-10-31T02:00:20Z | 11.2 | 191 | lo | val00 | | -| 2022-10-31T02:00:10Z | 21.2 | 211 | hi | val00 | | -| 2022-10-31T02:00:10Z | 18.9 | 211 | lo | val00 | val10 | -| 2022-10-31T02:00:00Z | 10.1 | 101 | hi | val00 | | -| 2022-10-31T02:00:00Z | 11.3 | 211 | lo | val01 | | -| 2022-10-31T02:00:00Z | 10.4 | 101 | lo | val02 | | -+----------------------+------+-----+-----+-------+-------+ ++------------------+----------------------+------+-----+-----+-------+-------+ +| iox::measurement | time | f64 | i64 | str | tag0 | 
tag1 | ++------------------+----------------------+------+-----+-----+-------+-------+ +| m0 | 2022-10-31T02:00:30Z | 19.2 | 392 | lo | val00 | | +| m0 | 2022-10-31T02:00:20Z | 11.2 | 191 | lo | val00 | | +| m0 | 2022-10-31T02:00:10Z | 18.9 | 211 | lo | val00 | val10 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | 211 | hi | val00 | | +| m0 | 2022-10-31T02:00:00Z | 10.1 | 101 | hi | val00 | | +| m0 | 2022-10-31T02:00:00Z | 11.3 | 211 | lo | val01 | | +| m0 | 2022-10-31T02:00:00Z | 10.4 | 101 | lo | val02 | | ++------------------+----------------------+------+-----+-----+-------+-------+ -- InfluxQL: SELECT f64, abs(f64 * -1), sin(f64), cos(f64), tan(f64), asin(1/f64), acos(1/f64), atan(f64), atan2(f64, 2), exp(f64), ln(f64), log2(f64), log10(f64), sqrt(f64), pow(f64, 2), floor(f64), ceil(f64), round(f64) FROM m0 LIMIT 1; -+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ -| time | f64 | abs | sin | cos | tan | asin | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | -+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 10.1 | -0.6250706488928821 | -0.7805681801691837 | 0.8007893029375109 | 0.0991723838059207 | 1.471623942988976 | 1.47210806614649 | 1.3753055265462157 | 24343.00942440838 | 2.312535423847214 | 3.3362833878644325 | 1.0043213737826426 | 3.1780497164141406 | 102.00999999999999 | 10.0 | 11.0 | 10.0 | -+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ ++------------------+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ +| iox::measurement | time | f64 | abs | sin | cos | tan | asin | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | ++------------------+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | 10.1 | -0.6250706488928821 | -0.7805681801691837 | 0.8007893029375109 | 0.0991723838059207 | 1.471623942988976 | 1.47210806614649 | 1.3753055265462157 | 24343.00942440838 | 2.312535423847214 | 3.3362833878644325 | 1.0043213737826426 | 3.1780497164141406 | 102.00999999999999 | 10.0 | 11.0 | 10.0 | 
++------------------+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ -- InfluxQL: SELECT i64, abs(i64 * -1), sin(i64), cos(i64), tan(i64), acos(1/i64), atan(i64), atan2(i64, 2), exp(i64), ln(i64), log2(i64), log10(i64), sqrt(i64), pow(i64, 2), floor(i64), ceil(i64), round(i64) FROM m0 LIMIT 1; -+----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ -| time | i64 | abs | sin | cos | tan | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | -+----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ -| 2022-10-31T02:00:00Z | 101 | 101.0 | 0.45202578717835057 | 0.8920048697881602 | 0.5067526002248183 | 1.5707963267948966 | 1.560895660206908 | 1.5509969 | 7.307059979368067e43 | 4.61512051684126 | 6.658211482751795 | 2.0043213737826426 | 10.04987562112089 | 10201 | 101.0 | 101.0 | 101.0 | -+----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ ++------------------+----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ +| iox::measurement | time | i64 | abs | sin | cos | tan | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | ++------------------+----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ +| m0 | 2022-10-31T02:00:00Z | 101 | 101.0 | 0.45202578717835057 | 0.8920048697881602 | 0.5067526002248183 | 1.5707963267948966 | 1.560895660206908 | 1.5509969 | 7.307059979368067e43 | 4.61512051684126 | 6.658211482751795 | 2.0043213737826426 | 10.04987562112089 | 10201 | 101.0 | 101.0 | 101.0 | ++------------------+----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ -- InfluxQL: SELECT f64, asin(f64), acos(f64) FROM m0 LIMIT 1; -+----------------------+------+------+------+ -| time | f64 | asin | acos | -+----------------------+------+------+------+ -| 2022-10-31T02:00:00Z | 10.1 | NaN | NaN | -+----------------------+------+------+------+ 
++------------------+----------------------+------+------+------+ +| iox::measurement | time | f64 | asin | acos | ++------------------+----------------------+------+------+------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | NaN | NaN | ++------------------+----------------------+------+------+------+ -- InfluxQL: SELECT f64, pow(f64, pow(2, 10)) FROM m0 LIMIT 1; -+----------------------+------+-----+ -| time | f64 | pow | -+----------------------+------+-----+ -| 2022-10-31T02:00:00Z | 10.1 | inf | -+----------------------+------+-----+ ++------------------+----------------------+------+-----+ +| iox::measurement | time | f64 | pow | ++------------------+----------------------+------+-----+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | inf | ++------------------+----------------------+------+-----+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19 + 0.5; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 - 0.5 >= 19; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE str = 'h' + 'i'; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:00Z | 10.1 | -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64::integer & 1 = 1; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:00Z | 11.3 | -| 2022-10-31T02:00:10Z | 21.2 | -| 2022-10-31T02:00:20Z | 11.2 | -| 2022-10-31T02:00:30Z | 19.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:00Z | 11.3 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | +| m0 | 2022-10-31T02:00:20Z | 11.2 | +| m0 | 2022-10-31T02:00:30Z | 19.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64::integer & 1 = 0; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:00Z | 10.1 | -| 2022-10-31T02:00:00Z | 10.4 | -| 2022-10-31T02:00:10Z | 18.9 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | +| m0 | 2022-10-31T02:00:00Z | 10.4 | +| m0 | 2022-10-31T02:00:10Z | 18.9 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 & 1 = 1; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:00Z | 
11.3 | -| 2022-10-31T02:00:10Z | 21.2 | -| 2022-10-31T02:00:20Z | 11.2 | -| 2022-10-31T02:00:30Z | 19.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:00Z | 11.3 | +| m0 | 2022-10-31T02:00:10Z | 21.2 | +| m0 | 2022-10-31T02:00:20Z | 11.2 | +| m0 | 2022-10-31T02:00:30Z | 19.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 & 1 = 0; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:00Z | 10.1 | -| 2022-10-31T02:00:00Z | 10.4 | -| 2022-10-31T02:00:10Z | 18.9 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:00Z | 10.1 | +| m0 | 2022-10-31T02:00:00Z | 10.4 | +| m0 | 2022-10-31T02:00:10Z | 18.9 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19 + 0.5 OR str = 1; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND str = 1; ++ ++ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19 + 0.5 OR non_existent = 1; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND non_existent = 1; ++ ++ @@ -369,36 +372,45 @@ ++ ++ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 OR f64 =~ /foo/; -+----------------------+------+ -| time | f64 | -+----------------------+------+ -| 2022-10-31T02:00:10Z | 21.2 | -+----------------------+------+ ++------------------+----------------------+------+ +| iox::measurement | time | f64 | ++------------------+----------------------+------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | ++------------------+----------------------+------+ -- InfluxQL: SELECT tag0, f64, f64 * 0.5, f64 + str FROM m0 WHERE f64 > 19; -+----------------------+-------+------+-------+---------+ -| time | tag0 | f64 | f64_1 | f64_str | -+----------------------+-------+------+-------+---------+ -| 2022-10-31T02:00:10Z | val00 | 21.2 | 10.6 | | -| 2022-10-31T02:00:30Z | val00 | 19.2 | 9.6 | | -+----------------------+-------+------+-------+---------+ ++------------------+----------------------+-------+------+-------+---------+ +| iox::measurement | time | tag0 | f64 | f64_1 | f64_str | ++------------------+----------------------+-------+------+-------+---------+ +| m0 | 2022-10-31T02:00:10Z | val00 | 21.2 | 10.6 | | +| m0 | 2022-10-31T02:00:30Z | val00 | 19.2 | 9.6 | | ++------------------+----------------------+-------+------+-------+---------+ -- InfluxQL: SELECT tag0, str, str + 'foo', str * 5 FROM m0 WHERE f64 > 19; -+----------------------+-------+-----+-------+-------+ -| time | tag0 | str | str_1 | str_2 | 
-+----------------------+-------+-----+-------+-------+ -| 2022-10-31T02:00:10Z | val00 | hi | hifoo | | -| 2022-10-31T02:00:30Z | val00 | lo | lofoo | | -+----------------------+-------+-----+-------+-------+ ++------------------+----------------------+-------+-----+-------+-------+ +| iox::measurement | time | tag0 | str | str_1 | str_2 | ++------------------+----------------------+-------+-----+-------+-------+ +| m0 | 2022-10-31T02:00:10Z | val00 | hi | hifoo | | +| m0 | 2022-10-31T02:00:30Z | val00 | lo | lofoo | | ++------------------+----------------------+-------+-----+-------+-------+ -- InfluxQL: SELECT tag0, i64, i64 * 0.5, i64 + f64::integer, i64 & 1 FROM m0 WHERE f64 > 19; -+----------------------+-------+-----+-------+---------+-------+ -| time | tag0 | i64 | i64_1 | i64_f64 | i64_2 | -+----------------------+-------+-----+-------+---------+-------+ -| 2022-10-31T02:00:10Z | val00 | 211 | 105.5 | 232 | 1 | -| 2022-10-31T02:00:30Z | val00 | 392 | 196.0 | 411 | 0 | -+----------------------+-------+-----+-------+---------+-------+ ++------------------+----------------------+-------+-----+-------+---------+-------+ +| iox::measurement | time | tag0 | i64 | i64_1 | i64_f64 | i64_2 | ++------------------+----------------------+-------+-----+-------+---------+-------+ +| m0 | 2022-10-31T02:00:10Z | val00 | 211 | 105.5 | 232 | 1 | +| m0 | 2022-10-31T02:00:30Z | val00 | 392 | 196.0 | 411 | 0 | ++------------------+----------------------+-------+-----+-------+---------+-------+ -- InfluxQL: SELECT f64, non_existing, f64 + non_existing FROM m0 WHERE f64 > 19; -+----------------------+------+--------------+------------------+ -| time | f64 | non_existing | f64_non_existing | -+----------------------+------+--------------+------------------+ -| 2022-10-31T02:00:10Z | 21.2 | | | -| 2022-10-31T02:00:30Z | 19.2 | | | -+----------------------+------+--------------+------------------+ \ No newline at end of file ++------------------+----------------------+------+--------------+------------------+ +| iox::measurement | time | f64 | non_existing | f64_non_existing | ++------------------+----------------------+------+--------------+------------------+ +| m0 | 2022-10-31T02:00:10Z | 21.2 | | | +| m0 | 2022-10-31T02:00:30Z | 19.2 | | | ++------------------+----------------------+------+--------------+------------------+ +-- InfluxQL: SELECT usage_idle, bytes_used FROM cpu, disk; ++------------------+----------------------+------------+------------+ +| iox::measurement | time | usage_idle | bytes_used | ++------------------+----------------------+------------+------------+ +| cpu | 2022-10-31T02:00:00Z | 0.98 | | +| cpu | 2022-10-31T02:00:10Z | 0.99 | | +| disk | 2022-10-31T02:00:00Z | | 219838.0 | +| disk | 2022-10-31T02:00:10Z | | 219833.0 | ++------------------+----------------------+------------+------------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/setups.rs b/influxdb_iox/tests/query_tests2/setups.rs index 05c8a073b5..14bf2ba253 100644 --- a/influxdb_iox/tests/query_tests2/setups.rs +++ b/influxdb_iox/tests/query_tests2/setups.rs @@ -1233,6 +1233,10 @@ pub static SETUPS: Lazy<HashMap<SetupName, SetupSteps>> = Lazy::new(|| { m1,tag0=val00 f64=100.5,i64=1001i,str="hi" 1667181600000000000 m1,tag0=val00 f64=200.6,i64=2001i,str="lo" 1667181610000000000 m1,tag0=val01 f64=101.7,i64=1011i,str="lo" 1667181600000000000 + cpu,host=host1,cpu=cpu0 usage_idle=0.98,usage_system=0.2 1667181600000000000 + cpu,host=host1,cpu=cpu0 usage_idle=0.99,usage_system=0.1 
1667181610000000000 + disk,host=host1,device=disk1s1 bytes_free=1234,bytes_used=219838 1667181600000000000 + disk,host=host1,device=disk1s1 bytes_free=1239,bytes_used=219833 1667181610000000000 "# .to_string(), ), diff --git a/iox_query/src/plan/influxql/field_mapper.rs b/iox_query/src/plan/influxql/field_mapper.rs index 5fdcb64b8a..d50e387df2 100644 --- a/iox_query/src/plan/influxql/field_mapper.rs +++ b/iox_query/src/plan/influxql/field_mapper.rs @@ -70,7 +70,7 @@ mod test { ); assert_eq!( tag_set, - TagSet::from(["host".to_string(), "region".to_string()]) + TagSet::from(["cpu".to_string(), "host".to_string(), "region".to_string()]) ); // Measurement does not exist diff --git a/iox_query/src/plan/influxql/planner.rs b/iox_query/src/plan/influxql/planner.rs index f77b61ca34..928efc337f 100644 --- a/iox_query/src/plan/influxql/planner.rs +++ b/iox_query/src/plan/influxql/planner.rs @@ -8,16 +8,14 @@ use crate::plan::influxql::var_ref::{ use crate::DataFusionError; use arrow::datatypes::DataType; use datafusion::common::{Result, ScalarValue, ToDFSchema}; -use datafusion::logical_expr::expr::Sort; use datafusion::logical_expr::expr_rewriter::{normalize_col, ExprRewritable, ExprRewriter}; use datafusion::logical_expr::logical_plan::builder::project; use datafusion::logical_expr::logical_plan::Analyze; use datafusion::logical_expr::{ - lit, BinaryExpr, BuiltinScalarFunction, Explain, Expr, ExprSchemable, LogicalPlan, - LogicalPlanBuilder, Operator, PlanType, TableSource, ToStringifiedPlan, + binary_expr, lit, BinaryExpr, BuiltinScalarFunction, Explain, Expr, ExprSchemable, LogicalPlan, + LogicalPlanBuilder, Operator, PlanType, Projection, TableSource, ToStringifiedPlan, }; -use datafusion::prelude::{binary_expr, Column}; -use datafusion_util::AsExpr; +use datafusion_util::{lit_dict, AsExpr}; use influxdb_influxql_parser::common::OrderByClause; use influxdb_influxql_parser::explain::{ExplainOption, ExplainStatement}; use influxdb_influxql_parser::expression::{ @@ -32,10 +30,11 @@ use influxdb_influxql_parser::{ select::{Field, FieldList, FromMeasurementClause, MeasurementSelection, SelectStatement}, statement::Statement, }; +use itertools::Itertools; use once_cell::sync::Lazy; use query_functions::clean_non_meta_escapes; use schema::{InfluxColumnType, InfluxFieldType, Schema}; -use std::collections::HashSet; +use std::collections::{HashSet, VecDeque}; use std::fmt::Debug; use std::iter; use std::ops::Deref; @@ -96,7 +95,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> { Err(DataFusionError::NotImplemented("DROP MEASUREMENT".into())) } Statement::Explain(explain) => self.explain_statement_to_plan(*explain), - Statement::Select(select) => self.select_statement_to_plan(*select), + Statement::Select(select) => { + self.select_statement_to_plan(&self.rewrite_select_statement(*select)?) + } Statement::ShowDatabases(_) => { Err(DataFusionError::NotImplemented("SHOW DATABASES".into())) } @@ -119,7 +120,8 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } fn explain_statement_to_plan(&self, explain: ExplainStatement) -> Result<LogicalPlan> { - let plan = self.select_statement_to_plan(*explain.select)?; + let plan = + self.select_statement_to_plan(&self.rewrite_select_statement(*explain.select)?)?; let plan = Arc::new(plan); let schema = LogicalPlan::explain_schema(); let schema = schema.to_dfschema_ref()?; @@ -149,56 +151,74 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } } - /// Create a [`LogicalPlan`] from the specified InfluxQL `SELECT` statement. 
- fn select_statement_to_plan(&self, select: SelectStatement) -> Result<LogicalPlan> { - let select = rewrite_statement(self.s, &select)?; - - // Process FROM clause - let plans = self.plan_from_tables(select.from)?; - - // Only support a single measurement to begin with - let plan = match plans.len() { - 0 => Err(DataFusionError::NotImplemented( - "unsupported FROM: schema must exist".into(), - )), - 1 => Ok(plans[0].clone()), - _ => Err(DataFusionError::NotImplemented( - "unsupported FROM: must target a single measurement".into(), - )), - }?; + fn rewrite_select_statement(&self, select: SelectStatement) -> Result<SelectStatement> { + rewrite_statement(self.s, &select) + } - let schemas = Schemas::new(plan.schema())?; - let tz = select.timezone.map(|tz| *tz); + /// Create a [`LogicalPlan`] from the specified InfluxQL `SELECT` statement. + fn select_statement_to_plan(&self, select: &SelectStatement) -> Result<LogicalPlan> { + let mut plans = self.plan_from_tables(&select.from)?; + + let Some(plan) = plans.pop_front() else { return LogicalPlanBuilder::empty(false).build(); }; + let plan = self.project_select(plan, select)?; + + // If there are multiple measurements, we need to sort by the measurement column + // NOTE: Ideally DataFusion would maintain the order of the UNION ALL, which would eliminate + // the need to sort by measurement. + // See: https://github.com/influxdata/influxdb_iox/issues/7062 + let mut series_sort = if !plans.is_empty() { + vec![Expr::sort("iox::measurement".as_expr(), true, false)] + } else { + vec![] + }; - let plan = self.plan_where_clause(select.condition, plan, &schemas, tz)?; + // UNION the remaining plans + let plan = plans.into_iter().try_fold(plan, |prev, next| { + let next = self.project_select(next, select)?; + LogicalPlanBuilder::from(prev).union(next)?.build() + })?; let plan = if select.group_by.is_none() { - LogicalPlanBuilder::from(plan) - .sort(iter::once(Expr::Sort(Sort { - expr: Box::new(Expr::Column(Column { - relation: None, - name: "time".to_string(), - })), - asc: match select.order_by { - // Default behaviour is to sort by time in ascending order if there is no ORDER BY - None | Some(OrderByClause::Ascending) => true, - Some(OrderByClause::Descending) => false, - }, - nulls_first: false, - })))? 
- .build() + // Generate the following sort: + // iox::measurement, time, [projected tags, sorted lexicographically] + + series_sort.push(Expr::sort( + "time".as_expr(), + match select.order_by { + // Default behaviour is to sort by time in ascending order if there is no ORDER BY + None | Some(OrderByClause::Ascending) => true, + Some(OrderByClause::Descending) => false, + }, + false, + )); + + series_sort.extend( + select + .fields + .iter() + .filter_map(|f| { + if let IQLExpr::VarRef { + name, + data_type: Some(VarRefDataType::Tag), + } = &f.expr + { + Some(name.deref()) + } else { + None + } + }) + // the tags must be sorted lexicographically in ascending order to match + // the ordering in InfluxQL + .sorted() + .map(|n| Expr::sort(n.as_expr(), true, false)), + ); + LogicalPlanBuilder::from(plan).sort(series_sort)?.build() } else { Err(DataFusionError::NotImplemented( "GROUP BY not supported".into(), )) }?; - // Process and validate the field expressions in the SELECT projection list - let select_exprs = self.field_list_to_exprs(&plan, select.fields, &schemas)?; - - // Wrap the plan in a `LogicalPlan::Projection` from the select expressions - let plan = project(plan, select_exprs)?; - let plan = self.limit(plan, select.offset, select.limit)?; let plan = self.slimit(plan, select.series_offset, select.series_limit)?; @@ -206,6 +226,27 @@ impl<'a> InfluxQLToLogicalPlan<'a> { Ok(plan) } + fn project_select(&self, plan: LogicalPlan, select: &SelectStatement) -> Result<LogicalPlan> { + let (proj, plan) = match plan { + LogicalPlan::Projection(Projection { expr, input, .. }) => { + (expr, input.deref().clone()) + } + // TODO: Review when we support subqueries, as this shouldn't be the case + _ => (vec![], plan), + }; + + let schemas = Schemas::new(plan.schema())?; + + let tz = select.timezone.as_deref().cloned(); + let plan = self.plan_where_clause(&select.condition, plan, &schemas, tz)?; + + // Process and validate the field expressions in the SELECT projection list + let select_exprs = self.field_list_to_exprs(&plan, &select.fields, &schemas)?; + + // Wrap the plan in a `LogicalPlan::Projection` from the select expressions + project(plan, proj.into_iter().chain(select_exprs.into_iter())) + } + /// Optionally wrap the input logical plan in a [`LogicalPlan::Limit`] node using the specified /// `offset` and `limit`. fn limit( @@ -249,11 +290,11 @@ impl<'a> InfluxQLToLogicalPlan<'a> { fn field_list_to_exprs( &self, plan: &LogicalPlan, - fields: FieldList, + fields: &FieldList, schemas: &Schemas, ) -> Result<Vec<Expr>> { // InfluxQL requires the time column is present in the projection list. - let extra = if !has_time_column(&fields) { + let extra = if !has_time_column(fields) { vec![Field { expr: IQLExpr::VarRef { name: "time".into(), @@ -283,12 +324,14 @@ impl<'a> InfluxQLToLogicalPlan<'a> { ) -> Result<Expr> { let expr = self.expr_to_df_expr(ExprScope::Projection, &field.expr, schemas)?; let expr = rewrite_field_expr(expr, schemas)?; - if let Some(alias) = &field.alias { - let expr = Expr::Alias(Box::new(expr), alias.deref().into()); - normalize_col(expr, plan) - } else { - normalize_col(expr, plan) - } + normalize_col( + if let Some(alias) = &field.alias { + expr.alias(alias.deref()) + } else { + expr + }, + plan, + ) } /// Map an InfluxQL [`ConditionalExpression`] to a DataFusion [`Expr`]. @@ -504,14 +547,14 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// optional InfluxQL conditional expression. 
fn plan_where_clause( &self, - condition: Option<WhereClause>, + condition: &Option<WhereClause>, plan: LogicalPlan, schemas: &Schemas, tz: Option<chrono_tz::Tz>, ) -> Result<LogicalPlan> { match condition { Some(where_clause) => { - let filter_expr = self.conditional_to_df_expr(&where_clause, schemas, tz)?; + let filter_expr = self.conditional_to_df_expr(where_clause, schemas, tz)?; let filter_expr = rewrite_conditional_expr(filter_expr, schemas)?; let plan = LogicalPlanBuilder::from(plan) .filter(filter_expr)? @@ -524,10 +567,10 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// Generate a list of logical plans for each of the tables references in the `FROM` /// clause. - fn plan_from_tables(&self, from: FromMeasurementClause) -> Result<Vec<LogicalPlan>> { - let mut plans = vec![]; + fn plan_from_tables(&self, from: &FromMeasurementClause) -> Result<VecDeque<LogicalPlan>> { + let mut plans = VecDeque::new(); for ms in from.iter() { - let plan = match ms { + let Some(plan) = match ms { MeasurementSelection::Name(qn) => match qn.name { MeasurementName::Name(ref ident) => { self.create_table_ref(normalize_identifier(ident)) @@ -540,20 +583,25 @@ impl<'a> InfluxQLToLogicalPlan<'a> { MeasurementSelection::Subquery(_) => Err(DataFusionError::NotImplemented( "subquery in FROM clause".into(), )), - }?; - plans.push(plan); + }? else { continue }; + plans.push_back(plan); } Ok(plans) } - /// Create a [LogicalPlan] that refers to the specified `table_name` or - /// an [LogicalPlan::EmptyRelation] if the table does not exist. - fn create_table_ref(&self, table_name: String) -> Result<LogicalPlan> { - if let Ok(source) = self.s.get_table_provider(&table_name) { - LogicalPlanBuilder::scan(&table_name, source, None)?.build() + /// Create a [LogicalPlan] that refers to the specified `table_name`. + /// + /// Normally, this functions will not return a `None`, as tables have been matched] + /// by the [`rewrite_statement`] function. + fn create_table_ref(&self, table_name: String) -> Result<Option<LogicalPlan>> { + Ok(if let Ok(source) = self.s.get_table_provider(&table_name) { + Some(project( + LogicalPlanBuilder::scan(&table_name, source, None)?.build()?, + iter::once(lit_dict(&table_name).alias("iox::measurement")), + )?) } else { - LogicalPlanBuilder::empty(false).build() - } + None + }) } } @@ -792,20 +840,51 @@ mod test { mod select { use super::*; + /// Verify the behaviour of the `FROM` clause when selecting from zero to many measurements. 
+ #[test] + fn test_from_zero_to_many() { + assert_snapshot!(plan("SELECT host, cpu, device, usage_idle, bytes_used FROM cpu, disk"), @r###" + Sort: iox::measurement ASC NULLS LAST, time ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_used [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time, disk.host AS host, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, disk.bytes_used AS bytes_used [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + "###); + + // nonexistent + assert_snapshot!(plan("SELECT host, usage_idle FROM non_existent"), @"EmptyRelation []"); + assert_snapshot!(plan("SELECT host, usage_idle FROM cpu, non_existent"), @r###" + Sort: cpu.time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + + // multiple of same measurement + assert_snapshot!(plan("SELECT host, usage_idle FROM cpu, cpu"), @r###" + Sort: iox::measurement ASC NULLS LAST, time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, 
time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + } + #[test] fn test_time_range_in_where() { assert_snapshot!( plan("SELECT foo, f64_field FROM data where time > now() - 10s"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: data.time > now() - IntervalMonthDayNano("10000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### ); assert_snapshot!( plan("SELECT foo, f64_field FROM data where time > '2004-04-09T02:33:45Z'"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: data.time > TimestampNanosecond(1081478025000000000, None) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, 
i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -817,8 +896,8 @@ mod test { // time on the right-hand side assert_snapshot!( plan("SELECT foo, f64_field FROM data where now() - 10s < time"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: now() - IntervalMonthDayNano("10000000000") < data.time [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -827,16 +906,16 @@ mod test { // Regular expression equality tests assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo =~ /f/"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: CAST(data.foo AS Utf8) ~ Utf8("f") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // regular expression for a numeric field is rewritten to `false` assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field =~ /f/"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - 
Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); @@ -844,8 +923,8 @@ mod test { // regular expression for a non-existent field is rewritten to `false` assert_snapshot!( plan("SELECT foo, f64_field FROM data where non_existent =~ /f/"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -854,16 +933,16 @@ mod test { // Regular expression inequality tests assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo !~ /f/"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + 
Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: CAST(data.foo AS Utf8) !~ Utf8("f") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // regular expression for a numeric field is rewritten to `false` assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field !~ /f/"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); @@ -871,8 +950,8 @@ mod test { // regular expression for a non-existent field is rewritten to `false` assert_snapshot!( plan("SELECT foo, f64_field FROM data where non_existent !~ /f/"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, 
mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -883,49 +962,49 @@ mod test { fn test_column_matching_rules() { // Cast between numeric types assert_snapshot!(plan("SELECT f64_field::integer FROM data"), @r###" - Projection: data.time, CAST(data.f64_field AS Int64) AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Int64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, CAST(data.f64_field AS Int64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT i64_field::float FROM data"), @r###" - Projection: data.time, CAST(data.i64_field AS Float64) AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, CAST(data.i64_field AS Float64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // use field selector assert_snapshot!(plan("SELECT bool_field::field FROM data"), @r###" - Projection: data.time, data.bool_field AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Boolean;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.bool_field AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, 
mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // invalid column reference assert_snapshot!(plan("SELECT not_exists::tag FROM data"), @r###" - Projection: data.time, NULL AS not_exists [time:Timestamp(Nanosecond, None), not_exists:Null;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS not_exists [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT not_exists::field FROM data"), @r###" - Projection: data.time, NULL AS not_exists [time:Timestamp(Nanosecond, None), not_exists:Null;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS not_exists [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // Returns NULL for invalid casts assert_snapshot!(plan("SELECT f64_field::string FROM data"), @r###" - Projection: data.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT f64_field::boolean FROM data"), @r###" - Projection: data.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, 
mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT str_field::boolean FROM data"), @r###" - Projection: data.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); } @@ -934,26 +1013,26 @@ mod test { fn test_explain() { assert_snapshot!(plan("EXPLAIN SELECT foo, f64_field FROM data"), @r###" Explain [plan_type:Utf8, plan:Utf8] - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("EXPLAIN VERBOSE SELECT foo, f64_field FROM data"), @r###" Explain [plan_type:Utf8, plan:Utf8] - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC 
NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("EXPLAIN ANALYZE SELECT foo, f64_field FROM data"), @r###" Analyze [plan_type:Utf8, plan:Utf8] - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("EXPLAIN ANALYZE VERBOSE SELECT foo, f64_field FROM data"), @r###" Analyze [plan_type:Utf8, plan:Utf8] - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); } @@ -962,150 +1041,150 @@ mod test { fn test_select_cast_postfix_operator() { // Float casting assert_snapshot!(plan("SELECT f64_field::float FROM all_types"), @r###" - Projection: all_types.time, all_types.f64_field AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Float64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, 
tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::unsigned FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.f64_field AS UInt64) AS f64_field [time:Timestamp(Nanosecond, None), f64_field:UInt64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:UInt64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.f64_field AS UInt64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:UInt64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::integer FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.f64_field AS Int64) AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Int64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.f64_field AS Int64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::string FROM all_types"), @r###" - Projection: all_types.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: all_types 
[bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::boolean FROM all_types"), @r###" - Projection: all_types.time, NULL AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Integer casting assert_snapshot!(plan("SELECT i64_field::float FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.i64_field AS Float64) AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Float64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.i64_field AS Float64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::unsigned FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.i64_field AS UInt64) AS i64_field [time:Timestamp(Nanosecond, None), i64_field:UInt64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:UInt64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.i64_field AS UInt64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:UInt64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::integer FROM all_types"), @r###" - Projection: all_types.time, all_types.i64_field AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Int64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, 
f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.i64_field AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::string FROM all_types"), @r###" - Projection: all_types.time, NULL AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::boolean FROM all_types"), @r###" - Projection: all_types.time, NULL AS i64_field [time:Timestamp(Nanosecond, None), i64_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Unsigned casting assert_snapshot!(plan("SELECT u64_field::float FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.u64_field AS Float64) AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Float64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.u64_field AS Float64) AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Float64;N] TableScan: 
all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::unsigned FROM all_types"), @r###" - Projection: all_types.time, all_types.u64_field AS u64_field [time:Timestamp(Nanosecond, None), u64_field:UInt64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.u64_field AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:UInt64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::integer FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.u64_field AS Int64) AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Int64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.u64_field AS Int64) AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::string FROM all_types"), @r###" - Projection: all_types.time, NULL AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::boolean FROM all_types"), @r###" - Projection: all_types.time, NULL AS u64_field [time:Timestamp(Nanosecond, None), u64_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, 
Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // String casting assert_snapshot!(plan("SELECT str_field::float FROM all_types"), @r###" - Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::unsigned FROM all_types"), @r###" - Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::integer FROM all_types"), @r###" - Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, 
time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::string FROM all_types"), @r###" - Projection: all_types.time, all_types.str_field AS str_field [time:Timestamp(Nanosecond, None), str_field:Utf8;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Utf8;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.str_field AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Utf8;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::boolean FROM all_types"), @r###" - Projection: all_types.time, NULL AS str_field [time:Timestamp(Nanosecond, None), str_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Boolean casting assert_snapshot!(plan("SELECT bool_field::float FROM all_types"), @r###" - Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::unsigned FROM all_types"), @r###" - Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), 
time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::integer FROM all_types"), @r###" - Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::string FROM all_types"), @r###" - Projection: all_types.time, NULL AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::boolean FROM all_types"), @r###" - Projection: all_types.time, all_types.bool_field AS bool_field [time:Timestamp(Nanosecond, None), bool_field:Boolean;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.bool_field AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Validate various projection expressions with casts assert_snapshot!(plan("SELECT 
f64_field::integer + i64_field + u64_field::integer FROM all_types"), @r###" - Projection: all_types.time, CAST(all_types.f64_field AS Int64) + all_types.i64_field + CAST(all_types.u64_field AS Int64) AS f64_field_i64_field_u64_field [time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.f64_field AS Int64) + all_types.i64_field + CAST(all_types.u64_field AS Int64) AS f64_field_i64_field_u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::integer + i64_field + str_field::integer FROM all_types"), @r###" - Projection: all_types.time, NULL AS f64_field_i64_field_str_field [time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] - Sort: all_types.time ASC NULLS LAST [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS f64_field_i64_field_str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); } @@ -1120,43 +1199,44 @@ mod test { #[test] fn test_single_measurement() { assert_snapshot!(plan("SELECT f64_field FROM data"), @r###" - Projection: data.time, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT time, f64_field FROM data"), @r###" - Projection: 
data.time AS time, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT time as timestamp, f64_field FROM data"), @r###" - Projection: data.time AS timestamp, data.f64_field AS f64_field [timestamp:Timestamp(Nanosecond, None), f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] - TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Projection: iox::measurement, timestamp, f64_field [iox::measurement:Dictionary(Int32, Utf8), timestamp:Timestamp(Nanosecond, None), f64_field:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), timestamp:Timestamp(Nanosecond, None), f64_field:Float64;N, time:Timestamp(Nanosecond, None)] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS timestamp, data.f64_field AS f64_field, data.time [iox::measurement:Dictionary(Int32, Utf8), timestamp:Timestamp(Nanosecond, None), f64_field:Float64;N, time:Timestamp(Nanosecond, None)] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, f64_field FROM data"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, 
bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, f64_field, i64_field FROM data"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field AS f64_field, data.i64_field AS i64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field, data.i64_field AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT /^f/ FROM data"), @r###" - Projection: data.time, data.f64_field AS f64_field, data.foo AS foo [time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.f64_field AS f64_field, data.foo AS foo [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT * FROM data"), @r###" - Projection: data.time, data.TIME AS TIME, data.bar AS bar, data.bool_field AS bool_field, data.f64_field AS f64_field, data.foo AS foo, data.i64_field AS i64_field, data.mixedCase AS mixedCase, data.str_field AS str_field, data.with space AS with space [time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, bar ASC NULLS LAST, foo ASC 
NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.TIME AS TIME, data.bar AS bar, data.bool_field AS bool_field, data.f64_field AS f64_field, data.foo AS foo, data.i64_field AS i64_field, data.mixedCase AS mixedCase, data.str_field AS str_field, data.with space AS with space [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT TIME FROM data"), @r###" - Projection: data.time, data.TIME AS TIME [time:Timestamp(Nanosecond, None), TIME:Boolean;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.TIME AS TIME [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // TIME is a field } @@ -1165,23 +1245,23 @@ mod test { #[test] fn test_simple_arithmetic_in_projection() { assert_snapshot!(plan("SELECT foo, f64_field + f64_field FROM data"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field + data.f64_field AS f64_field_f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field + data.f64_field AS f64_field_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, 
sin(f64_field) FROM data"), @r###" - Projection: data.time, data.foo AS foo, sin(data.f64_field) AS sin [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, sin(data.f64_field) AS sin [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, atan2(f64_field, 2) FROM data"), @r###" - Projection: data.time, data.foo AS foo, atan2(data.f64_field, Int64(2)) AS atan2 [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, atan2(data.f64_field, Int64(2)) AS atan2 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, f64_field + 0.5 FROM data"), @r###" - Projection: data.time, data.foo AS foo, data.f64_field + Float64(0.5) AS f64_field [time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Sort: data.time ASC NULLS LAST [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field + Float64(0.5) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] 
"###); } diff --git a/iox_query/src/plan/influxql/rewriter.rs b/iox_query/src/plan/influxql/rewriter.rs index f1072c1e0c..f595a5cb13 100644 --- a/iox_query/src/plan/influxql/rewriter.rs +++ b/iox_query/src/plan/influxql/rewriter.rs @@ -554,14 +554,14 @@ mod test { let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT host::tag AS host, region::tag AS region, usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu" + "SELECT cpu::tag AS cpu, host::tag AS host, region::tag AS region, usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu" ); let stmt = parse_select("SELECT * FROM cpu, disk"); let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT bytes_free::integer AS bytes_free, bytes_used::integer AS bytes_used, host::tag AS host, region::tag AS region, usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu, disk" + "SELECT bytes_free::integer AS bytes_free, bytes_used::integer AS bytes_used, cpu::tag AS cpu, device::tag AS device, host::tag AS host, region::tag AS region, usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu, disk" ); // Regular expression selects fields from multiple measurements @@ -577,7 +577,7 @@ mod test { let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT host::tag AS host, region::tag AS region FROM cpu" + "SELECT cpu::tag AS cpu, host::tag AS host, region::tag AS region FROM cpu" ); // Selective wildcard for fields @@ -593,7 +593,7 @@ mod test { let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT usage_idle::float AS usage_idle, host::tag AS host, region::tag AS region FROM cpu" + "SELECT usage_idle::float AS usage_idle, cpu::tag AS cpu, host::tag AS host, region::tag AS region FROM cpu" ); // GROUP BY expansion @@ -609,7 +609,7 @@ mod test { let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT usage_idle::float AS usage_idle FROM cpu GROUP BY host, region" + "SELECT usage_idle::float AS usage_idle FROM cpu GROUP BY cpu, host, region" ); // Does not include tags in projection when expanded in GROUP BY @@ -617,7 +617,7 @@ mod test { let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu GROUP BY host, region" + "SELECT usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu GROUP BY cpu, host, region" ); // Does include explicitly listed tags in projection @@ -625,7 +625,7 @@ mod test { let stmt = rewrite_statement(&namespace, &stmt).unwrap(); assert_eq!( stmt.to_string(), - "SELECT host::tag AS host, usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu GROUP BY host, region" + "SELECT host::tag AS host, usage_idle::float AS usage_idle, usage_system::float AS usage_system, usage_user::float AS usage_user FROM cpu GROUP BY cpu, host, region" ); // Fallible diff --git a/iox_query/src/plan/influxql/test_utils.rs b/iox_query/src/plan/influxql/test_utils.rs index f322dc799c..9971ddb44d 100644 --- a/iox_query/src/plan/influxql/test_utils.rs +++ 
b/iox_query/src/plan/influxql/test_utils.rs @@ -55,6 +55,7 @@ pub(crate) mod database { .with_time_column() .with_tag_column("host") .with_tag_column("region") + .with_tag_column("cpu") .with_f64_field_column("usage_user") .with_f64_field_column("usage_system") .with_f64_field_column("usage_idle") @@ -67,6 +68,7 @@ pub(crate) mod database { .with_time_column() .with_tag_column("host") .with_tag_column("region") + .with_tag_column("device") .with_i64_field_column("bytes_used") .with_i64_field_column("bytes_free") .with_one_row_of_data(),
194cb9c6b8844e3306a8038ae198b6d5146a1e8c
Carol (Nichols || Goulding)
2023-03-06 13:07:39
Use into_iter instead of VecDeque
Nothing was pushing into the VecDeque; can just sort and iterate over the collection as a Vec. In `identify_files_to_split`, the collection should be empty after the `for` loop, so it doesn't need to be pushed into the result.
null
fix: Use into_iter instead of VecDeque Nothing was pushing into the VecDeque; can just sort and iterate over the collection as a Vec. In `identify_files_to_split`, the collection should be empty after the `for` loop, so it doesn't need to be pushed into the result.
diff --git a/compactor2/src/components/split_or_compact/files_to_compact.rs b/compactor2/src/components/split_or_compact/files_to_compact.rs index 19677960b6..3b5a335749 100644 --- a/compactor2/src/components/split_or_compact/files_to_compact.rs +++ b/compactor2/src/components/split_or_compact/files_to_compact.rs @@ -1,5 +1,3 @@ -use std::collections::VecDeque; - use data_types::{CompactionLevel, ParquetFile, Timestamp}; use crate::components::{ @@ -43,21 +41,22 @@ pub fn limit_files_to_compact( let split = TargetLevelSplit::new(); let (start_level_files, mut target_level_files) = split.apply(files, start_level); - // Order start-level files by to group the files to commpact them correctly + // Order start-level files to group the files to compact them correctly let start_level_files = order_files(start_level_files, &start_level); - let mut start_level_files = start_level_files.iter().collect::<VecDeque<_>>(); + let mut start_level_files = start_level_files.into_iter(); // Go over start-level files and find overlapped files in target level let mut start_level_files_to_compact: Vec<ParquetFile> = Vec::new(); let mut target_level_files_to_compact = Vec::new(); let mut files_to_keep = Vec::new(); let mut total_size = 0; - while let Some(file) = start_level_files.pop_front() { + + for file in start_level_files.by_ref() { // A start-level file, if compacted, must be compacted with all of its overlapped target-level files. // Thus compute the size needed before deciding to compact this file and its overlaps or not // Time range of start_level_files_to_compact plus this file - let (min_time, max_time) = time_range(file, &start_level_files_to_compact); + let (min_time, max_time) = time_range(&file, &start_level_files_to_compact); // Get all target-level files that overlaps with the time range and not yet in target_level_files_to_compact let overlapped_files: Vec<&ParquetFile> = target_level_files @@ -75,13 +74,13 @@ pub fn limit_files_to_compact( // If total size is under limit, add this file and its overlapped files to files_to_compact if total_size + size <= max_compact_size as i64 { - start_level_files_to_compact.push(file.clone()); + start_level_files_to_compact.push(file); target_level_files_to_compact .extend(overlapped_files.into_iter().cloned().collect::<Vec<_>>()); total_size += size; } else { // Over limit, stop here - files_to_keep.push(file.clone()); + files_to_keep.push(file); break; } } @@ -90,7 +89,7 @@ pub fn limit_files_to_compact( target_level_files.retain(|f| !target_level_files_to_compact.iter().any(|x| x == f)); // All files left in start_level_files and target_level_files are kept for next round - target_level_files.extend(start_level_files.into_iter().cloned().collect::<Vec<_>>()); + target_level_files.extend(start_level_files); files_to_keep.extend(target_level_files); // All files in start_level_files_to_compact and target_level_files_to_compact will be compacted diff --git a/compactor2/src/components/split_or_compact/files_to_split.rs b/compactor2/src/components/split_or_compact/files_to_split.rs index 78a45ee376..3e2058480c 100644 --- a/compactor2/src/components/split_or_compact/files_to_split.rs +++ b/compactor2/src/components/split_or_compact/files_to_split.rs @@ -1,5 +1,3 @@ -use std::collections::VecDeque; - use data_types::{CompactionLevel, ParquetFile}; use itertools::Itertools; use observability_deps::tracing::debug; @@ -44,20 +42,17 @@ pub fn identify_files_to_split( // Get start-level and target-level files let len = files.len(); let split = 
TargetLevelSplit::new(); - let (start_level_files, mut target_level_files) = split.apply(files, start_level); + let (mut start_level_files, mut target_level_files) = split.apply(files, start_level); - // sort start_level files in their max_l0_created_at and convert it to VecDeque for pop_front - let mut start_level_files: VecDeque<ParquetFile> = start_level_files - .into_iter() - .sorted_by_key(|f| f.max_l0_created_at) - .collect(); + // sort start_level files in their max_l0_created_at + start_level_files.sort_by_key(|f| f.max_l0_created_at); // sort target level files in their min_time target_level_files.sort_by_key(|f| f.min_time); // Get files in start level that overlap with any file in target level let mut files_to_split = Vec::new(); let mut files_not_to_split = Vec::new(); - while let Some(file) = start_level_files.pop_front() { + for file in start_level_files { // Get target_level files that overlaps with this file let overlapped_target_level_files: Vec<&ParquetFile> = target_level_files .iter() @@ -93,8 +88,7 @@ pub fn identify_files_to_split( } // keep the rest of the files for next round - start_level_files.extend(target_level_files); - files_not_to_split.extend(start_level_files); + files_not_to_split.extend(target_level_files); assert_eq!(files_to_split.len() + files_not_to_split.len(), len); diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs index 50088aaf07..1fdc0ea1f8 100644 --- a/compactor2/src/driver.rs +++ b/compactor2/src/driver.rs @@ -1,4 +1,4 @@ -use std::{collections::VecDeque, num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use data_types::{CompactionLevel, ParquetFile, ParquetFileParams, PartitionId}; use futures::StreamExt; @@ -187,15 +187,14 @@ async fn try_compact_partition( let (files_now, files_later) = components.round_split.split(files, round_info.as_ref()); // Each branch must not overlap with each other - let mut branches = components + let branches = components .divide_initial .divide(files_now, round_info.as_ref()) - .into_iter() - .collect::<VecDeque<_>>(); + .into_iter(); let mut files_next = files_later; // loop for each "Branch" - while let Some(branch) = branches.pop_front() { + for branch in branches { let input_paths: Vec<ParquetFilePath> = branch.iter().map(ParquetFilePath::from).collect();
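The commit above replaces `VecDeque` + `pop_front` loops with in-place sorting and consuming iteration. Below is a minimal, self-contained sketch of that pattern (not the IOx code; the `File` type and its `created_at` field are stand-ins for `ParquetFile::max_l0_created_at`): when nothing is ever pushed back onto the queue, draining a `VecDeque` from the front is equivalent to sorting the `Vec` and consuming it with `into_iter()`, and `by_ref()` preserves early-exit behavior while keeping the unconsumed tail available.

```rust
#[derive(Debug)]
struct File {
    // Stand-in for `max_l0_created_at` / `min_time` in the real code.
    created_at: i64,
}

fn main() {
    let mut files = vec![
        File { created_at: 3 },
        File { created_at: 1 },
        File { created_at: 2 },
    ];

    // Before: `files.into_iter().sorted_by_key(..).collect::<VecDeque<_>>()`
    // drained with `while let Some(file) = queue.pop_front()`.

    // After: sort in place and iterate; `by_ref()` lets the loop stop early
    // while the iterator keeps ownership of the remaining items.
    files.sort_by_key(|f| f.created_at);
    let mut iter = files.into_iter();

    let mut kept = Vec::new();
    for file in iter.by_ref() {
        if file.created_at > 2 {
            kept.push(file); // over the limit: keep it and stop, like limit_files_to_compact
            break;
        }
        println!("compact {file:?}");
    }

    // Everything the loop never reached is also kept, mirroring how the leftover
    // start-level files are appended to `files_to_keep`.
    kept.extend(iter);
    println!("keep {kept:?}");
}
```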
2a396c1b7313b4fac47d5eec0c62f8f9f046d1f9
Fraser Savage
2023-08-31 15:56:18
Add `table list <DATABASE>` command
This commit hooks the influxdb_iox_client and CLI up to the new `GetTables` gRPC endpoint on the `TableService`, allowing users to query a list of tables within a database and consequently see any custom partitioning schemes.
null
feat(cli): Add `table list <DATABASE>` command This commit hooks the influxdb_iox_client and CLI up to the new `GetTables` gRPC endpoint on the `TableService`, allowing users to query a list of tables within a database and consequently see any custom partitioning schemes.
diff --git a/influxdb_iox/src/commands/table/list.rs b/influxdb_iox/src/commands/table/list.rs new file mode 100644 index 0000000000..baba1c9a6b --- /dev/null +++ b/influxdb_iox/src/commands/table/list.rs @@ -0,0 +1,19 @@ +use crate::commands::table::Error; +use influxdb_iox_client::connection::Connection; + +/// List tables within the specified database +#[derive(Debug, clap::Parser, Default, Clone)] +pub struct Config { + /// The database to display the list of tables for + #[clap(action)] + database: String, +} + +pub async fn command(connection: Connection, config: Config) -> Result<(), Error> { + let mut client = influxdb_iox_client::table::Client::new(connection); + + let tables = client.get_tables(&config.database).await?; + println!("{}", serde_json::to_string_pretty(&tables)?); + + Ok(()) +} diff --git a/influxdb_iox/src/commands/table/mod.rs b/influxdb_iox/src/commands/table/mod.rs index c64b57e446..bb7f38e1a4 100644 --- a/influxdb_iox/src/commands/table/mod.rs +++ b/influxdb_iox/src/commands/table/mod.rs @@ -5,6 +5,7 @@ use observability_deps::tracing::info; use thiserror::Error; mod create; +mod list; #[allow(clippy::enum_variant_names)] #[derive(Debug, Error)] @@ -28,12 +29,15 @@ pub struct Config { /// All possible subcommands for table #[derive(Debug, clap::Parser)] enum Command { + /// List tables in a given database + List(list::Config), /// Create a new table Create(create::Config), } pub async fn command(connection: Connection, config: Config) -> Result<()> { match config.command { + Command::List(config) => list::command(connection, config).await?, Command::Create(config) => { info!("Creating table with config: {:?}", config); create::command(connection, config).await?; diff --git a/influxdb_iox_client/src/client/table.rs b/influxdb_iox_client/src/client/table.rs index ff6a5a0e02..a603fcda7c 100644 --- a/influxdb_iox_client/src/client/table.rs +++ b/influxdb_iox_client/src/client/table.rs @@ -27,6 +27,18 @@ impl Client { } } + /// Fetch the list of tables in the given namespace + pub async fn get_tables(&mut self, namespace_name: &str) -> Result<Vec<Table>, Error> { + Ok(self + .inner + .get_tables(GetTablesRequest { + namespace_name: namespace_name.to_string(), + }) + .await? + .into_inner() + .tables) + } + /// Create a table pub async fn create_table( &mut self,
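The diff above adds `get_tables` to the client and wires it into the CLI. The following is a hedged usage sketch of that method outside the CLI; the gRPC endpoint address and database name are placeholders, and it assumes the client crate's `connection::Builder` is used to establish the connection. It does programmatically what `influxdb_iox table list my_db` does.

```rust
use influxdb_iox_client::{connection::Builder, table::Client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address; point this at a running IOx gRPC endpoint.
    let connection = Builder::default().build("http://127.0.0.1:8082").await?;

    let mut client = Client::new(connection);

    // Same call the new `table list` subcommand makes.
    let tables = client.get_tables("my_db").await?;
    println!("{}", serde_json::to_string_pretty(&tables)?);

    Ok(())
}
```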
45b3984aa30159e253c94e18bee24e518c65f63d
Marco Neumann
2022-11-02 08:18:33
simplify `QueryChunk` data access (#6015)
* refactor: simplify `QueryChunk` data access We have only two types for chunks (now that the RUB is gone): 1. In-memory RecordBatches 2. Parquet files Loads of logic is duplicated in the different `read_filter` implementations. Also `read_filter` hides a solid amount of logic from DataFusion, which will prevent certain (future) optimizations. To enable #5897 and to simplify the interface, let the chunks return the data (batches or metadata for parquet files) directly and let `iox_query` perform the actual heavy-lifting. * docs: improve Co-authored-by: Andrew Lamb <[email protected]> * docs: improve Co-authored-by: Andrew Lamb <[email protected]>
Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: simplify `QueryChunk` data access (#6015) * refactor: simplify `QueryChunk` data access We have only two types for chunks (now that the RUB is gone): 1. In-memory RecordBatches 2. Parquet files Loads of logic is duplicated in the different `read_filter` implementations. Also `read_filter` hides a solid amount of logic from DataFusion, which will prevent certain (future) optimizations. To enable #5897 and to simplify the interface, let the chunks return the data (batches or metadata for parquet files) directly and let `iox_query` perform the actual heavy-lifting. * docs: improve Co-authored-by: Andrew Lamb <[email protected]> * docs: improve Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/compactor/src/query.rs b/compactor/src/query.rs index 37bfc9d435..21e6f5a2ea 100644 --- a/compactor/src/query.rs +++ b/compactor/src/query.rs @@ -4,40 +4,18 @@ use data_types::{ ChunkId, ChunkOrder, CompactionLevel, DeletePredicate, PartitionId, SequenceNumber, TableSummary, Timestamp, Tombstone, }; -use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream}; +use datafusion::error::DataFusionError; use iox_query::{ exec::{stringset::StringSet, IOxSessionContext}, util::create_basic_summary, - QueryChunk, QueryChunkMeta, + QueryChunk, QueryChunkData, QueryChunkMeta, }; -use observability_deps::tracing::trace; use parquet_file::chunk::ParquetChunk; use predicate::{delete_predicate::tombstones_to_delete_predicates, Predicate}; use schema::{merge::SchemaMerger, selection::Selection, sort::SortKey, Schema}; -use snafu::{ResultExt, Snafu}; use std::{any::Any, sync::Arc}; use uuid::Uuid; -#[derive(Debug, Snafu)] -#[allow(missing_copy_implementations, missing_docs)] -pub enum Error { - #[snafu(display("Failed to read parquet: {}", source))] - ReadParquet { - source: parquet_file::storage::ReadError, - }, - - #[snafu(display( - "Error reading IOx Metadata from Parquet IoxParquetMetadata: {}", - source - ))] - ReadParquetMeta { - source: parquet_file::storage::ReadError, - }, -} - -/// A specialized `Error` for Compactor's query errors -pub type Result<T, E = Error> = std::result::Result<T, E>; - /// QueryableParquetChunk that implements QueryChunk and QueryMetaChunk for building query plan #[derive(Debug, Clone)] pub struct QueryableParquetChunk { @@ -213,33 +191,8 @@ impl QueryChunk for QueryableParquetChunk { Ok(None) } - /// Provides access to raw `QueryChunk` data as an - /// asynchronous stream of `RecordBatch`es filtered by a *required* - /// predicate. Note that not all chunks can evaluate all types of - /// predicates and this function will return an error - /// if requested to evaluate with a predicate that is not supported - /// - /// This is the analog of the `TableProvider` in DataFusion - /// - /// The reason we can't simply use the `TableProvider` trait - /// directly is that the data for a particular Table lives in - /// several chunks within a partition, so there needs to be an - /// implementation of `TableProvider` that stitches together the - /// streams from several different `QueryChunk`s. 
- fn read_filter( - &self, - mut ctx: IOxSessionContext, - predicate: &Predicate, - selection: Selection<'_>, - ) -> Result<SendableRecordBatchStream, DataFusionError> { - ctx.set_metadata("storage", "compactor"); - ctx.set_metadata("projection", format!("{}", selection)); - trace!(?selection, "selection"); - - self.data - .read_filter(predicate, selection, ctx.inner()) - .context(ReadParquetSnafu) - .map_err(|e| DataFusionError::External(Box::new(e))) + fn data(&self) -> QueryChunkData { + QueryChunkData::Parquet(self.data.parquet_exec_input()) } /// Returns chunk type diff --git a/ingester/src/query_adaptor.rs b/ingester/src/query_adaptor.rs index be16efaa78..5b5df0f256 100644 --- a/ingester/src/query_adaptor.rs +++ b/ingester/src/query_adaptor.rs @@ -6,24 +6,16 @@ use std::{any::Any, sync::Arc}; use arrow::record_batch::RecordBatch; use arrow_util::util::ensure_schema; use data_types::{ChunkId, ChunkOrder, DeletePredicate, PartitionId, TableSummary}; -use datafusion::{ - error::DataFusionError, - physical_plan::{ - common::SizedRecordBatchStream, - metrics::{ExecutionPlanMetricsSet, MemTrackingMetrics}, - SendableRecordBatchStream, - }, -}; +use datafusion::error::DataFusionError; use iox_query::{ exec::{stringset::StringSet, IOxSessionContext}, util::{compute_timenanosecond_min_max, create_basic_summary}, - QueryChunk, QueryChunkMeta, + QueryChunk, QueryChunkData, QueryChunkMeta, }; -use observability_deps::tracing::trace; use once_cell::sync::OnceCell; use predicate::Predicate; use schema::{merge::merge_record_batch_schemas, selection::Selection, sort::SortKey, Schema}; -use snafu::{ResultExt, Snafu}; +use snafu::Snafu; use crate::data::table::TableName; @@ -230,42 +222,15 @@ impl QueryChunk for QueryAdaptor { Ok(None) } - /// Provides access to raw `QueryChunk` data as an - /// asynchronous stream of `RecordBatch`es - fn read_filter( - &self, - mut ctx: IOxSessionContext, - _predicate: &Predicate, - selection: Selection<'_>, - ) -> Result<SendableRecordBatchStream, DataFusionError> { - ctx.set_metadata("storage", "ingester"); - ctx.set_metadata("projection", format!("{}", selection)); - trace!(?selection, "selection"); - - let schema = self - .schema() - .select(selection) - .context(SchemaSnafu) - .map_err(|e| DataFusionError::External(Box::new(e)))?; - - // Apply the projection over all the data in self, ensuring each batch - // has the specified schema. 
- let batches = self - .project_selection(selection) - .into_iter() - .map(|batch| { - ensure_schema(&schema.as_arrow(), &batch) - .context(ConcatBatchesSnafu {}) - .map(Arc::new) - }) - .collect::<Result<Vec<_>, _>>() - .map_err(|e| DataFusionError::External(Box::new(e)))?; - - // Return stream of data - let dummy_metrics = ExecutionPlanMetricsSet::new(); - let mem_metrics = MemTrackingMetrics::new(&dummy_metrics, 0); - let stream = SizedRecordBatchStream::new(schema.as_arrow(), batches, mem_metrics); - Ok(Box::pin(stream)) + fn data(&self) -> QueryChunkData { + let schema = self.schema().as_arrow(); + + QueryChunkData::RecordBatches( + self.data + .iter() + .map(|b| ensure_schema(&schema, b).expect("schema handling broken")) + .collect(), + ) } /// Returns chunk type diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index 94db6974ae..08cd02adb0 100644 --- a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -10,12 +10,14 @@ clippy::dbg_macro )] +use arrow::record_batch::RecordBatch; use async_trait::async_trait; use data_types::{ChunkId, ChunkOrder, DeletePredicate, InfluxDbType, PartitionId, TableSummary}; -use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream}; +use datafusion::{error::DataFusionError, prelude::SessionContext}; use exec::{stringset::StringSet, IOxSessionContext}; use hashbrown::HashMap; use observability_deps::tracing::{debug, trace}; +use parquet_file::storage::ParquetExecInput; use predicate::{rpc_predicate::QueryDatabaseMeta, Predicate, PredicateMatch}; use schema::{ selection::Selection, @@ -173,6 +175,37 @@ pub trait QueryDatabase: QueryDatabaseMeta + Debug + Send + Sync { fn as_meta(&self) -> &dyn QueryDatabaseMeta; } +/// Raw data of a [`QueryChunk`]. +#[derive(Debug)] +pub enum QueryChunkData { + /// In-memory record batches. + /// + /// **IMPORTANT: All batches MUST have the schema that the [chunk reports](QueryChunkMeta::schema).** + RecordBatches(Vec<RecordBatch>), + + /// Parquet file. + /// + /// See [`ParquetExecInput`] for details. + Parquet(ParquetExecInput), +} + +impl QueryChunkData { + /// Read data into [`RecordBatch`]es. This is mostly meant for testing! + pub async fn read_to_batches( + self, + schema: Arc<Schema>, + session_ctx: &SessionContext, + ) -> Vec<RecordBatch> { + match self { + Self::RecordBatches(batches) => batches, + Self::Parquet(exec_input) => exec_input + .read_to_batches(schema.as_arrow(), Selection::All, session_ctx) + .await + .unwrap(), + } + } +} + /// Collection of data that shares the same partition key pub trait QueryChunk: QueryChunkMeta + Debug + Send + Sync + 'static { /// returns the Id of this chunk. Ids are unique within a @@ -222,25 +255,10 @@ pub trait QueryChunk: QueryChunkMeta + Debug + Send + Sync + 'static { predicate: &Predicate, ) -> Result<Option<StringSet>, DataFusionError>; - /// Provides access to raw `QueryChunk` data as an - /// asynchronous stream of `RecordBatch`es filtered by a *required* - /// predicate. Note that not all chunks can evaluate all types of - /// predicates and this function will return an error - /// if requested to evaluate with a predicate that is not supported - /// - /// This is the analog of the `TableProvider` in DataFusion + /// Provides access to raw [`QueryChunk`] data. 
/// - /// The reason we can't simply use the `TableProvider` trait - /// directly is that the data for a particular Table lives in - /// several chunks within a partition, so there needs to be an - /// implementation of `TableProvider` that stitches together the - /// streams from several different `QueryChunk`s. - fn read_filter( - &self, - ctx: IOxSessionContext, - predicate: &Predicate, - selection: Selection<'_>, - ) -> Result<SendableRecordBatchStream, DataFusionError>; + /// The engine assume that minimal work shall be performed to gather the `QueryChunkData`. + fn data(&self) -> QueryChunkData; /// Returns chunk type. Useful in tests and debug logs. fn chunk_type(&self) -> &str; diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs index e677fabcb5..384583ae47 100644 --- a/iox_query/src/provider/physical.rs +++ b/iox_query/src/provider/physical.rs @@ -1,22 +1,28 @@ //! Implementation of a DataFusion PhysicalPlan node across partition chunks use super::adapter::SchemaAdapterStream; -use crate::{exec::IOxSessionContext, QueryChunk}; +use crate::{exec::IOxSessionContext, QueryChunk, QueryChunkData}; use arrow::datatypes::SchemaRef; use data_types::TableSummary; use datafusion::{ + datasource::listing::PartitionedFile, error::DataFusionError, execution::context::TaskContext, physical_plan::{ + execute_stream, expressions::PhysicalSortExpr, + file_format::{FileScanConfig, ParquetExec}, + memory::MemoryStream, metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet}, + stream::RecordBatchStreamAdapter, DisplayFormatType, ExecutionPlan, Partitioning, SendableRecordBatchStream, Statistics, }, }; +use futures::TryStreamExt; use observability_deps::tracing::trace; use predicate::Predicate; -use schema::{selection::Selection, Schema}; -use std::{fmt, sync::Arc}; +use schema::Schema; +use std::{collections::HashSet, fmt, sync::Arc}; /// Implements the DataFusion physical plan interface #[derive(Debug)] @@ -104,16 +110,13 @@ impl ExecutionPlan for IOxReadFilterNode { fn execute( &self, partition: usize, - _context: Arc<TaskContext>, + context: Arc<TaskContext>, ) -> datafusion::error::Result<SendableRecordBatchStream> { trace!(partition, "Start IOxReadFilterNode::execute"); let baseline_metrics = BaselineMetrics::new(&self.metrics, partition); - let timer = baseline_metrics.elapsed_compute().timer(); let schema = self.schema(); - let fields = schema.fields(); - let selection_cols = fields.iter().map(|f| f.name() as &str).collect::<Vec<_>>(); let chunk = Arc::clone(&self.chunks[partition]); @@ -125,32 +128,79 @@ impl ExecutionPlan for IOxReadFilterNode { // restrict the requested selection to the actual columns // available, and use SchemaAdapterStream to pad the rest of // the columns with NULLs if necessary - let selection_cols = restrict_selection(selection_cols, &chunk_table_schema); - let selection = Selection::Some(&selection_cols); - - let stream = chunk - .read_filter( - self.ctx.child_ctx("chunk read_filter"), - &self.predicate, - selection, - ) - .map_err(|e| { - DataFusionError::Execution(format!( - "Error creating scan for table {} chunk {}: {}", - self.table_name, - chunk.id(), - e - )) - })?; - - // all CPU time is now done, pass in baseline metrics to adapter - timer.done(); - - let adapter = SchemaAdapterStream::try_new(stream, schema, baseline_metrics) - .map_err(|e| DataFusionError::Internal(e.to_string()))?; + let final_output_column_names: HashSet<_> = + schema.fields().iter().map(|f| f.name()).collect(); + let projection: Vec<_> = 
chunk_table_schema + .iter() + .enumerate() + .filter(|(_idx, (_t, field))| final_output_column_names.contains(field.name())) + .map(|(idx, _)| idx) + .collect(); + let projection = (!((projection.len() == chunk_table_schema.len()) + && (projection.iter().enumerate().all(|(a, b)| a == *b)))) + .then_some(projection); + let incomplete_output_schema = projection + .as_ref() + .map(|projection| { + Arc::new( + chunk_table_schema + .as_arrow() + .project(projection) + .expect("projection broken"), + ) + }) + .unwrap_or_else(|| chunk_table_schema.as_arrow()); + + let stream = match chunk.data() { + QueryChunkData::RecordBatches(batches) => { + let stream = Box::pin(MemoryStream::try_new( + batches, + incomplete_output_schema, + projection, + )?); + let adapter = SchemaAdapterStream::try_new(stream, schema, baseline_metrics) + .map_err(|e| DataFusionError::Internal(e.to_string()))?; + Box::pin(adapter) as SendableRecordBatchStream + } + QueryChunkData::Parquet(exec_input) => { + let base_config = FileScanConfig { + object_store_url: exec_input.object_store_url, + file_schema: Arc::clone(&schema), + file_groups: vec![vec![PartitionedFile { + object_meta: exec_input.object_meta, + partition_values: vec![], + range: None, + extensions: None, + }]], + statistics: Statistics::default(), + projection: None, + limit: None, + table_partition_cols: vec![], + config_options: context.session_config().config_options(), + }; + let delete_predicates: Vec<_> = chunk + .delete_predicates() + .iter() + .map(|pred| Arc::new(pred.as_ref().clone().into())) + .collect(); + let predicate = self + .predicate + .clone() + .with_delete_predicates(&delete_predicates); + let exec = ParquetExec::new(base_config, predicate.filter_expr(), None); + let stream = RecordBatchStreamAdapter::new( + schema, + futures::stream::once(execute_stream(Arc::new(exec), context)).try_flatten(), + ); + + // Note: No SchemaAdapterStream required here because `ParquetExec` already creates NULL columns for us. 
+ + Box::pin(stream) + } + }; trace!(partition, "End IOxReadFilterNode::execute"); - Ok(Box::pin(adapter)) + Ok(stream) } fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -190,17 +240,3 @@ impl ExecutionPlan for IOxReadFilterNode { .unwrap_or_default() } } - -/// Removes any columns that are not present in schema, returning a possibly -/// restricted set of columns -fn restrict_selection<'a>( - selection_cols: Vec<&'a str>, - chunk_table_schema: &'a Schema, -) -> Vec<&'a str> { - let arrow_schema = chunk_table_schema.as_arrow(); - - selection_cols - .into_iter() - .filter(|col| arrow_schema.fields().iter().any(|f| f.name() == col)) - .collect() -} diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index 1a5bf26681..07cf132608 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -8,15 +8,14 @@ use crate::{ stringset::{StringSet, StringSetRef}, ExecutionContextProvider, Executor, ExecutorType, IOxSessionContext, }, - Predicate, PredicateMatch, QueryChunk, QueryChunkMeta, QueryCompletedToken, QueryDatabase, - QueryText, + Predicate, PredicateMatch, QueryChunk, QueryChunkData, QueryChunkMeta, QueryCompletedToken, + QueryDatabase, QueryText, }; use arrow::{ array::{ ArrayRef, DictionaryArray, Int64Array, StringArray, TimestampNanosecondArray, UInt64Array, }, datatypes::{DataType, Int32Type, TimeUnit}, - error::ArrowError, record_batch::RecordBatch, }; use async_trait::async_trait; @@ -24,9 +23,7 @@ use data_types::{ ChunkId, ChunkOrder, ColumnSummary, DeletePredicate, InfluxDbType, PartitionId, StatValues, Statistics, TableSummary, }; -use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream}; -use datafusion_util::stream_from_batches; -use futures::StreamExt; +use datafusion::error::DataFusionError; use hashbrown::HashSet; use observability_deps::tracing::debug; use parking_lot::Mutex; @@ -949,34 +946,8 @@ impl QueryChunk for TestChunk { self.may_contain_pk_duplicates } - fn read_filter( - &self, - _ctx: IOxSessionContext, - predicate: &Predicate, - selection: Selection<'_>, - ) -> Result<SendableRecordBatchStream, DataFusionError> { - self.check_error()?; - - // save the predicate - self.predicates.lock().push(predicate.clone()); - - let batches = match self - .schema - .df_projection(selection) - .map_err(|e| DataFusionError::External(Box::new(e)))? 
- { - None => self.table_data.clone(), - Some(projection) => self - .table_data - .iter() - .map(|batch| { - let batch = batch.project(&projection)?; - Ok(Arc::new(batch)) - }) - .collect::<std::result::Result<Vec<_>, ArrowError>>()?, - }; - - Ok(stream_from_batches(self.schema().as_arrow(), batches)) + fn data(&self) -> QueryChunkData { + QueryChunkData::RecordBatches(self.table_data.iter().map(|b| b.as_ref().clone()).collect()) } fn chunk_type(&self) -> &str { @@ -1071,17 +1042,10 @@ impl QueryChunkMeta for TestChunk { /// Return the raw data from the list of chunks pub async fn raw_data(chunks: &[Arc<dyn QueryChunk>]) -> Vec<RecordBatch> { + let ctx = IOxSessionContext::with_testing(); let mut batches = vec![]; for c in chunks { - let pred = Predicate::default(); - let selection = Selection::All; - let mut stream = c - .read_filter(IOxSessionContext::with_testing(), &pred, selection) - .expect("Error in read_filter"); - while let Some(b) = stream.next().await { - let b = b.expect("Error in stream"); - batches.push(b) - } + batches.append(&mut c.data().read_to_batches(c.schema(), ctx.inner()).await); } batches } diff --git a/iox_tests/src/util.rs b/iox_tests/src/util.rs index d6958cbe79..2d26075c1b 100644 --- a/iox_tests/src/util.rs +++ b/iox_tests/src/util.rs @@ -30,7 +30,6 @@ use parquet_file::{ metadata::IoxMetadata, storage::{ParquetStorage, StorageId}, }; -use predicate::Predicate; use schema::{ selection::Selection, sort::{adjust_sort_key_columns, compute_sort_key, SortKey}, @@ -389,14 +388,13 @@ impl TestTable { Arc::new(schema), self.catalog.parquet_store.clone(), ); - let rx = chunk - .read_filter( - &Predicate::default(), + chunk + .parquet_exec_input() + .read_to_batches( + chunk.schema().as_arrow(), Selection::All, &chunk.store().test_df_context(), ) - .unwrap(); - datafusion::physical_plan::common::collect(rx) .await .unwrap() } diff --git a/parquet_file/src/chunk.rs b/parquet_file/src/chunk.rs index cf3b8488fc..22e04ef780 100644 --- a/parquet_file/src/chunk.rs +++ b/parquet_file/src/chunk.rs @@ -1,10 +1,11 @@ //! A metadata summary of a Parquet file in object storage, with the ability to //! download & execute a scan. -use crate::{storage::ParquetStorage, ParquetFilePath}; +use crate::{ + storage::{ParquetExecInput, ParquetStorage}, + ParquetFilePath, +}; use data_types::{ParquetFile, TimestampMinMax}; -use datafusion::{physical_plan::SendableRecordBatchStream, prelude::SessionContext}; -use predicate::Predicate; use schema::{selection::Selection, Schema}; use std::{collections::BTreeSet, mem, sync::Arc}; use uuid::Uuid; @@ -78,21 +79,14 @@ impl ParquetChunk { } /// Return stream of data read from parquet file - pub fn read_filter( - &self, - predicate: &Predicate, - selection: Selection<'_>, - session_ctx: &SessionContext, - ) -> Result<SendableRecordBatchStream, crate::storage::ReadError> { + /// Inputs for [`ParquetExec`]. + /// + /// See [`ParquetExecInput`] for more information. + /// + /// [`ParquetExec`]: datafusion::physical_plan::file_format::ParquetExec + pub fn parquet_exec_input(&self) -> ParquetExecInput { let path: ParquetFilePath = self.parquet_file.as_ref().into(); - self.store.read_filter( - predicate, - selection, - Arc::clone(&self.schema.as_arrow()), - &path, - self.file_size_bytes(), - session_ctx, - ) + self.store.parquet_exec_input(&path, self.file_size_bytes()) } /// The total number of rows in all row groups in this chunk. 
diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs index f4f3b7c6b9..0f245101f0 100644 --- a/parquet_file/src/storage.rs +++ b/parquet_file/src/storage.rs @@ -6,27 +6,26 @@ use crate::{ serialize::{self, CodecError}, ParquetFilePath, }; -use arrow::datatypes::{Field, SchemaRef}; +use arrow::{ + datatypes::{Field, SchemaRef}, + record_batch::RecordBatch, +}; use bytes::Bytes; use datafusion::{ datasource::{listing::PartitionedFile, object_store::ObjectStoreUrl}, + error::DataFusionError, execution::context::TaskContext, physical_plan::{ - execute_stream, file_format::{FileScanConfig, ParquetExec}, - stream::RecordBatchStreamAdapter, - SendableRecordBatchStream, Statistics, + ExecutionPlan, SendableRecordBatchStream, Statistics, }, prelude::SessionContext, }; use datafusion_util::config::iox_session_config; -use futures::TryStreamExt; use object_store::{DynObjectStore, ObjectMeta}; use observability_deps::tracing::*; -use predicate::Predicate; use schema::selection::{select_schema, Selection}; use std::{ - num::TryFromIntError, sync::Arc, time::{Duration, Instant}, }; @@ -53,38 +52,6 @@ pub enum UploadError { Upload(#[from] object_store::Error), } -/// Errors during Parquet file download & scan. -#[derive(Debug, Error)] -#[allow(clippy::large_enum_variant)] -pub enum ReadError { - /// Error writing the bytes fetched from object store to the temporary - /// parquet file on disk. - #[error("i/o error writing downloaded parquet: {0}")] - IO(#[from] std::io::Error), - - /// An error fetching Parquet file bytes from object store. - #[error("failed to read data from object store: {0}")] - ObjectStore(#[from] object_store::Error), - - /// An error reading the downloaded Parquet file. - #[error("invalid parquet file: {0}")] - Parquet(#[from] parquet::errors::ParquetError), - - /// Schema mismatch - #[error("Schema mismatch (expected VS actual parquet file) for file '{path}': {source}")] - SchemaMismatch { - /// Path of the affected parquet file. - path: object_store::path::Path, - - /// Source error - source: ProjectionError, - }, - - /// Malformed integer data for row count - #[error("Malformed row count integer")] - MalformedRowCount(#[from] TryFromIntError), -} - /// ID for an object store hooked up into DataFusion. #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub struct StorageId(&'static str); @@ -107,6 +74,68 @@ impl std::fmt::Display for StorageId { } } +/// Inputs required to build a [`ParquetExec`] for one or multiple files. +/// +/// The files shall be grouped by [`object_store_url`](Self::object_store_url). For each each object store, you shall +/// create one [`ParquetExec`] and put each file into its own "file group". +/// +/// [`ParquetExec`]: datafusion::physical_plan::file_format::ParquetExec +#[derive(Debug)] +pub struct ParquetExecInput { + /// Store where the file is located. + pub object_store_url: ObjectStoreUrl, + + /// Object metadata. + pub object_meta: ObjectMeta, +} + +impl ParquetExecInput { + /// Read parquet file into [`RecordBatch`]es. + /// + /// This should only be used for testing purposes. 
+ pub async fn read_to_batches( + &self, + schema: SchemaRef, + selection: Selection<'_>, + session_ctx: &SessionContext, + ) -> Result<Vec<RecordBatch>, DataFusionError> { + // Compute final (output) schema after selection + let schema = Arc::new( + select_schema(selection, &schema) + .as_ref() + .clone() + .with_metadata(Default::default()), + ); + + let base_config = FileScanConfig { + object_store_url: self.object_store_url.clone(), + file_schema: schema, + file_groups: vec![vec![PartitionedFile { + object_meta: self.object_meta.clone(), + partition_values: vec![], + range: None, + extensions: None, + }]], + statistics: Statistics::default(), + projection: None, + limit: None, + table_partition_cols: vec![], + // TODO avoid this `copied_config` when config_options are directly available on context + config_options: session_ctx.copied_config().config_options(), + }; + let exec = ParquetExec::new(base_config, None, None); + let exec_schema = exec.schema(); + datafusion::physical_plan::collect(Arc::new(exec), session_ctx.task_ctx()) + .await + .map(|batches| { + for batch in &batches { + assert_eq!(batch.schema(), exec_schema); + } + batches + }) + } +} + /// The [`ParquetStorage`] type encapsulates [`RecordBatch`] persistence to an /// underlying [`ObjectStore`]. /// @@ -220,72 +249,22 @@ impl ParquetStorage { Ok((parquet_meta, file_size)) } - /// Pull the Parquet-encoded [`RecordBatch`] at the file path derived from - /// the provided [`ParquetFilePath`]. - /// - /// The `selection` projection is pushed down to the Parquet deserializer. + /// Inputs for [`ParquetExec`]. /// - /// This impl fetches the associated Parquet file bytes from object storage, - /// temporarily persisting them to a local temp file to feed to the arrow - /// reader. + /// See [`ParquetExecInput`] for more information. /// - /// No caching is performed by `read_filter()`, and each call to - /// `read_filter()` will re-download the parquet file unless the underlying - /// object store impl caches the fetched bytes. 
- /// - /// [`RecordBatch`]: arrow::record_batch::RecordBatch - pub fn read_filter( - &self, - predicate: &Predicate, - selection: Selection<'_>, - schema: SchemaRef, - path: &ParquetFilePath, - file_size: usize, - session_ctx: &SessionContext, - ) -> Result<SendableRecordBatchStream, ReadError> { - let path = path.object_store_path(); - trace!(path=?path, "fetching parquet data for filtered read"); - - // Compute final (output) schema after selection - let schema = Arc::new( - select_schema(selection, &schema) - .as_ref() - .clone() - .with_metadata(Default::default()), - ); - - // create ParquetExec node - let object_meta = ObjectMeta { - location: path, - // we don't care about the "last modified" field - last_modified: Default::default(), - size: file_size, - }; - let expr = predicate.filter_expr(); - let base_config = FileScanConfig { + /// [`ParquetExec`]: datafusion::physical_plan::file_format::ParquetExec + pub fn parquet_exec_input(&self, path: &ParquetFilePath, file_size: usize) -> ParquetExecInput { + ParquetExecInput { object_store_url: ObjectStoreUrl::parse(format!("iox://{}/", self.id)) .expect("valid object store URL"), - file_schema: Arc::clone(&schema), - file_groups: vec![vec![PartitionedFile { - object_meta, - partition_values: vec![], - range: None, - extensions: None, - }]], - statistics: Statistics::default(), - projection: None, - limit: None, - table_partition_cols: vec![], - // TODO avoid this `copied_config` when config_options are directly available on context - config_options: session_ctx.copied_config().config_options(), - }; - let exec = ParquetExec::new(base_config, expr, None); - - Ok(Box::pin(RecordBatchStreamAdapter::new( - Arc::clone(&schema), - futures::stream::once(execute_stream(Arc::new(exec), session_ctx.task_ctx())) - .try_flatten(), - ))) + object_meta: ObjectMeta { + location: path.object_store_path(), + // we don't care about the "last modified" field + last_modified: Default::default(), + size: file_size, + }, + } } } @@ -598,24 +577,13 @@ mod tests { file_size: usize, ) -> Result<RecordBatch, DataFusionError> { let path: ParquetFilePath = meta.into(); - let rx = store - .read_filter( - &Predicate::default(), - selection, - expected_schema, - &path, - file_size, - &store.test_df_context(), - ) - .expect("should read record batches from object store"); - let schema = rx.schema(); - datafusion::physical_plan::common::collect(rx) + store + .parquet_exec_input(&path, file_size) + .read_to_batches(expected_schema, selection, &store.test_df_context()) .await .map(|mut batches| { assert_eq!(batches.len(), 1); - let batch = batches.remove(0); - assert_eq!(batch.schema(), schema); - batch + batches.remove(0) }) } diff --git a/querier/src/chunk/mod.rs b/querier/src/chunk/mod.rs index 8b8e73272e..03a2e89082 100644 --- a/querier/src/chunk/mod.rs +++ b/querier/src/chunk/mod.rs @@ -340,14 +340,13 @@ pub mod tests { use arrow::{datatypes::DataType, record_batch::RecordBatch}; use arrow_util::assert_batches_eq; use data_types::{ColumnType, NamespaceSchema}; - use futures::StreamExt; use iox_query::{ exec::{ExecutorType, IOxSessionContext}, QueryChunk, QueryChunkMeta, }; use iox_tests::util::{TestCatalog, TestNamespace, TestParquetFileBuilder}; use metric::{Attributes, Observation, RawReporter}; - use schema::{builder::SchemaBuilder, selection::Selection, sort::SortKeyBuilder}; + use schema::{builder::SchemaBuilder, sort::SortKeyBuilder}; use test_helpers::maybe_start_logging; use tokio::runtime::Handle; @@ -397,13 +396,9 @@ pub mod tests { ctx: 
IOxSessionContext, ) -> Vec<RecordBatch> { chunk - .read_filter(ctx, &Default::default(), Selection::All) - .unwrap() - .collect::<Vec<_>>() + .data() + .read_to_batches(chunk.schema(), ctx.inner()) .await - .into_iter() - .map(Result::unwrap) - .collect() } struct TestData { diff --git a/querier/src/chunk/query_access.rs b/querier/src/chunk/query_access.rs index bcc9f29b8d..3b2765f929 100644 --- a/querier/src/chunk/query_access.rs +++ b/querier/src/chunk/query_access.rs @@ -1,35 +1,13 @@ use crate::chunk::QuerierChunk; use data_types::{ChunkId, ChunkOrder, DeletePredicate, PartitionId, TableSummary}; -use datafusion::{error::DataFusionError, physical_plan::SendableRecordBatchStream}; +use datafusion::error::DataFusionError; use iox_query::{ exec::{stringset::StringSet, IOxSessionContext}, - QueryChunk, QueryChunkMeta, + QueryChunk, QueryChunkData, QueryChunkMeta, }; -use observability_deps::tracing::debug; use predicate::Predicate; use schema::{selection::Selection, sort::SortKey, Schema}; -use snafu::{ResultExt, Snafu}; use std::{any::Any, sync::Arc}; -use trace::span::SpanRecorder; - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("Parquet File Error in chunk {}: {}", chunk_id, source))] - ParquetFileChunk { - source: Box<parquet_file::storage::ReadError>, - chunk_id: ChunkId, - }, - - #[snafu(display( - "Could not find column name '{}' in read buffer column_values results for chunk {}", - column_name, - chunk_id, - ))] - ColumnNameNotFound { - column_name: String, - chunk_id: ChunkId, - }, -} impl QueryChunkMeta for QuerierChunk { fn summary(&self) -> Arc<TableSummary> { @@ -103,42 +81,8 @@ impl QueryChunk for QuerierChunk { Ok(None) } - fn read_filter( - &self, - mut ctx: IOxSessionContext, - predicate: &Predicate, - selection: Selection<'_>, - ) -> Result<SendableRecordBatchStream, DataFusionError> { - let span_recorder = SpanRecorder::new( - ctx.span() - .map(|span| span.child("QuerierChunk::read_filter")), - ); - let delete_predicates: Vec<_> = self - .delete_predicates() - .iter() - .map(|pred| Arc::new(pred.as_ref().clone().into())) - .collect(); - ctx.set_metadata("delete_predicates", delete_predicates.len() as i64); - - // merge the negated delete predicates into the select predicate - let pred_with_deleted_exprs = predicate.clone().with_delete_predicates(&delete_predicates); - debug!(?pred_with_deleted_exprs, "Merged negated predicate"); - - ctx.set_metadata("predicate", format!("{}", &pred_with_deleted_exprs)); - ctx.set_metadata("projection", format!("{}", selection)); - ctx.set_metadata("storage", "parquet"); - - let chunk_id = self.id(); - debug!(?predicate, "parquet read_filter"); - - // TODO(marco): propagate span all the way down to the object store cache access - let _span_recorder = span_recorder; - - self.parquet_chunk - .read_filter(&pred_with_deleted_exprs, selection, ctx.inner()) - .map_err(Box::new) - .context(ParquetFileChunkSnafu { chunk_id }) - .map_err(|e| DataFusionError::External(Box::new(e))) + fn data(&self) -> QueryChunkData { + QueryChunkData::Parquet(self.parquet_chunk.parquet_exec_input()) } fn chunk_type(&self) -> &str { diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index e655d6436f..0ab1cd04cf 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -12,7 +12,6 @@ use data_types::{ TableSummary, TimestampMinMax, }; use datafusion::error::DataFusionError; -use datafusion_util::MemoryStream; use futures::{stream::FuturesUnordered, TryStreamExt}; use generated_types::{ 
influxdata::iox::ingester::v1::GetWriteInfoResponse, @@ -25,7 +24,7 @@ use influxdb_iox_client::flight::{ use iox_query::{ exec::{stringset::StringSet, IOxSessionContext}, util::{compute_timenanosecond_min_max, create_basic_summary}, - QueryChunk, QueryChunkMeta, + QueryChunk, QueryChunkData, QueryChunkMeta, }; use iox_time::{Time, TimeProvider}; use metric::{DurationHistogram, Metric}; @@ -1111,30 +1110,8 @@ impl QueryChunk for IngesterChunk { Ok(None) } - fn read_filter( - &self, - _ctx: IOxSessionContext, - predicate: &Predicate, - selection: Selection<'_>, - ) -> Result<datafusion::physical_plan::SendableRecordBatchStream, DataFusionError> { - trace!(?predicate, ?selection, input_batches=?self.batches, "Reading data"); - - // Apply selection to in-memory batch - let batches = match self - .schema - .df_projection(selection) - .map_err(|e| DataFusionError::External(Box::new(e)))? - { - None => self.batches.clone(), - Some(projection) => self - .batches - .iter() - .map(|batch| batch.project(&projection)) - .collect::<std::result::Result<Vec<_>, ArrowError>>()?, - }; - trace!(?predicate, ?selection, output_batches=?batches, input_batches=?self.batches, "Reading data"); - - Ok(Box::pin(MemoryStream::new(batches))) + fn data(&self) -> QueryChunkData { + QueryChunkData::RecordBatches(self.batches.clone()) } fn chunk_type(&self) -> &str { diff --git a/querier/src/ingester/test_util.rs b/querier/src/ingester/test_util.rs index 39ae18531d..8328ef2c79 100644 --- a/querier/src/ingester/test_util.rs +++ b/querier/src/ingester/test_util.rs @@ -1,10 +1,8 @@ use super::IngesterConnection; -use arrow::record_batch::RecordBatch; use async_trait::async_trait; use data_types::ShardIndex; -use futures::StreamExt; use generated_types::influxdata::iox::ingester::v1::GetWriteInfoResponse; -use iox_query::{exec::IOxSessionContext, util::create_basic_summary, QueryChunk}; +use iox_query::util::create_basic_summary; use parking_lot::Mutex; use schema::selection::Selection; use schema::Schema as IOxSchema; @@ -38,7 +36,7 @@ impl IngesterConnection for MockIngesterConnection { _namespace_name: Arc<str>, _table_name: Arc<str>, columns: Vec<String>, - predicate: &predicate::Predicate, + _predicate: &predicate::Predicate, _expected_schema: Arc<schema::Schema>, _span: Option<Span>, ) -> super::Result<Vec<super::IngesterPartition>> { @@ -77,14 +75,14 @@ impl IngesterConnection for MockIngesterConnection { .chunks .into_iter() .map(|ic| async move { - let mut batches: Vec<RecordBatch> = vec![]; - let mut stream = ic - .read_filter(IOxSessionContext::with_testing(), predicate, selection) - .expect("Error in read_filter"); - while let Some(b) = stream.next().await { - let b = b.expect("Error in stream"); - batches.push(b) - } + let batches: Vec<_> = ic + .batches + .iter() + .map(|batch| match ic.schema.df_projection(selection).unwrap() { + Some(projection) => batch.project(&projection).unwrap(), + None => batch.clone(), + }) + .collect(); assert!(!batches.is_empty(), "Error: empty batches"); let new_schema = IOxSchema::try_from(batches[0].schema()).unwrap(); diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs index 6d7d6b4335..e049335eaa 100644 --- a/querier/src/table/mod.rs +++ b/querier/src/table/mod.rs @@ -520,7 +520,7 @@ mod tests { use iox_query::exec::IOxSessionContext; use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestTable}; use predicate::Predicate; - use schema::{builder::SchemaBuilder, selection::Selection, InfluxFieldType}; + use schema::{builder::SchemaBuilder, InfluxFieldType}; 
use std::sync::Arc; use test_helpers::maybe_start_logging; use trace::{span::SpanStatus, RingBufferTraceCollector}; @@ -711,8 +711,8 @@ mod tests { .await .unwrap(); assert_eq!(chunks.len(), 1); - let chunk = &chunks[0]; + assert_eq!(chunk.chunk_type(), "IngesterPartition"); // verify chunk schema let schema = chunk.schema(); @@ -739,17 +739,9 @@ mod tests { // verify chunk data let batches = chunk - .read_filter( - IOxSessionContext::with_testing(), - &Default::default(), - Selection::All, - ) - .unwrap() - .collect::<Vec<_>>() - .await - .into_iter() - .map(Result::unwrap) - .collect::<Vec<_>>(); + .data() + .read_to_batches(chunk.schema(), IOxSessionContext::with_testing().inner()) + .await; let expected = vec![ "+-----+------+------+--------------------------------+", "| foo | tag1 | tag2 | time |",
c834ec171fd80844ca52d6723dff2b9803e5adc1
Fraser Savage
2023-07-24 13:07:04
Custom partition template API creation using `time` tag value is rejected
This removes the double negative from the error message and adds coverage at the router's gRPC API level for the rejection of the bad TagValue.
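A minimal, hedged sketch of the validation behaviour this commit exercises: a partition template part that names the reserved `time` column as a `TagValue` is rejected with the corrected error message. The types, function name, and module layout below are illustrative stand-ins, not the exact `data_types::partition_template` code touched in the diff.

```rust
/// Illustrative sketch only: reject a partition template part that uses the
/// reserved `time` column as a tag value.
#[derive(Debug, PartialEq)]
enum TemplatePart {
    TagValue(String),
    TimeFormat(String),
}

#[derive(Debug, PartialEq)]
enum ValidationError {
    InvalidTagValue(String),
}

fn validate_parts(parts: &[TemplatePart]) -> Result<(), ValidationError> {
    const TAG_VALUE_KEY_TIME: &str = "time";
    for part in parts {
        if let TemplatePart::TagValue(value) = part {
            if value.contains(TAG_VALUE_KEY_TIME) {
                // Matches the corrected message asserted by the gRPC test in the diff.
                return Err(ValidationError::InvalidTagValue(format!(
                    "{TAG_VALUE_KEY_TIME} cannot be used"
                )));
            }
        }
    }
    Ok(())
}

fn main() {
    // A strftime-style time format part is fine; a `time` tag value is not.
    assert!(validate_parts(&[TemplatePart::TimeFormat("%Y".into())]).is_ok());
    assert_eq!(
        validate_parts(&[TemplatePart::TagValue("time".into())]),
        Err(ValidationError::InvalidTagValue("time cannot be used".into()))
    );
}
```

The gRPC test added in the diff asserts the same behaviour end to end: a `CreateNamespaceRequest` whose template contains `TagValue("time")` is answered with `Code::InvalidArgument` and the corrected message.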
null
test(router): Custom partition template API creation using `time` tag value is rejected This removes the double negative from the error message and adds coverage at the router's gRPC API level for the rejection of the bad TagValue.
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs index 97e5965a82..40a496c0d3 100644 --- a/data_types/src/partition_template.rs +++ b/data_types/src/partition_template.rs @@ -451,7 +451,7 @@ mod serialization { if value.contains(TAG_VALUE_KEY_TIME) { return Err(ValidationError::InvalidTagValue(format!( - "{TAG_VALUE_KEY_TIME} cannot not be used" + "{TAG_VALUE_KEY_TIME} cannot be used" ))); } } diff --git a/router/tests/grpc.rs b/router/tests/grpc.rs index e7057282ca..3e3e5477f9 100644 --- a/router/tests/grpc.rs +++ b/router/tests/grpc.rs @@ -981,6 +981,38 @@ async fn test_invalid_strftime_partition_template() { ); } +#[tokio::test] +async fn test_invalid_tag_value_partition_template() { + // Initialise a TestContext without a namespace autocreation policy. + let ctx = TestContextBuilder::default().build().await; + + // Explicitly create a namespace with a custom partition template. + let req = CreateNamespaceRequest { + name: "bananas_test".to_string(), + retention_period_ns: None, + partition_template: Some(PartitionTemplate { + parts: vec![TemplatePart { + part: Some(template_part::Part::TagValue("time".into())), + }], + }), + service_protection_limits: None, + }; + + // Check namespace creation returned an error + let got = ctx + .grpc_delegate() + .namespace_service() + .create_namespace(Request::new(req)) + .await; + + assert_error!( + got, + ref status + if status.code() == Code::InvalidArgument + && status.message() == "invalid tag value in partition template: time cannot be used" + ); +} + #[tokio::test] async fn test_namespace_partition_template_implicit_table_creation() { // Initialise a TestContext without a namespace autocreation policy.
4c54d100981423a8f8f70e085171578cb6963117
Fraser Savage
2023-07-13 12:20:58
Simplify test code and reference actor handle passing
As pointed out, use of the turbofish for the `MockPersistQueue` default constructor can be avoided by a specialised `Default` implementation on the type. The WAL reference actor handle is internally refcounted, so this commit also stops wrapping it in an `Arc`.
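The turbofish point is easiest to see in isolation. Below is a small, self-contained sketch (illustrative names only, not the real `MockPersistQueue`) of the pattern: keep the type generic over its observer, but provide a `Default` impl specialised to the no-op observer so test code can write `MockQueue::default()` without spelling out the type parameter.

```rust
/// Illustrative no-op observer; stand-in for the real `NopObserver`.
#[derive(Debug, Default, Clone)]
struct NopObserver;

/// Illustrative generic mock; stand-in for the real `MockPersistQueue<O>`.
#[derive(Debug)]
struct MockQueue<O> {
    completion_observer: O,
    calls: Vec<String>,
}

// Specialised Default: lets callers write `MockQueue::default()` instead of
// the turbofish form `MockQueue::<NopObserver>::default()`.
impl Default for MockQueue<NopObserver> {
    fn default() -> Self {
        Self {
            completion_observer: NopObserver,
            calls: Vec::new(),
        }
    }
}

impl<O> MockQueue<O> {
    /// Construct a mock that notifies a caller-supplied observer.
    fn new_with_observer(completion_observer: O) -> Self {
        Self {
            completion_observer,
            calls: Vec::new(),
        }
    }
}

fn main() {
    // No turbofish needed when the default (no-op) observer is wanted.
    let q = MockQueue::default();
    assert!(q.calls.is_empty());
    println!("observer: {:?}", q.completion_observer);

    // Tests that care about completions pass an owned, cloneable observer,
    // mirroring how the commit passes `wal_reference_handle.clone()` around
    // instead of wrapping the handle in an outer `Arc`.
    let _with_handle = MockQueue::new_with_observer(NopObserver);
}
```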
Co-authored-by: Dom <[email protected]>
refactor(ingester): Simplify test code and reference actor handle passing As pointed out, use of the turbofish for the `MockPersistQueue` default constructor can be avoided by a specialised `Default` implementation on the type. The WAL reference actor handle is internally refcounted, so this commit also stops wrapping it in an `Arc`. Co-authored-by: Dom <[email protected]>
diff --git a/ingester/benches/wal.rs b/ingester/benches/wal.rs index c383615a19..d8eb0fbc3e 100644 --- a/ingester/benches/wal.rs +++ b/ingester/benches/wal.rs @@ -8,7 +8,7 @@ use generated_types::influxdata::{ }; use ingester::internal_implementation_details::{ encode::encode_write_op, - queue::{MockPersistQueue, NopObserver}, + queue::MockPersistQueue, write::{ PartitionedData as PayloadPartitionedData, TableData as PayloadTableData, WriteOperation, }, @@ -63,7 +63,7 @@ fn wal_replay_bench(c: &mut Criterion) { // overhead. let sink = NopSink::default(); - let persist = MockPersistQueue::<NopObserver>::default(); + let persist = MockPersistQueue::default(); // Replay the wal into the NOP. ingester::replay(&wal, &sink, Arc::new(persist), &metric::Registry::default()) diff --git a/ingester/src/init.rs b/ingester/src/init.rs index dfc5b64d8f..e03eae4c02 100644 --- a/ingester/src/init.rs +++ b/ingester/src/init.rs @@ -294,7 +294,6 @@ where // Prepare the WAL segment reference tracker let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metrics); - let wal_reference_handle = Arc::new(wal_reference_handle); // Spawn the persist workers to compact partition data, convert it into // Parquet files, and upload them to object storage. @@ -308,7 +307,7 @@ where // Register a post-persistence observer that emits Parquet file // attributes as metrics, and notifies the WAL segment reference tracker of // completed persist actions. - ParquetFileInstrumentation::new(Arc::clone(&wal_reference_handle), &metrics), + ParquetFileInstrumentation::new(wal_reference_handle.clone(), &metrics), &metrics, ); let persist_handle = Arc::new(persist_handle); @@ -337,6 +336,8 @@ where )); // Start the WAL reference actor and then replay the WAL log files, if any. + // The tokio handle does not need retained here as the actor handle is + // responsible for aborting the actor's run loop when dropped. 
tokio::spawn(wal_reference_actor.run()); let max_sequence_number = wal_replay::replay(&wal, &buffer, Arc::clone(&persist_handle), &metrics) @@ -355,7 +356,7 @@ where &metrics, ), Arc::clone(&wal), - Arc::clone(&wal_reference_handle), + wal_reference_handle.clone(), ), "wal", ), @@ -376,7 +377,7 @@ where let rotation_task = tokio::spawn(periodic_rotation( Arc::clone(&wal), wal_rotation_period, - Arc::clone(&wal_reference_handle), + wal_reference_handle.clone(), Arc::clone(&buffer), Arc::clone(&persist_handle), )); @@ -399,7 +400,7 @@ where Arc::clone(&buffer), Arc::clone(&persist_handle), Arc::clone(&wal), - Arc::clone(&wal_reference_handle), + wal_reference_handle, )); Ok(IngesterGuard { diff --git a/ingester/src/init/graceful_shutdown.rs b/ingester/src/init/graceful_shutdown.rs index c63014330d..94902f6941 100644 --- a/ingester/src/init/graceful_shutdown.rs +++ b/ingester/src/init/graceful_shutdown.rs @@ -41,7 +41,7 @@ pub(super) async fn graceful_shutdown_handler<F, T, P>( buffer: T, persist: P, wal: Arc<wal::Wal>, - wal_reference_handle: Arc<WalReferenceHandle>, + wal_reference_handle: WalReferenceHandle, ) where F: Future<Output = CancellationToken> + Send, T: PartitionIter + Sync, @@ -170,10 +170,9 @@ mod tests { let (_tempdir, wal) = new_wal().await; let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metric::Registry::default()); - let wal_reference_handle = Arc::new(wal_reference_handle); - let persist = Arc::new(MockPersistQueue::new_with_observer(Arc::clone( - &wal_reference_handle, - ))); + let persist = Arc::new(MockPersistQueue::new_with_observer( + wal_reference_handle.clone(), + )); tokio::spawn(wal_reference_actor.run()); // Ensure there is always more than 1 segment in the test, but notify the ref tracker. @@ -193,7 +192,7 @@ mod tests { vec![Arc::clone(&partition)], Arc::clone(&persist), Arc::clone(&wal), - Arc::clone(&wal_reference_handle), + wal_reference_handle, ) .await; @@ -220,10 +219,9 @@ mod tests { let (_tempdir, wal) = new_wal().await; let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metric::Registry::default()); - let wal_reference_handle = Arc::new(wal_reference_handle); - let persist = Arc::new(MockPersistQueue::new_with_observer(Arc::clone( - &wal_reference_handle, - ))); + let persist = Arc::new(MockPersistQueue::new_with_observer( + wal_reference_handle.clone(), + )); tokio::spawn(wal_reference_actor.run()); // Ensure there is always more than 1 segment in the test, but notify the ref tracker. @@ -251,7 +249,7 @@ mod tests { vec![Arc::clone(&partition)], Arc::clone(&persist), Arc::clone(&wal), - Arc::clone(&wal_reference_handle), + wal_reference_handle, )); // Wait a small duration of time for the first buffer emptiness check to @@ -336,10 +334,9 @@ mod tests { let (_tempdir, wal) = new_wal().await; let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metric::Registry::default()); - let wal_reference_handle = Arc::new(wal_reference_handle); - let persist = Arc::new(MockPersistQueue::new_with_observer(Arc::clone( - &wal_reference_handle, - ))); + let persist = Arc::new(MockPersistQueue::new_with_observer( + wal_reference_handle.clone(), + )); tokio::spawn(wal_reference_actor.run()); // Ensure there is always more than 1 segment in the test, but notify the ref tracker. 
@@ -364,7 +361,7 @@ mod tests { Arc::clone(&buffer), Arc::clone(&persist), Arc::clone(&wal), - Arc::clone(&wal_reference_handle), + wal_reference_handle.clone(), )); // Wait for the shutdown to complete. diff --git a/ingester/src/init/wal_replay.rs b/ingester/src/init/wal_replay.rs index 5cf7540e4e..4bb0c57a38 100644 --- a/ingester/src/init/wal_replay.rs +++ b/ingester/src/init/wal_replay.rs @@ -279,7 +279,7 @@ mod tests { buffer_tree::partition::PartitionData, dml_payload::IngestOp, dml_sink::mock_sink::MockDmlSink, - persist::{completion_observer::NopObserver, queue::mock::MockPersistQueue}, + persist::queue::mock::MockPersistQueue, test_util::{ assert_write_ops_eq, make_multi_table_write_op, make_write_op, PartitionDataBuilder, ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_ID, ARBITRARY_PARTITION_KEY, @@ -417,7 +417,7 @@ mod tests { assert_eq!(wal.closed_segments().len(), 2); // Initialise the mock persist system - let persist = Arc::new(MockPersistQueue::<NopObserver>::default()); + let persist = Arc::new(MockPersistQueue::default()); // Replay the results into a mock to capture the DmlWrites and returns // some dummy partitions when iterated over. diff --git a/ingester/src/persist/hot_partitions.rs b/ingester/src/persist/hot_partitions.rs index cfc5976cd6..06cc633c72 100644 --- a/ingester/src/persist/hot_partitions.rs +++ b/ingester/src/persist/hot_partitions.rs @@ -109,7 +109,7 @@ mod tests { use parking_lot::Mutex; use crate::{ - persist::{completion_observer::NopObserver, queue::mock::MockPersistQueue}, + persist::queue::mock::MockPersistQueue, test_util::{PartitionDataBuilder, ARBITRARY_TABLE_NAME}, }; @@ -131,7 +131,7 @@ mod tests { let p = Arc::new(Mutex::new(p)); let metrics = metric::Registry::default(); - let persist_handle = Arc::new(MockPersistQueue::<NopObserver>::default()); + let persist_handle = Arc::new(MockPersistQueue::default()); let hot_partition_persister = HotPartitionPersister::new(Arc::clone(&persist_handle), max_cost, &metrics); diff --git a/ingester/src/persist/queue.rs b/ingester/src/persist/queue.rs index 7e549cc212..dbb9714e5c 100644 --- a/ingester/src/persist/queue.rs +++ b/ingester/src/persist/queue.rs @@ -83,10 +83,19 @@ pub(crate) mod mock { } /// A mock [`PersistQueue`] implementation. - #[derive(Debug, Default)] - pub struct MockPersistQueue<O = NopObserver> { + #[derive(Debug)] + pub struct MockPersistQueue<O> { state: Mutex<State>, - completion_observer: Arc<O>, + completion_observer: O, + } + + impl Default for MockPersistQueue<NopObserver> { + fn default() -> Self { + Self { + state: Default::default(), + completion_observer: Default::default(), + } + } } impl<O> MockPersistQueue<O> @@ -95,7 +104,7 @@ pub(crate) mod mock { { /// Creates a queue that notifies the [`PersistCompletionObserver`] /// on persist enqueue completion. 
- pub fn new_with_observer(completion_observer: Arc<O>) -> Self { + pub fn new_with_observer(completion_observer: O) -> Self { Self { state: Default::default(), completion_observer, @@ -122,7 +131,7 @@ pub(crate) mod mock { #[async_trait] impl<O> PersistQueue for MockPersistQueue<O> where - O: PersistCompletionObserver + 'static, + O: PersistCompletionObserver + Clone + 'static, { #[allow(clippy::async_yields_async)] async fn enqueue( @@ -135,7 +144,7 @@ pub(crate) mod mock { let mut guard = self.state.lock(); guard.calls.push(Arc::clone(&partition)); - let completion_observer = Arc::clone(&self.completion_observer); + let completion_observer = self.completion_observer.clone(); // Spawn a persist task that randomly completes (soon) in the // future. // diff --git a/ingester/src/wal/rotate_task.rs b/ingester/src/wal/rotate_task.rs index 0afbb1fbf2..cd7ecf3685 100644 --- a/ingester/src/wal/rotate_task.rs +++ b/ingester/src/wal/rotate_task.rs @@ -12,7 +12,7 @@ use crate::{ pub(crate) async fn periodic_rotation<T, P>( wal: Arc<wal::Wal>, period: Duration, - wal_reference_handle: Arc<WalReferenceHandle>, + wal_reference_handle: WalReferenceHandle, buffer: T, persist: P, ) where @@ -102,7 +102,7 @@ mod tests { use crate::{ buffer_tree::partition::{persisting::PersistingData, PartitionData}, dml_payload::IngestOp, - persist::{completion_observer::NopObserver, queue::mock::MockPersistQueue}, + persist::queue::mock::MockPersistQueue, test_util::{ make_write_op, new_persist_notification, PartitionDataBuilder, ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_ID, ARBITRARY_PARTITION_KEY, ARBITRARY_TABLE_ID, @@ -147,7 +147,7 @@ mod tests { // Initialise a mock persist queue to inspect the calls made to the // persist subsystem. - let persist_handle = Arc::new(MockPersistQueue::<NopObserver>::default()); + let persist_handle = Arc::new(MockPersistQueue::default()); // Initialise the WAL, write the operation to it let tmp_dir = tempdir().expect("no temp dir available"); @@ -175,14 +175,13 @@ mod tests { let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metrics); - let wal_reference_handle = Arc::new(wal_reference_handle); tokio::spawn(wal_reference_actor.run()); // Start the rotation task let rotate_task_handle = tokio::spawn(periodic_rotation( Arc::clone(&wal), TICK_INTERVAL, - Arc::clone(&wal_reference_handle), + wal_reference_handle.clone(), vec![Arc::clone(&p)], Arc::clone(&persist_handle), )); @@ -329,14 +328,13 @@ mod tests { let (wal_reference_handle, wal_reference_actor) = WalReferenceHandle::new(Arc::clone(&wal), &metrics); - let wal_reference_handle = Arc::new(wal_reference_handle); tokio::spawn(wal_reference_actor.run()); // Start the rotation task let rotate_task_handle = tokio::spawn(periodic_rotation( Arc::clone(&wal), TICK_INTERVAL, - Arc::clone(&wal_reference_handle), + wal_reference_handle, vec![Arc::clone(&p)], Arc::clone(&persist_handle), )); diff --git a/ingester/src/wal/wal_sink.rs b/ingester/src/wal/wal_sink.rs index a1f7a1b4dd..fe6a143479 100644 --- a/ingester/src/wal/wal_sink.rs +++ b/ingester/src/wal/wal_sink.rs @@ -142,7 +142,7 @@ impl WalAppender for Arc<wal::Wal> { } #[async_trait] -impl UnbufferedWriteNotifier for Arc<WalReferenceHandle> { +impl UnbufferedWriteNotifier for WalReferenceHandle { async fn notify_failed_write_buffer(&self, set: SequenceNumberSet) { self.enqueue_unbuffered_write(set).await; }
e74a7a7dd4ecd853cce9486a0810a026d56a2358
Fraser Savage
2023-06-27 16:34:04
Test correct assignment of write per-partition sequence numbers
This adds extra test coverage for the ingester's WAL replay & RPC write paths, as well as the WAL E2E tests, to ensure that all sequence numbers present in a WriteOperation/WalOperation are encoded and present when decoded.
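The invariant these tests pin down can be stated as a small round-trip property: a write operation carries one sequence number per table, and whatever is encoded into the WAL must come back unchanged on replay. The sketch below expresses that property with simplified stand-in types; the real code uses `SequencedWalOp`, `TableId`, and the actual WAL encode/decode path exercised in the diff.

```rust
use std::collections::HashMap;

/// Simplified stand-in for a sequenced WAL write: one sequence number per
/// table in the write.
#[derive(Debug, Clone, PartialEq)]
struct SequencedOp {
    table_write_sequence_numbers: HashMap<i64 /* table id */, u64>,
}

/// Stand-in for "encode to the WAL, then decode on replay". The real tests
/// go through the actual segment writer/reader instead of a clone.
fn wal_round_trip(op: &SequencedOp) -> SequencedOp {
    op.clone()
}

fn main() {
    let op = SequencedOp {
        table_write_sequence_numbers: HashMap::from([(0, 42), (1, 43)]),
    };

    let decoded = wal_round_trip(&op);

    // Every per-table sequence number must survive the round trip...
    assert_eq!(op, decoded);

    // ...and no table's number may be collapsed into another's.
    let mut got: Vec<u64> = decoded.table_write_sequence_numbers.into_values().collect();
    got.sort_unstable();
    assert_eq!(got, vec![42, 43]);
}
```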
null
test(wal): Test correct assignment of write per-partition sequence numbers This adds extra test coverage for the ingester's WAL replay & RPC write paths, as well as the WAL E2E tests, to ensure that all sequence numbers present in a WriteOperation/WalOperation are encoded and present when decoded.
diff --git a/ingester/src/init/wal_replay.rs b/ingester/src/init/wal_replay.rs index 4db6fe1341..2eed3cf369 100644 --- a/ingester/src/init/wal_replay.rs +++ b/ingester/src/init/wal_replay.rs @@ -271,6 +271,7 @@ mod tests { use assert_matches::assert_matches; use async_trait::async_trait; + use lazy_static::lazy_static; use metric::{Attributes, Metric}; use parking_lot::Mutex; use wal::Wal; @@ -281,9 +282,9 @@ mod tests { dml_sink::mock_sink::MockDmlSink, persist::queue::mock::MockPersistQueue, test_util::{ - assert_dml_writes_eq, make_write_op, PartitionDataBuilder, ARBITRARY_NAMESPACE_ID, - ARBITRARY_PARTITION_ID, ARBITRARY_PARTITION_KEY, ARBITRARY_TABLE_ID, - ARBITRARY_TABLE_NAME, + assert_write_ops_eq, make_multi_table_write_op, make_write_op, PartitionDataBuilder, + ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_ID, ARBITRARY_PARTITION_KEY, + ARBITRARY_TABLE_ID, ARBITRARY_TABLE_NAME, }, wal::wal_sink::WalSink, }; @@ -311,6 +312,10 @@ mod tests { } } + lazy_static! { + static ref ALTERNATIVE_TABLE_NAME: &'static str = "arán"; + } + #[tokio::test] async fn test_replay() { let dir = tempfile::tempdir().unwrap(); @@ -340,18 +345,30 @@ mod tests { ), None, ); - let op3 = make_write_op( + + // Add a write hitting multiple tables for good measure + let op3 = make_multi_table_write_op( &ARBITRARY_PARTITION_KEY, ARBITRARY_NAMESPACE_ID, - &ARBITRARY_TABLE_NAME, - ARBITRARY_TABLE_ID, - 42, + [ + ( + ARBITRARY_TABLE_NAME.to_string().as_str(), + ARBITRARY_TABLE_ID, + SequenceNumber::new(42), + ), + ( + &ALTERNATIVE_TABLE_NAME, + TableId::new(ARBITRARY_TABLE_ID.get() + 1), + SequenceNumber::new(43), + ), + ] + .into_iter(), // Overwrite op2 &format!( - r#"{},region=Asturias temp=15 4242424242"#, - &*ARBITRARY_TABLE_NAME + r#"{},region=Asturias temp=15 4242424242 + {},region=Mayo temp=12 4242424242"#, + &*ARBITRARY_TABLE_NAME, &*ALTERNATIVE_TABLE_NAME, ), - None, ); // The write portion of this test. @@ -427,7 +444,7 @@ mod tests { .await .expect("failed to replay WAL"); - assert_eq!(max_sequence_number, Some(SequenceNumber::new(42))); + assert_eq!(max_sequence_number, Some(SequenceNumber::new(43))); // Assert the ops were pushed into the DmlSink exactly as generated. let ops = mock_iter.sink.get_calls(); @@ -438,9 +455,9 @@ mod tests { IngestOp::Write(ref w2), IngestOp::Write(ref w3) ] => { - assert_dml_writes_eq(w1.clone(), op1); - assert_dml_writes_eq(w2.clone(), op2); - assert_dml_writes_eq(w3.clone(), op3); + assert_write_ops_eq(w1.clone(), op1); + assert_write_ops_eq(w2.clone(), op2); + assert_write_ops_eq(w3.clone(), op3); } ); diff --git a/ingester/src/server/grpc/rpc_write.rs b/ingester/src/server/grpc/rpc_write.rs index 7d3de046a5..be02dbd92a 100644 --- a/ingester/src/server/grpc/rpc_write.rs +++ b/ingester/src/server/grpc/rpc_write.rs @@ -227,17 +227,22 @@ mod tests { column::{SemanticType, Values}, Column, DatabaseBatch, TableBatch, }; - use std::sync::Arc; + use lazy_static::lazy_static; + use std::{collections::HashSet, sync::Arc}; use super::*; use crate::{ dml_payload::IngestOp, - dml_sink::mock_sink::MockDmlSink, - test_util::{ARBITRARY_NAMESPACE_ID, ARBITRARY_PARTITION_KEY, ARBITRARY_TABLE_ID}, + test_util::{ARBITRARY_NAMESPACE_ID, ARBITRARY_TABLE_ID}, }; + use crate::{dml_sink::mock_sink::MockDmlSink, test_util::ARBITRARY_PARTITION_KEY}; const PERSIST_QUEUE_DEPTH: usize = 42; + lazy_static! { + static ref ALTERNATIVE_TABLE_ID: TableId = TableId::new(76); + } + macro_rules! 
test_rpc_write { ( $name:ident, @@ -316,6 +321,75 @@ mod tests { } ); + test_rpc_write!( + apply_ok_independently_sequenced_partitions, + request = proto::WriteRequest { + payload: Some(DatabaseBatch { + database_id: ARBITRARY_NAMESPACE_ID.get(), + partition_key: ARBITRARY_PARTITION_KEY.to_string(), + table_batches: vec![ + TableBatch { + table_id: ARBITRARY_TABLE_ID.get(), + columns: vec![Column { + column_name: String::from("time"), + semantic_type: SemanticType::Time.into(), + values: Some(Values { + i64_values: vec![4242], + f64_values: vec![], + u64_values: vec![], + string_values: vec![], + bool_values: vec![], + bytes_values: vec![], + packed_string_values: None, + interned_string_values: None, + }), + null_mask: vec![0], + }], + row_count:1 , + }, + TableBatch { + table_id: ALTERNATIVE_TABLE_ID.get(), + columns: vec![Column { + column_name: String::from("time"), + semantic_type: SemanticType::Time.into(), + values: Some(Values { + i64_values: vec![7676], + f64_values: vec![], + u64_values: vec![], + string_values: vec![], + bool_values: vec![], + bytes_values: vec![], + packed_string_values: None, + interned_string_values: None, + }), + null_mask: vec![0], + }], + row_count: 1, + }, + ], + }), + }, + sink_ret = Ok(()), + want_err = false, + want_calls = [IngestOp::Write(w)] => { + // Assert the properties of the applied IngestOp match the expected + // values. Notably a sequence number should be assigned _per partition_. + assert_eq!(w.namespace(), ARBITRARY_NAMESPACE_ID); + assert_eq!(w.tables().count(), 2); + assert_eq!(*w.partition_key(), *ARBITRARY_PARTITION_KEY); + let sequence_numbers = w.tables().map(|t| t.1.partitioned_data().sequence_number()).collect::<HashSet<_>>(); + assert_eq!( + sequence_numbers, + [ + SequenceNumber::new(1), + SequenceNumber::new(2), + ] + .into_iter() + .collect::<HashSet<_>>(), + ); + } + ); + test_rpc_write!( no_payload, request = proto::WriteRequest { payload: None }, diff --git a/ingester/src/test_util.rs b/ingester/src/test_util.rs index 9a8a75cb1a..ec58d58c8c 100644 --- a/ingester/src/test_util.rs +++ b/ingester/src/test_util.rs @@ -4,6 +4,7 @@ use data_types::{ partition_template::TablePartitionTemplateOverride, NamespaceId, PartitionId, PartitionKey, SequenceNumber, TableId, }; +use hashbrown::HashSet; use iox_catalog::{interface::Catalog, test_helpers::arbitrary_namespace}; use lazy_static::lazy_static; use mutable_batch_lp::lines_to_batches; @@ -312,6 +313,52 @@ pub(crate) fn make_write_op( WriteOperation::new(namespace_id, tables_by_id, partition_key.clone(), span_ctx) } +/// Construct a [`WriteOperation`] with the specified parameters for LP covering +/// multiple separately sequenced table writes. +/// +/// # Panics +/// +/// This method panics if `table_sequence_numbers` contains a different number +/// of tables to the batches derived from `lines` OR if a [`SequenceNumber`] +/// is re-used within the write. 
+#[track_caller] +pub(crate) fn make_multi_table_write_op< + 'a, + I: ExactSizeIterator<Item = (&'a str, TableId, SequenceNumber)>, +>( + partition_key: &PartitionKey, + namespace_id: NamespaceId, + table_sequence_numbers: I, + lines: &str, +) -> WriteOperation { + let mut tables_by_name = lines_to_batches(lines, 0).expect("invalid LP"); + assert_eq!( + tables_by_name.len(), + table_sequence_numbers.len(), + "number of tables in LP does not match number of table_sequence_numbers" + ); + + let mut seen_sequence_numbers = HashSet::<SequenceNumber>::new(); + + let tables_by_id = table_sequence_numbers + .map(|(table_name, table_id, sequence_number)| { + let mb = tables_by_name + .remove(table_name) + .expect("table name does not exist in LP"); + assert!( + seen_sequence_numbers.insert(sequence_number), + "duplicate sequence number {sequence_number:?} observed" + ); + ( + table_id, + TableData::new(table_id, PartitionedData::new(sequence_number, mb)), + ) + }) + .collect(); + + WriteOperation::new(namespace_id, tables_by_id, partition_key.clone(), None) +} + pub(crate) async fn populate_catalog( catalog: &dyn Catalog, namespace: &str, @@ -332,17 +379,18 @@ pub(crate) async fn populate_catalog( /// Assert `a` and `b` have identical metadata, and that when converting /// them to Arrow batches they produces identical output. #[track_caller] -pub(crate) fn assert_dml_writes_eq(a: WriteOperation, b: WriteOperation) { +pub(crate) fn assert_write_ops_eq(a: WriteOperation, b: WriteOperation) { assert_eq!(a.namespace(), b.namespace(), "namespace"); assert_eq!(a.tables().count(), b.tables().count(), "table count"); assert_eq!(a.partition_key(), b.partition_key(), "partition key"); // Assert sequence numbers were reassigned for (a_table, b_table) in a.tables().zip(b.tables()) { + assert_eq!(a_table.0, b_table.0, "table id mismatch"); assert_eq!( a_table.1.partitioned_data().sequence_number(), b_table.1.partitioned_data().sequence_number(), - "sequence number" + "sequence number mismatch" ); } diff --git a/wal/src/lib.rs b/wal/src/lib.rs index 1c6fb8bbd1..ae0bfe8848 100644 --- a/wal/src/lib.rs +++ b/wal/src/lib.rs @@ -696,22 +696,25 @@ mod tests { let wal = Wal::new(&dir.path()).await.unwrap(); let w1 = test_data("m1,t=foo v=1i 1"); - let w2 = test_data("m1,t=foo v=2i 2"); + // Use multiple tables for a write to test per-partition sequencing is preserved + let w2 = test_data("m1,t=foo v=2i 2\nm2,u=bar v=1i 1"); let op1 = SequencedWalOp { table_write_sequence_numbers: vec![(TableId::new(0), 0)].into_iter().collect(), op: WalOp::Write(w1), }; let op2 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 1)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 1), (TableId::new(1), 2)] + .into_iter() + .collect(), op: WalOp::Write(w2), }; let op3 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 2)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 3)].into_iter().collect(), op: WalOp::Delete(test_delete()), }; let op4 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 2)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 3)].into_iter().collect(), op: WalOp::Persist(test_persist()), }; @@ -732,7 +735,7 @@ mod tests { // Assert the set has recorded the op IDs. // // Note that one op has a duplicate sequence number above! - assert_eq!(ids.len(), 3); + assert_eq!(ids.len(), 4); // Assert the sequence number set contains the specified ops. 
let ids = ids.iter().collect::<Vec<_>>(); @@ -742,6 +745,7 @@ mod tests { SequenceNumber::new(0), SequenceNumber::new(1), SequenceNumber::new(2), + SequenceNumber::new(3), ] ); @@ -752,9 +756,11 @@ mod tests { .collect::<Vec<std::collections::HashMap<TableId, u64>>>(), [ [(TableId::new(0), 0)].into_iter().collect(), - [(TableId::new(0), 1)].into_iter().collect(), - [(TableId::new(0), 2)].into_iter().collect(), - [(TableId::new(0), 2)].into_iter().collect(), + [(TableId::new(0), 1), (TableId::new(1), 2)] + .into_iter() + .collect(), + [(TableId::new(0), 3)].into_iter().collect(), + [(TableId::new(0), 3)].into_iter().collect(), ] .into_iter() .collect::<Vec<std::collections::HashMap<TableId, u64>>>(), @@ -807,7 +813,7 @@ mod tests { let wal = Wal::new(dir.path()).await.unwrap(); let w1 = test_data("m1,t=foo v=1i 1"); - let w2 = test_data("m2,u=foo w=2i 2"); + let w2 = test_data("m1,t=foo v=2i 2\nm2,u=foo w=2i 2"); let w3 = test_data("m1,t=foo v=3i 3"); let op1 = SequencedWalOp { @@ -815,20 +821,22 @@ mod tests { op: WalOp::Write(w1.to_owned()), }; let op2 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 1)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 1), (TableId::new(1), 2)] + .into_iter() + .collect(), op: WalOp::Write(w2.to_owned()), }; let op3 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 2)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 3)].into_iter().collect(), op: WalOp::Delete(test_delete()), }; let op4 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 2)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 3)].into_iter().collect(), op: WalOp::Persist(test_persist()), }; // A third write entry coming after a delete and persist entry must still be yielded let op5 = SequencedWalOp { - table_write_sequence_numbers: vec![(TableId::new(0), 3)].into_iter().collect(), + table_write_sequence_numbers: vec![(TableId::new(0), 4)].into_iter().collect(), op: WalOp::Write(w3.to_owned()), }; diff --git a/wal/tests/end_to_end.rs b/wal/tests/end_to_end.rs index b2580ab910..331c9b49bf 100644 --- a/wal/tests/end_to_end.rs +++ b/wal/tests/end_to_end.rs @@ -22,16 +22,16 @@ async fn crud() { ); // Can write an entry to the open segment - let op = arbitrary_sequenced_wal_op(42); + let op = arbitrary_sequenced_wal_op([42, 43]); let summary = unwrap_summary(wal.write_op(op)).await; - assert_eq!(summary.total_bytes, 126); - assert_eq!(summary.bytes_written, 110); + assert_eq!(summary.total_bytes, 140); + assert_eq!(summary.bytes_written, 124); // Can write another entry; total_bytes accumulates - let op = arbitrary_sequenced_wal_op(43); + let op = arbitrary_sequenced_wal_op([44, 45]); let summary = unwrap_summary(wal.write_op(op)).await; - assert_eq!(summary.total_bytes, 236); - assert_eq!(summary.bytes_written, 110); + assert_eq!(summary.total_bytes, 264); + assert_eq!(summary.bytes_written, 124); // Still no closed segments let closed = wal.closed_segments(); @@ -42,10 +42,15 @@ async fn crud() { // Can't read entries from the open segment; have to rotate first let (closed_segment_details, ids) = wal.rotate().unwrap(); - assert_eq!(closed_segment_details.size(), 236); + assert_eq!(closed_segment_details.size(), 264); assert_eq!( ids.iter().collect::<Vec<_>>(), - [SequenceNumber::new(42), SequenceNumber::new(43)] + [ + SequenceNumber::new(42), + SequenceNumber::new(43), + SequenceNumber::new(44), + SequenceNumber::new(45) + ] ); // There's 
one closed segment @@ -53,20 +58,25 @@ async fn crud() { let closed_segment_ids: Vec<_> = closed.iter().map(|c| c.id()).collect(); assert_eq!(closed_segment_ids, &[closed_segment_details.id()]); - // Can read the written entries from the closed segment, - // ensuring the per-partition sequence numbers match up to the current - // op-level sequence number while it is the source of truth. + // Can read the written entries from the closed segment, ensuring that the + // per-partition sequence numbers are preserved. let mut reader = wal.reader_for_segment(closed_segment_details.id()).unwrap(); - let op = reader.next().unwrap().unwrap(); - op[0] + let mut op = reader.next().unwrap().unwrap(); + let mut got_sequence_numbers = op + .remove(0) .table_write_sequence_numbers - .values() - .for_each(|sequence_number| assert_eq!(*sequence_number, 42)); - let op = reader.next().unwrap().unwrap(); - op[0] + .into_values() + .collect::<Vec<_>>(); + got_sequence_numbers.sort(); + assert_eq!(got_sequence_numbers, Vec::<u64>::from([42, 43]),); + let mut op = reader.next().unwrap().unwrap(); + let mut got_sequence_numbers = op + .remove(0) .table_write_sequence_numbers - .values() - .for_each(|sequence_number| assert_eq!(*sequence_number, 43)); + .into_values() + .collect::<Vec<_>>(); + got_sequence_numbers.sort(); + assert_eq!(got_sequence_numbers, Vec::<u64>::from([44, 45]),); // Can delete a segment, leaving no closed segments again wal.delete(closed_segment_details.id()).await.unwrap(); @@ -85,10 +95,10 @@ async fn replay() { // WAL. { let wal = wal::Wal::new(dir.path()).await.unwrap(); - let op = arbitrary_sequenced_wal_op(42); + let op = arbitrary_sequenced_wal_op([42]); let _ = unwrap_summary(wal.write_op(op)).await; wal.rotate().unwrap(); - let op = arbitrary_sequenced_wal_op(43); + let op = arbitrary_sequenced_wal_op([43, 44]); let _ = unwrap_summary(wal.write_op(op)).await; } @@ -102,22 +112,27 @@ async fn replay() { assert_eq!(closed_segment_ids.len(), 2); // Can read the written entries from the previously closed segment - // ensuring the per-partition sequence numbers match up to the current - // op-level sequence number while it is the source of truth. + // ensuring the per-partition sequence numbers are preserved. 
let mut reader = wal.reader_for_segment(closed_segment_ids[0]).unwrap(); - let op = reader.next().unwrap().unwrap(); - op[0] + let mut op = reader.next().unwrap().unwrap(); + let mut got_sequence_numbers = op + .remove(0) .table_write_sequence_numbers - .values() - .for_each(|sequence_number| assert_eq!(*sequence_number, 42)); + .into_values() + .collect::<Vec<_>>(); + got_sequence_numbers.sort(); + assert_eq!(got_sequence_numbers, Vec::<u64>::from([42])); // Can read the written entries from the previously open segment let mut reader = wal.reader_for_segment(closed_segment_ids[1]).unwrap(); - let op = reader.next().unwrap().unwrap(); - op[0] + let mut op = reader.next().unwrap().unwrap(); + let mut got_sequence_numbers = op + .remove(0) .table_write_sequence_numbers - .values() - .for_each(|sequence_number| assert_eq!(*sequence_number, 43)); + .into_values() + .collect::<Vec<_>>(); + got_sequence_numbers.sort(); + assert_eq!(got_sequence_numbers, Vec::<u64>::from([43, 44])); } #[tokio::test] @@ -128,17 +143,20 @@ async fn ordering() { { let wal = wal::Wal::new(dir.path()).await.unwrap(); - let op = arbitrary_sequenced_wal_op(42); + let op = arbitrary_sequenced_wal_op([42, 43]); let _ = unwrap_summary(wal.write_op(op)).await; let (_, ids) = wal.rotate().unwrap(); - assert_eq!(ids.iter().collect::<Vec<_>>(), [SequenceNumber::new(42)]); + assert_eq!( + ids.iter().collect::<Vec<_>>(), + [SequenceNumber::new(42), SequenceNumber::new(43)] + ); - let op = arbitrary_sequenced_wal_op(43); + let op = arbitrary_sequenced_wal_op([44]); let _ = unwrap_summary(wal.write_op(op)).await; let (_, ids) = wal.rotate().unwrap(); - assert_eq!(ids.iter().collect::<Vec<_>>(), [SequenceNumber::new(43)]); + assert_eq!(ids.iter().collect::<Vec<_>>(), [SequenceNumber::new(44)]); - let op = arbitrary_sequenced_wal_op(44); + let op = arbitrary_sequenced_wal_op([45]); let _ = unwrap_summary(wal.write_op(op)).await; } @@ -162,15 +180,21 @@ async fn ordering() { assert!(ids.is_empty()); } -// TODO(savage): This needs changing to generate multiple partitioned sequence numbers for each -// write. -fn arbitrary_sequenced_wal_op(sequence_number: u64) -> SequencedWalOp { - let w = test_data("m1,t=foo v=1i 1"); +fn arbitrary_sequenced_wal_op<I: IntoIterator<Item = u64>>(sequence_numbers: I) -> SequencedWalOp { + let sequence_numbers = sequence_numbers.into_iter().collect::<Vec<_>>(); + let lp = sequence_numbers + .iter() + .enumerate() + .fold(String::new(), |string, (idx, _)| { + string + &format!("m{},t=foo v=1i 1\n", idx) + }); + let w = test_data(lp.as_str()); SequencedWalOp { table_write_sequence_numbers: w .table_batches .iter() - .map(|table_batch| (TableId::new(table_batch.table_id), sequence_number)) + .zip(sequence_numbers.iter()) + .map(|(table_batch, &id)| (TableId::new(table_batch.table_id), id)) .collect(), op: WalOp::Write(w), }
4b24c988ad63b55b24638a6edd94aecabd0c63d6
Martin Hilton
2023-04-26 14:52:49
JDBC compatible Handshake (#7660)
* refactor(authz): move extract_header_token into authz Move the extract_header_token method into the authz package so that it can be shared by the query path. The method is renamed to reflect the fact that it can now also extract a token from gRPC metadata. The extract_token function is now a little more generic to allow it to be used with HTTP header values and gRPC metadata values. * feat(service_grpc_flight): JDBC compatible Handshake While testing some JDBC-based clients we found that some, Tableau in this case, cannot be configured with authorization tokens. In these cases we need to be able to support username/password. The approach taken is to ignore the username and make the token the password. This is the same approach being taken throughout the product. To facilitate this the Flight RPC Handshake command has been extended to look for Basic authorization credentials and respond with the appropriate Bearer authorization header. While adding end-to-end tests the subprocess commands were causing a deadlock. These have been changed to use the tokio::process module. There are also some small changes to the JDBC test application where the hardcoded values were clashing with the authorization parameters. * fix: lint * chore: apply suggestions from code review Co-authored-by: Andrew Lamb <[email protected]> * chore: review suggestion ---------
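The core of the change is the shared token extraction. The sketch below mirrors the `extract_token` function added to the `authz` crate in the diff, simplified to take a `&str`; it assumes the `base64` 0.21 crate (the same dependency the diff adds). `Token`/`Bearer` values are used verbatim, while `Basic` credentials are base64-decoded and only the password half is kept, which is what lets a JDBC client pass the token as its password.

```rust
use base64::{prelude::BASE64_STANDARD, Engine};

/// Simplified mirror of the `extract_token` added in the diff below; the real
/// function is generic over HTTP header values and gRPC metadata values.
fn extract_token(value: &str) -> Option<Vec<u8>> {
    let mut parts = value.as_bytes().splitn(2, |&v| v == b' ');
    let token = match parts.next()? {
        b"Token" | b"Bearer" => parts.next()?.to_vec(),
        b"Basic" => parts
            .next()
            .and_then(|v| BASE64_STANDARD.decode(v).ok())?
            .splitn(2, |&v| v == b':')
            .nth(1)? // drop the username, keep the password
            .to_vec(),
        _ => return None,
    };
    if token.is_empty() {
        None
    } else {
        Some(token)
    }
}

fn main() {
    // "username:password2" base64-encoded: only the password becomes the token.
    assert_eq!(
        extract_token("Basic dXNlcm5hbWU6cGFzc3dvcmQy"),
        Some(b"password2".to_vec())
    );
    assert_eq!(extract_token("Bearer token"), Some(b"token".to_vec()));
    // ":" base64-encoded: an empty password yields no token.
    assert_eq!(extract_token("Basic Og=="), None);
}
```

With the extraction shared, the Handshake handler can answer Basic credentials with the appropriate `Bearer` authorization header built from the extracted token, as described above.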
Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat(service_grpc_flight): JDBC compatible Handshake (#7660) * refactor(authz): move extract_header_token into authz Move the extract_header_token method into the authz package so that it can be shared by the query path. The method is renamed to reflect the fact that it can now also extract a token from gRPC metadata. The extract_token function is now a little more generic to allow it to be used with HTTP header values and gRPC metadata values. * feat(service_grpc_flight): JDBC compatible Handshake While testing some JDBC-based clients we found that some, Tableau in this case, cannot be configured with authorization tokens. In these cases we need to be able to support username/password. The approach taken is to ignore the username and make the token the password. This is the same approach being taken throughout the product. To facilitate this the Flight RPC Handshake command has been extended to look for Basic authorization credentials and respond with the appropriate Bearer authorization header. While adding end-to-end tests the subprocess commands were causing a deadlock. These have been changed to use the tokio::process module. There are also some small changes to the JDBC test application where the hardcoded values were clashing with the authorization parameters. * fix: lint * chore: apply suggestions from code review Co-authored-by: Andrew Lamb <[email protected]> * chore: review suggestion --------- Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index caa0a7b25b..6e826b9c6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -515,6 +515,7 @@ name = "authz" version = "0.1.0" dependencies = [ "async-trait", + "base64 0.21.0", "generated_types", "http", "observability_deps", diff --git a/authz/Cargo.toml b/authz/Cargo.toml index 8e91c1d831..86de4fc65b 100644 --- a/authz/Cargo.toml +++ b/authz/Cargo.toml @@ -16,6 +16,7 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" } # crates.io dependencies in alphabetical order. async-trait = "0.1" +base64 = "0.21.0" snafu = "0.7" tonic = { workspace = true } diff --git a/authz/src/lib.rs b/authz/src/lib.rs index 35450b8bd9..6dcfdaa157 100644 --- a/authz/src/lib.rs +++ b/authz/src/lib.rs @@ -17,6 +17,7 @@ #![allow(rustdoc::private_intra_doc_links)] use async_trait::async_trait; +use base64::{prelude::BASE64_STANDARD, Engine}; use generated_types::influxdata::iox::authz::v1 as proto; use observability_deps::tracing::warn; use snafu::Snafu; @@ -27,6 +28,26 @@ pub use permission::{Action, Permission, Resource}; #[cfg(feature = "http")] pub mod http; +/// Extract a token from an HTTP header or gRPC metadata value. +pub fn extract_token<T: AsRef<[u8]> + ?Sized>(value: Option<&T>) -> Option<Vec<u8>> { + let mut parts = value?.as_ref().splitn(2, |&v| v == b' '); + let token = match parts.next()? { + b"Token" | b"Bearer" => parts.next()?.to_vec(), + b"Basic" => parts + .next() + .and_then(|v| BASE64_STANDARD.decode(v).ok())? + .splitn(2, |&v| v == b':') + .nth(1)? + .to_vec(), + _ => return None, + }; + if token.is_empty() { + None + } else { + Some(token) + } +} + /// An authorizer is used to validate the associated with /// an authorization token that has been extracted from a request. #[async_trait] @@ -41,14 +62,14 @@ pub trait Authorizer: std::fmt::Debug + Send + Sync { /// empty permission sets. async fn permissions( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, Error>; /// Make a test request that determines if end-to-end communication /// with the service is working. async fn probe(&self) -> Result<(), Error> { - self.permissions(Some(b""), &[]).await?; + self.permissions(Some(b"".to_vec()), &[]).await?; Ok(()) } @@ -58,7 +79,7 @@ pub trait Authorizer: std::fmt::Debug + Send + Sync { /// error is returned. 
async fn require_any_permission( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<(), Error> { if self.permissions(token, perms).await?.is_empty() { @@ -73,7 +94,7 @@ pub trait Authorizer: std::fmt::Debug + Send + Sync { impl<T: Authorizer> Authorizer for Option<T> { async fn permissions( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, Error> { match self { @@ -87,7 +108,7 @@ impl<T: Authorizer> Authorizer for Option<T> { impl<T: AsRef<dyn Authorizer> + std::fmt::Debug + Send + Sync> Authorizer for T { async fn permissions( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, Error> { self.as_ref().permissions(token, perms).await @@ -120,11 +141,11 @@ impl IoxAuthorizer { impl Authorizer for IoxAuthorizer { async fn permissions( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, Error> { let req = proto::AuthorizeRequest { - token: token.ok_or(Error::NoToken)?.to_vec(), + token: token.ok_or(Error::NoToken)?, permissions: perms .iter() .filter_map(|p| p.clone().try_into().ok()) @@ -200,4 +221,31 @@ mod tests { format!("{e}") ) } + + #[test] + fn test_extract_token() { + assert_eq!(None, extract_token::<&str>(None)); + assert_eq!(None, extract_token(Some(""))); + assert_eq!(None, extract_token(Some("Basic"))); + assert_eq!(None, extract_token(Some("Basic Og=="))); // ":" + assert_eq!(None, extract_token(Some("Basic dXNlcm5hbWU6"))); // "username:" + assert_eq!(None, extract_token(Some("Basic Og=="))); // ":" + assert_eq!( + Some(b"password".to_vec()), + extract_token(Some("Basic OnBhc3N3b3Jk")) + ); // ":password" + assert_eq!( + Some(b"password2".to_vec()), + extract_token(Some("Basic dXNlcm5hbWU6cGFzc3dvcmQy")) + ); // "username:password2" + assert_eq!(None, extract_token(Some("Bearer"))); + assert_eq!(None, extract_token(Some("Bearer "))); + assert_eq!(Some(b"token".to_vec()), extract_token(Some("Bearer token"))); + assert_eq!(None, extract_token(Some("Token"))); + assert_eq!(None, extract_token(Some("Token "))); + assert_eq!( + Some(b"token2".to_vec()), + extract_token(Some("Token token2")) + ); + } } diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs index 99f2715d49..79394a22a7 100644 --- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs +++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs @@ -15,7 +15,7 @@ use arrow_flight::{ FlightClient, FlightDescriptor, IpcMessage, }; use arrow_util::test_util::batches_to_sorted_lines; -use assert_cmd::Command; +use assert_cmd::assert::OutputAssertExt; use assert_matches::assert_matches; use bytes::Bytes; use datafusion::common::assert_contains; @@ -26,6 +26,7 @@ use prost::Message; use test_helpers_end_to_end::{ maybe_skip_integration, Authorizer, MiniCluster, Step, StepTest, StepTestState, }; +use tokio::process::Command; #[tokio::test] async fn flightsql_adhoc_query() { @@ -1220,113 +1221,259 @@ async fn flightsql_jdbc() { let jdbc_url = format!("{jdbc_addr}?useEncryption=false&iox-namespace-name={namespace}"); println!("jdbc_url {jdbc_url}"); + jdbc_tests(&jdbc_url, table_name).await; + } + .boxed() + })), + ], + ) + .run() + .await +} - // find the jdbc_client to run - let path = PathBuf::from(std::env::var("PWD").expect("can not get PWD")) - .join("influxdb_iox/tests/jdbc_client/jdbc_client"); - println!("Path to jdbc client: {path:?}"); - - // Validate basic query: 
jdbc_client <url> query 'sql' - Command::from_std(std::process::Command::new(&path)) - .arg(&jdbc_url) - .arg("query") - .arg(format!("select * from {table_name} order by time")) - .arg(&querier_addr) - .assert() - .success() - .stdout(predicate::str::contains("Running SQL Query")) - .stdout(predicate::str::contains( - "A, B, 1970-01-01 00:00:00.000123456, 42", - )) - .stdout(predicate::str::contains( - "A, C, 1970-01-01 00:00:00.000123457, 43", - )); +#[tokio::test] +/// Runs the `jdbc_client` program against IOx to verify authenticated JDBC via FlightSQL is working +/// +/// Example command: +/// +/// ```shell +/// TEST_INFLUXDB_JDBC=true TEST_INFLUXDB_IOX_CATALOG_DSN=postgresql://postgres@localhost:5432/postgres cargo test --test end_to_end jdbc +/// ``` +async fn flightsql_jdbc_authz_token() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); - // Validate prepared query: jdbc_client <url> prepared_query 'sql' - Command::from_std(std::process::Command::new(&path)) - .arg(&jdbc_url) - .arg("prepared_query") - .arg(format!("select tag1, tag2 from {table_name} order by time")) - .arg(&querier_addr) - .assert() - .success() - .stdout(predicate::str::contains("Running Prepared SQL Query")) - .stdout(predicate::str::contains("A, B")); - - // CommandGetCatalogs output - let expected_catalogs = "**************\n\ - Catalogs:\n\ - **************\n\ - TABLE_CAT\n\ - ------------\n\ - public"; - - // CommandGetSchemas output - let expected_schemas = "**************\n\ - Schemas:\n\ - **************\n\ - TABLE_SCHEM, TABLE_CATALOG\n\ - ------------\n\ - information_schema, public\n\ - iox, public\n\ - system, public"; - - // CommandGetTables output - let expected_tables_no_filter = "**************\n\ - Tables:\n\ - **************\n\ - TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS, TYPE_CAT, TYPE_SCHEM, TYPE_NAME, SELF_REFERENCING_COL_NAME, REF_GENERATION\n\ - ------------\n\ - public, information_schema, columns, VIEW, null, null, null, null, null, null\n\ - public, information_schema, df_settings, VIEW, null, null, null, null, null, null\n\ - public, information_schema, tables, VIEW, null, null, null, null, null, null\n\ - public, information_schema, views, VIEW, null, null, null, null, null, null\n\ - public, iox, the_table, BASE TABLE, null, null, null, null, null, null\n\ - public, system, queries, BASE TABLE, null, null, null, null, null, null"; - - // CommandGetTables output - let expected_tables_with_filters = "**************\n\ - Tables (system table filter):\n\ - **************\n\ - TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS, TYPE_CAT, TYPE_SCHEM, TYPE_NAME, SELF_REFERENCING_COL_NAME, REF_GENERATION\n\ - ------------\n\ - public, system, queries, BASE TABLE, null, null, null, null, null, null"; - - // CommandGetTableTypes output - let expected_table_types = "**************\n\ - Table Types:\n\ - **************\n\ - TABLE_TYPE\n\ - ------------\n\ - BASE TABLE\n\ - VIEW"; - - // Validate metadata: jdbc_client <url> metadata - let mut assert = Command::from_std(std::process::Command::new(&path)) - .arg(&jdbc_url) - .arg("metadata") - .assert() - .success() - .stdout(predicate::str::contains(expected_catalogs)) - .stdout(predicate::str::contains(expected_schemas)) - .stdout(predicate::str::contains(expected_tables_no_filter)) - .stdout(predicate::str::contains(expected_tables_with_filters)) - .stdout(predicate::str::contains(expected_table_types)); - - let expected_metadata = EXPECTED_METADATA - .trim() - 
.replace("REPLACE_ME_WITH_JBDC_URL", &jdbc_url); - - for expected in expected_metadata.lines() { - assert = assert.stdout(predicate::str::contains(expected)); - } + if std::env::var("TEST_INFLUXDB_JDBC").ok().is_none() { + println!("Skipping JDBC test because TEST_INFLUXDB_JDBC is not set"); + return; + } + + let table_name = "the_table"; + + // Set up the authorizer ================================= + let mut authz = Authorizer::create().await; + + // Set up the cluster ==================================== + let mut cluster = MiniCluster::create_non_shared2_with_authz(database_url, authz.addr()).await; + + let write_token = authz.create_token_for(cluster.namespace(), &["ACTION_WRITE"]); + let read_token = + authz.create_token_for(cluster.namespace(), &["ACTION_READ", "ACTION_READ_SCHEMA"]); + + StepTest::new( + &mut cluster, + vec![ + Step::WriteLineProtocolWithAuthorization { + line_protocol: format!( + "{table_name},tag1=A,tag2=B val=42i 123456\n\ + {table_name},tag1=A,tag2=C val=43i 123457" + ), + authorization: format!("Token {}", write_token.clone()), + }, + Step::Custom(Box::new(move |state: &mut StepTestState| { + let token = read_token.clone(); + // satisfy the borrow checker + async move { + let namespace = state.cluster().namespace(); + + // querier_addr looks like: http://127.0.0.1:8092 + let querier_addr = state.cluster().querier().querier_grpc_base().to_string(); + println!("Querier {querier_addr}, namespace {namespace}"); + + // JDBC URL looks like this: + // jdbc:arrow-flight-sql://localhost:8082?useEncryption=false&iox-namespace-name=26f7e5a4b7be365b_917b97a92e883afc + let jdbc_addr = querier_addr.replace("http://", "jdbc:arrow-flight-sql://"); + let jdbc_url = + format!("{jdbc_addr}?useEncryption=false&iox-namespace-name={namespace}&token={token}"); + println!("jdbc_url {jdbc_url}"); + jdbc_tests(&jdbc_url, table_name).await; } .boxed() })), ], ) .run() - .await + .await; + + authz.close().await; +} + +#[tokio::test] +/// Runs the `jdbc_client` program against IOx to verify authenticated JDBC via FlightSQL is working +/// +/// In this mode the username is empty and password is the authorization token +/// +/// Example command: +/// +/// ```shell +/// TEST_INFLUXDB_JDBC=true TEST_INFLUXDB_IOX_CATALOG_DSN=postgresql://postgres@localhost:5432/postgres cargo test --test end_to_end jdbc +/// ``` +async fn flightsql_jdbc_authz_handshake() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + if std::env::var("TEST_INFLUXDB_JDBC").ok().is_none() { + println!("Skipping JDBC test because TEST_INFLUXDB_JDBC is not set"); + return; + } + + let table_name = "the_table"; + + // Set up the authorizer ================================= + let mut authz = Authorizer::create().await; + + // Set up the cluster ==================================== + let mut cluster = MiniCluster::create_non_shared2_with_authz(database_url, authz.addr()).await; + + let write_token = authz.create_token_for(cluster.namespace(), &["ACTION_WRITE"]); + let read_token = + authz.create_token_for(cluster.namespace(), &["ACTION_READ", "ACTION_READ_SCHEMA"]); + + StepTest::new( + &mut cluster, + vec![ + Step::WriteLineProtocolWithAuthorization { + line_protocol: format!( + "{table_name},tag1=A,tag2=B val=42i 123456\n\ + {table_name},tag1=A,tag2=C val=43i 123457" + ), + authorization: format!("Token {}", write_token.clone()), + }, + Step::Custom(Box::new(move |state: &mut StepTestState| { + let token = read_token.clone(); + // satisfy the borrow checker + async move { + let 
namespace = state.cluster().namespace(); + + // querier_addr looks like: http://127.0.0.1:8092 + let querier_addr = state.cluster().querier().querier_grpc_base().to_string(); + println!("Querier {querier_addr}, namespace {namespace}"); + + // JDBC URL looks like this: + // jdbc:arrow-flight-sql://localhost:8082?useEncryption=false&iox-namespace-name=26f7e5a4b7be365b_917b97a92e883afc + let jdbc_addr = querier_addr.replace("http://", "jdbc:arrow-flight-sql://"); + let jdbc_url = + format!("{jdbc_addr}?useEncryption=false&iox-namespace-name={namespace}&user=&password={token}"); + println!("jdbc_url {jdbc_url}"); + jdbc_tests(&jdbc_url, table_name).await; + } + .boxed() + })), + ], + ) + .run() + .await; + + authz.close().await; +} + +async fn jdbc_tests(jdbc_url: &str, table_name: &str) { + // find the jdbc_client to run + let path = PathBuf::from(std::env::var("PWD").expect("can not get PWD")) + .join("influxdb_iox/tests/jdbc_client/jdbc_client"); + println!("Path to jdbc client: {path:?}"); + + // Validate basic query: jdbc_client <url> query 'sql' + Command::new(&path) + .arg(jdbc_url) + .arg("query") + .arg(format!("select * from {table_name} order by time")) + .output() + .await + .unwrap() + .assert() + .success() + .stdout(predicate::str::contains("Running SQL Query")) + .stdout(predicate::str::contains( + "A, B, 1970-01-01 00:00:00.000123456, 42", + )) + .stdout(predicate::str::contains( + "A, C, 1970-01-01 00:00:00.000123457, 43", + )); + + // Validate prepared query: jdbc_client <url> prepared_query 'sql' + Command::new(&path) + .arg(jdbc_url) + .arg("prepared_query") + .arg(format!("select tag1, tag2 from {table_name} order by time")) + .output() + .await + .unwrap() + .assert() + .success() + .stdout(predicate::str::contains("Running Prepared SQL Query")) + .stdout(predicate::str::contains("A, B")); + + // CommandGetCatalogs output + let expected_catalogs = "**************\n\ + Catalogs:\n\ + **************\n\ + TABLE_CAT\n\ + ------------\n\ + public"; + + // CommandGetSchemas output + let expected_schemas = "**************\n\ + Schemas:\n\ + **************\n\ + TABLE_SCHEM, TABLE_CATALOG\n\ + ------------\n\ + information_schema, public\n\ + iox, public\n\ + system, public"; + + // CommandGetTables output + let expected_tables_no_filter = "**************\n\ + Tables:\n\ + **************\n\ + TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS, TYPE_CAT, TYPE_SCHEM, TYPE_NAME, SELF_REFERENCING_COL_NAME, REF_GENERATION\n\ + ------------\n\ + public, information_schema, columns, VIEW, null, null, null, null, null, null\n\ + public, information_schema, df_settings, VIEW, null, null, null, null, null, null\n\ + public, information_schema, tables, VIEW, null, null, null, null, null, null\n\ + public, information_schema, views, VIEW, null, null, null, null, null, null\n\ + public, iox, the_table, BASE TABLE, null, null, null, null, null, null\n\ + public, system, queries, BASE TABLE, null, null, null, null, null, null"; + + // CommandGetTables output + let expected_tables_with_filters = "**************\n\ + Tables (system table filter):\n\ + **************\n\ + TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS, TYPE_CAT, TYPE_SCHEM, TYPE_NAME, SELF_REFERENCING_COL_NAME, REF_GENERATION\n\ + ------------\n\ + public, system, queries, BASE TABLE, null, null, null, null, null, null"; + + // CommandGetTableTypes output + let expected_table_types = "**************\n\ + Table Types:\n\ + **************\n\ + TABLE_TYPE\n\ + ------------\n\ + BASE TABLE\n\ + VIEW"; + + // Validate 
metadata: jdbc_client <url> metadata + let mut assert = Command::new(&path) + .arg(jdbc_url) + .arg("metadata") + .output() + .await + .unwrap() + .assert() + .success() + .stdout(predicate::str::contains(expected_catalogs)) + .stdout(predicate::str::contains(expected_schemas)) + .stdout(predicate::str::contains(expected_tables_no_filter)) + .stdout(predicate::str::contains(expected_tables_with_filters)) + .stdout(predicate::str::contains(expected_table_types)); + + let expected_metadata = EXPECTED_METADATA + .trim() + .replace("REPLACE_ME_WITH_JBDC_URL", jdbc_url); + + for expected in expected_metadata.lines() { + assert = assert.stdout(predicate::str::contains(expected)); + } } /// Ensures that the schema returned as part of GetFlightInfo matches @@ -1768,7 +1915,6 @@ getStringFunctions: arrow_typeof, ascii, bit_length, btrim, char_length, charact getSystemFunctions: array, arrow_typeof, struct getTimeDateFunctions: current_date, current_time, date_bin, date_part, date_trunc, datepart, datetrunc, from_unixtime, now, to_timestamp, to_timestamp_micros, to_timestamp_millis, to_timestamp_seconds getURL: REPLACE_ME_WITH_JBDC_URL -getUserName: test isCatalogAtStart: false isReadOnly: true locatorsUpdateCopy: false diff --git a/influxdb_iox/tests/jdbc_client/Main.java b/influxdb_iox/tests/jdbc_client/Main.java index b147264ab1..bcc0051c09 100644 --- a/influxdb_iox/tests/jdbc_client/Main.java +++ b/influxdb_iox/tests/jdbc_client/Main.java @@ -90,8 +90,6 @@ public class Main { System.out.println("----- Connecting -------"); System.out.println("URL: " + url); Properties props = new Properties(); - props.put("user", "test"); - props.put("password", "**token**"); Connection conn = DriverManager.getConnection(url, props); return conn; } @@ -240,7 +238,6 @@ public class Main { System.out.println("getSystemFunctions: " + md.getSystemFunctions()); System.out.println("getTimeDateFunctions: " + md.getTimeDateFunctions()); System.out.println("getURL: " + md.getURL()); - System.out.println("getUserName: " + md.getUserName()); System.out.println("isCatalogAtStart: " + md.isCatalogAtStart()); System.out.println("isReadOnly: " + md.isReadOnly()); System.out.println("locatorsUpdateCopy: " + md.locatorsUpdateCopy()); diff --git a/router/src/server/http/write/single_tenant/auth.rs b/router/src/server/http/write/single_tenant/auth.rs index 4ab9511576..3970a42f11 100644 --- a/router/src/server/http/write/single_tenant/auth.rs +++ b/router/src/server/http/write/single_tenant/auth.rs @@ -3,29 +3,11 @@ use std::sync::Arc; use authz::{ - self, http::AuthorizationHeaderExtension, Action, Authorizer, Error, Permission, Resource, + self, extract_token, http::AuthorizationHeaderExtension, Action, Authorizer, Error, Permission, + Resource, }; -use base64::{prelude::BASE64_STANDARD, Engine}; use data_types::NamespaceName; -use hyper::{header::HeaderValue, Body, Request}; - -fn extract_header_token(header_value: &'_ HeaderValue) -> Option<Vec<u8>> { - let mut parts = header_value.as_bytes().splitn(2, |&v| v == b' '); - let token = match parts.next()? { - b"Token" | b"Bearer" => parts.next()?.to_vec(), - b"Basic" => parts - .next() - .and_then(|v| BASE64_STANDARD.decode(v).ok())? - .splitn(2, |&v| v == b':') - .nth(1)? 
- .to_vec(), - _ => return None, - }; - if token.is_empty() { - return None; - } - Some(token) -} +use hyper::{Body, Request}; pub(crate) async fn authorize( authz: &Arc<dyn Authorizer>, @@ -33,21 +15,19 @@ pub(crate) async fn authorize( namespace: &NamespaceName<'_>, query_param_token: Option<String>, ) -> Result<(), Error> { - let token = req - .extensions() - .get::<AuthorizationHeaderExtension>() - .and_then(|v| v.as_ref()) - .and_then(extract_header_token) - .or_else(|| query_param_token.map(|t| t.into_bytes())); + let token = extract_token( + req.extensions() + .get::<AuthorizationHeaderExtension>() + .and_then(|v| v.as_ref()), + ) + .or_else(|| query_param_token.map(|t| t.into_bytes())); let perms = [Permission::ResourceAction( Resource::Database(namespace.to_string()), Action::Write, )]; - authz - .require_any_permission(token.as_deref(), &perms) - .await?; + authz.require_any_permission(token, &perms).await?; Ok(()) } @@ -68,13 +48,16 @@ pub mod mock { impl Authorizer for MockAuthorizer { async fn permissions( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, authz::Error> { match token { - Some(b"GOOD") => Ok(perms.to_vec()), - Some(b"UGLY") => Err(authz::Error::verification("test", "test error")), - Some(_) => Ok(vec![]), + Some(token) => match (&token as &dyn AsRef<[u8]>).as_ref() { + b"GOOD" => Ok(perms.to_vec()), + b"BAD" => Ok(vec![]), + b"UGLY" => Err(authz::Error::verification("test", "test error")), + _ => panic!("unexpected token"), + }, None => Err(authz::Error::NoToken), } } @@ -84,6 +67,7 @@ pub mod mock { #[cfg(test)] mod tests { use assert_matches::assert_matches; + use base64::{prelude::BASE64_STANDARD, Engine}; use data_types::NamespaceId; use hyper::header::HeaderValue; diff --git a/router/src/server/http/write/single_tenant/mod.rs b/router/src/server/http/write/single_tenant/mod.rs index b9adfe576b..23555eb1e0 100644 --- a/router/src/server/http/write/single_tenant/mod.rs +++ b/router/src/server/http/write/single_tenant/mod.rs @@ -201,7 +201,7 @@ mod tests { impl Authorizer for MockCountingAuthorizer { async fn permissions( &self, - _token: Option<&[u8]>, + _token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, authz::Error> { *self.calls_counter.lock() += 1; diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs index bc095955bc..950f7bde83 100644 --- a/service_grpc_flight/src/lib.rs +++ b/service_grpc_flight/src/lib.rs @@ -11,7 +11,7 @@ use arrow_flight::{ Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightEndpoint, FlightInfo, HandshakeRequest, HandshakeResponse, PutResult, SchemaResult, Ticket, }; -use authz::Authorizer; +use authz::{extract_token, Authorizer}; use data_types::NamespaceNameError; use datafusion::{error::DataFusionError, physical_plan::ExecutionPlan}; use flightsql::FlightSQLCommand; @@ -27,7 +27,10 @@ use request::{IoxGetRequest, RunQuery}; use service_common::{datafusion_error_to_tonic_code, planner::Planner, QueryNamespaceProvider}; use snafu::{OptionExt, ResultExt, Snafu}; use std::{fmt::Debug, pin::Pin, sync::Arc, task::Poll, time::Instant}; -use tonic::{metadata::MetadataMap, Request, Response, Streaming}; +use tonic::{ + metadata::{AsciiMetadataValue, MetadataMap}, + Request, Response, Streaming, +}; use trace::{ctx::SpanContext, span::SpanExt}; use trace_http::ctx::{RequestLogContext, RequestLogContextExt}; use tracker::InstrumentedAsyncOwnedSemaphorePermit; @@ -486,7 +489,7 @@ where )], }; self.authz - 
.require_any_permission(authz_token.as_deref(), &perms) + .require_any_permission(authz_token, &perms) .await .map_err(Error::from)?; @@ -522,18 +525,40 @@ where &self, request: Request<Streaming<HandshakeRequest>>, ) -> Result<Response<Self::HandshakeStream>, tonic::Status> { + // Note that the JDBC driver doesn't send the iox-namespace-name metadata + // in the handshake request, even if configured in the JDBC URL, + // so we cannot actually do any access checking here. + let authz_token = get_flight_authz(request.metadata()); + let request = request .into_inner() .message() .await? .context(InvalidHandshakeSnafu)?; + // The handshake method is used for authentication. IOx ignores the + // username and returns the password itself as the token to use for + // subsequent requests + let response_header = authz_token + .map(|mut v| { + let mut nv = b"Bearer ".to_vec(); + nv.append(&mut v); + nv + }) + .map(AsciiMetadataValue::try_from) + .transpose() + .map_err(|e| tonic::Status::invalid_argument(e.to_string()))?; + let response = HandshakeResponse { protocol_version: request.protocol_version, payload: request.payload, }; let output = futures::stream::iter(std::iter::once(Ok(response))); - Ok(Response::new(Box::pin(output) as Self::HandshakeStream)) + let mut response = Response::new(Box::pin(output) as Self::HandshakeStream); + if let Some(header) = response_header { + response.metadata_mut().insert("authorization", header); + } + Ok(response) } async fn list_flights( @@ -568,7 +593,7 @@ where let perms = flightsql_permissions(&namespace_name, &cmd); self.authz - .require_any_permission(authz_token.as_deref(), &perms) + .require_any_permission(authz_token, &perms) .await .map_err(Error::from)?; @@ -657,7 +682,7 @@ where let perms = flightsql_permissions(&namespace_name, &cmd); self.authz - .require_any_permission(authz_token.as_deref(), &perms) + .require_any_permission(authz_token, &perms) .await .map_err(Error::from)?; @@ -761,14 +786,7 @@ fn get_flightsql_namespace(metadata: &MetadataMap) -> Result<String> { /// Retrieve the authorization token associated with the request. fn get_flight_authz(metadata: &MetadataMap) -> Option<Vec<u8>> { - let val = metadata.get("authorization")?.as_ref(); - if val.len() < b"Bearer ".len() { - return None; - } - match val.split_at(b"Bearer ".len()) { - (b"Bearer ", token) => Some(token.to_vec()), - _ => None, - } + extract_token(metadata.get("authorization")) } fn flightsql_permissions(namespace_name: &str, cmd: &FlightSQLCommand) -> Vec<authz::Permission> { @@ -1050,14 +1068,16 @@ mod tests { impl Authorizer for MockAuthorizer { async fn permissions( &self, - token: Option<&[u8]>, + token: Option<Vec<u8>>, perms: &[Permission], ) -> Result<Vec<Permission>, authz::Error> { match token { - Some(b"GOOD") => Ok(perms.to_vec()), - Some(b"BAD") => Ok(vec![]), - Some(b"UGLY") => Err(authz::Error::verification("test", "test error")), - Some(_) => panic!("unexpected token"), + Some(token) => match (&token as &dyn AsRef<[u8]>).as_ref() { + b"GOOD" => Ok(perms.to_vec()), + b"BAD" => Ok(vec![]), + b"UGLY" => Err(authz::Error::verification("test", "test error")), + _ => panic!("unexpected token"), + }, None => Err(authz::Error::NoToken), } }
81722dc19b4a24ef8e97dc1e12c9941f43fe0800
Stuart Carnie
2022-10-14 09:37:49
AST traversal using Visitor pattern (#5796)
* feat: Partition implementation of Visitable for InfluxQL AST * feat: Added consistent structures for each clause to simplify the visitor. Continued to expand `accept` and `pre` / `post` visit implementations. * feat: Added insta and tests using snapshots (thanks @crepererum) The insta crate simplifies the process of validating that the visitor and `accept` implementations are called in the correct order. * chore: Run cargo hakari tasks * feat: Added remaining snapshot tests Some tests are failing, as some minor type changes must be added along with the related visitor functions. * feat: Add types to represent each clause in numerous statements These clauses permit distinct visit functions on the `Visitor` type. * chore: Reformat `SELECT` * chore: Explicitly specify access to export selected types only This required completing all the missing documentation for the exported types. * chore: Update Cargo.lock * chore: Macro to implement common traits and hide the 0th tuple element
Co-authored-by: CircleCI[bot] <[email protected]>
feat: AST traversal using Visitor pattern (#5796) * feat: Partition implementation of Visitable for InfluxQL AST * feat: Added consistent structures for each clause to simplify the visitor. Continued to expand `accept` and `pre` / `post` visit implementations. * feat: Added insta and tests using snapshots (thanks @crepererum) The insta crate simplifies the process of validating that the visitor and `accept` implementations are called in the correct order. * chore: Run cargo hakari tasks * feat: Added remaining snapshot tests Some tests are failing, as some minor type changes must be added along with the related visitor functions. * feat: Add types to represent each clause in numerous statements These clauses permit distinct visit functions on the `Visitor` type. * chore: Reformat `SELECT` * chore: Explicitly specify access to export selected types only This required completing all the missing documentation for the exported types. * chore: Update Cargo.lock * chore: Macro to implement common traits and hide the 0th tuple element Co-authored-by: CircleCI[bot] <[email protected]>
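The commit message above describes an `accept`-based traversal with `pre` / `post` visit hooks whose call order is verified with insta snapshot tests. The sketch below is a deliberately simplified, hypothetical illustration of that shape: it uses a toy two-variant expression type rather than the real InfluxQL AST, and the trait and method names are illustrative only, not the parser crate's actual API.

```rust
// Hypothetical sketch of an accept / pre-visit / post-visit traversal,
// not the real `Visitor` trait from influxdb_influxql_parser.
#[derive(Debug)]
enum Expr {
    Literal(i64),
    Binary { lhs: Box<Expr>, rhs: Box<Expr> },
}

trait Visitor {
    fn pre_visit_expr(&mut self, expr: &Expr);
    fn post_visit_expr(&mut self, expr: &Expr);
}

trait Visitable {
    /// Call the pre-visit hook, recurse into children, then call the post-visit hook.
    fn accept<V: Visitor>(&self, visitor: &mut V);
}

impl Visitable for Expr {
    fn accept<V: Visitor>(&self, visitor: &mut V) {
        visitor.pre_visit_expr(self);
        if let Expr::Binary { lhs, rhs } = self {
            lhs.accept(visitor);
            rhs.accept(visitor);
        }
        visitor.post_visit_expr(self);
    }
}

/// A visitor that records the order of calls; this recorded order is the kind
/// of output a snapshot test would pin down.
#[derive(Default)]
struct CallOrder(Vec<String>);

impl Visitor for CallOrder {
    fn pre_visit_expr(&mut self, expr: &Expr) {
        self.0.push(format!("pre: {expr:?}"));
    }
    fn post_visit_expr(&mut self, expr: &Expr) {
        self.0.push(format!("post: {expr:?}"));
    }
}

fn main() {
    let ast = Expr::Binary {
        lhs: Box::new(Expr::Literal(1)),
        rhs: Box::new(Expr::Literal(2)),
    };
    let mut visitor = CallOrder::default();
    ast.accept(&mut visitor);
    for line in &visitor.0 {
        println!("{line}");
    }
}
```

The real implementation in the diff below applies this pattern to every statement and clause type, which is why the commit also introduces dedicated wrapper types (for example `LimitClause`, `WhereClause`, `OnClause`) so that each clause can have its own distinct visit function.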
diff --git a/Cargo.lock b/Cargo.lock index 5d629855fb..62fac7061b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -667,9 +667,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.14" +version = "4.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea54a38e4bce14ff6931c72e5b3c43da7051df056913d4e7e1fcdb1c03df69d" +checksum = "6bf8832993da70a4c6d13c581f4463c2bdda27b9bf1c5498dc4365543abe6d6f" dependencies = [ "atty", "bitflags", @@ -684,7 +684,7 @@ dependencies = [ name = "clap_blocks" version = "0.1.0" dependencies = [ - "clap 4.0.14", + "clap 4.0.15", "data_types", "futures", "humantime", @@ -834,6 +834,19 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "console" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c050367d967ced717c04b65d8c619d863ef9292ce0c5760028655a2fb298718c" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "terminal_size", + "winapi", +] + [[package]] name = "console-api" version = "0.4.0" @@ -1432,6 +1445,12 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.31" @@ -1699,7 +1718,7 @@ version = "0.1.0" dependencies = [ "chrono", "chrono-english", - "clap 4.0.14", + "clap 4.0.15", "clap_blocks", "data_types", "filetime", @@ -2171,6 +2190,7 @@ name = "influxdb_influxql_parser" version = "0.1.0" dependencies = [ "assert_matches", + "insta", "nom", "test_helpers", "workspace-hack", @@ -2185,7 +2205,7 @@ dependencies = [ "assert_cmd", "backtrace", "bytes", - "clap 4.0.14", + "clap 4.0.15", "clap_blocks", "compactor", "console-subscriber", @@ -2373,6 +2393,20 @@ dependencies = [ "write_summary", ] +[[package]] +name = "insta" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581d4e3314cae4536e5d22ffd23189d4a374696c5ef733eadafae0ed273fd303" +dependencies = [ + "console", + "lazy_static", + "linked-hash-map", + "serde", + "similar", + "yaml-rust", +] + [[package]] name = "instant" version = "0.1.12" @@ -2430,7 +2464,7 @@ dependencies = [ "bytes", "chrono", "chrono-english", - "clap 4.0.14", + "clap 4.0.15", "criterion", "futures", "handlebars", @@ -2524,7 +2558,7 @@ dependencies = [ "async-trait", "bytes", "chrono", - "clap 4.0.14", + "clap 4.0.15", "clap_blocks", "data_types", "flate2", @@ -2680,7 +2714,7 @@ name = "ioxd_test" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.0.14", + "clap 4.0.15", "hyper", "ioxd_common", "metric", @@ -2854,6 +2888,12 @@ dependencies = [ "cc", ] +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "linux-raw-sys" version = "0.0.46" @@ -5200,6 +5240,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "termtree" version = "0.2.4" @@ -5606,7 +5656,7 @@ version = "0.1.0" dependencies = [ 
"async-trait", "chrono", - "clap 4.0.14", + "clap 4.0.15", "futures", "observability_deps", "snafu", @@ -5745,7 +5795,7 @@ name = "trogging" version = "0.1.0" dependencies = [ "atty", - "clap 4.0.14", + "clap 4.0.15", "logfmt", "observability_deps", "regex", @@ -6158,6 +6208,7 @@ dependencies = [ "serde", "serde_json", "sha2", + "similar", "smallvec", "sqlx", "sqlx-core", @@ -6234,6 +6285,15 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "yansi" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 37f7b6b2c6..fe94e1e82b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,3 +121,10 @@ inherits = "release" codegen-units = 16 lto = false incremental = true + +# Per insta docs: https://insta.rs/docs/quickstart/#optional-faster-runs +[profile.dev.package.insta] +opt-level = 3 + +[profile.dev.package.similar] +opt-level = 3 diff --git a/influxdb_influxql_parser/Cargo.toml b/influxdb_influxql_parser/Cargo.toml index cc1b8ae84b..3b35b18281 100644 --- a/influxdb_influxql_parser/Cargo.toml +++ b/influxdb_influxql_parser/Cargo.toml @@ -9,4 +9,5 @@ workspace-hack = { path = "../workspace-hack"} [dev-dependencies] # In alphabetical order test_helpers = { path = "../test_helpers" } -assert_matches = "1" \ No newline at end of file +assert_matches = "1" +insta = { version = "1.21.0", features = ["yaml"] } \ No newline at end of file diff --git a/influxdb_influxql_parser/src/common.rs b/influxdb_influxql_parser/src/common.rs index a6b245c397..167f53edc1 100644 --- a/influxdb_influxql_parser/src/common.rs +++ b/influxdb_influxql_parser/src/common.rs @@ -11,6 +11,7 @@ use nom::combinator::{map, opt, value}; use nom::multi::separated_list1; use nom::sequence::{pair, preceded, terminated}; use std::fmt::{Display, Formatter}; +use std::ops::{Deref, DerefMut}; /// Represents a measurement name as either an identifier or a regular expression. #[derive(Clone, Debug, PartialEq, Eq)] @@ -44,8 +45,13 @@ impl Display for MeasurementName { /// Represents a fully-qualified, 3-part measurement name. #[derive(Clone, Debug, PartialEq, Eq)] pub struct QualifiedMeasurementName { + /// An optional database name. pub database: Option<Identifier>, + + /// An optional retention policy. pub retention_policy: Option<Identifier>, + + /// The measurement name. pub name: MeasurementName, } @@ -87,7 +93,7 @@ impl Display for QualifiedMeasurementName { /// policy_name ::= identifier /// measurement_name ::= identifier | regex_lit /// ``` -pub fn qualified_measurement_name(i: &str) -> ParseResult<&str, QualifiedMeasurementName> { +pub(crate) fn qualified_measurement_name(i: &str) -> ParseResult<&str, QualifiedMeasurementName> { let (remaining_input, (opt_db_rp, name)) = pair( opt(alt(( // database "." retention_policy "." @@ -122,49 +128,153 @@ pub fn qualified_measurement_name(i: &str) -> ParseResult<&str, QualifiedMeasure )) } +/// Implements common behaviour for u64 tuple-struct types +#[macro_export] +macro_rules! impl_tuple_clause { + ($NAME:ident, $FOR:ty) => { + impl $NAME { + /// Create a new instance with the specified value. 
+ pub fn new(value: $FOR) -> Self { + Self(value) + } + } + + impl std::ops::DerefMut for $NAME { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + + impl std::ops::Deref for $NAME { + type Target = $FOR; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl From<$FOR> for $NAME { + fn from(value: $FOR) -> Self { + Self(value) + } + } + }; +} + +/// Represents the value for a `LIMIT` clause. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct LimitClause(pub(crate) u64); + +impl_tuple_clause!(LimitClause, u64); + +impl Display for LimitClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "LIMIT {}", self.0) + } +} + /// Parse a `LIMIT <n>` clause. -pub fn limit_clause(i: &str) -> ParseResult<&str, u64> { +pub(crate) fn limit_clause(i: &str) -> ParseResult<&str, LimitClause> { preceded( pair(tag_no_case("LIMIT"), multispace1), expect( "invalid LIMIT clause, expected unsigned integer", - unsigned_integer, + map(unsigned_integer, LimitClause), ), )(i) } +/// Represents the value for a `OFFSET` clause. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct OffsetClause(pub(crate) u64); + +impl_tuple_clause!(OffsetClause, u64); + +impl Display for OffsetClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "OFFSET {}", self.0) + } +} + /// Parse an `OFFSET <n>` clause. -pub fn offset_clause(i: &str) -> ParseResult<&str, u64> { +pub(crate) fn offset_clause(i: &str) -> ParseResult<&str, OffsetClause> { preceded( pair(tag_no_case("OFFSET"), multispace1), expect( "invalid OFFSET clause, expected unsigned integer", - unsigned_integer, + map(unsigned_integer, OffsetClause), ), )(i) } /// Parse a terminator that ends a SQL statement. -pub fn statement_terminator(i: &str) -> ParseResult<&str, ()> { +pub(crate) fn statement_terminator(i: &str) -> ParseResult<&str, ()> { value((), char(';'))(i) } +/// Represents the `WHERE` clause of a statement. +#[derive(Debug, Clone, PartialEq)] +pub struct WhereClause(pub(crate) ConditionalExpression); + +impl WhereClause { + /// Create an instance of a `WhereClause` using `expr` + pub fn new(expr: ConditionalExpression) -> Self { + Self(expr) + } +} + +impl DerefMut for WhereClause { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Deref for WhereClause { + type Target = ConditionalExpression; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Display for WhereClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "WHERE {}", self.0) + } +} + /// Parse a `WHERE` clause. -pub fn where_clause(i: &str) -> ParseResult<&str, ConditionalExpression> { +pub(crate) fn where_clause(i: &str) -> ParseResult<&str, WhereClause> { preceded( pair(tag_no_case("WHERE"), multispace1), - conditional_expression, + map(conditional_expression, WhereClause), )(i) } /// Represents an InfluxQL `ORDER BY` clause. #[derive(Default, Debug, Clone, Copy, Eq, PartialEq)] pub enum OrderByClause { + /// Signals the `ORDER BY` is in ascending order. #[default] Ascending, + + /// Signals the `ORDER BY` is in descending order. Descending, } +impl Display for OrderByClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "ORDER BY TIME {}", + match self { + Self::Ascending => "ASC", + Self::Descending => "DESC", + } + ) + } +} + /// Parse an InfluxQL `ORDER BY` clause. 
/// /// An `ORDER BY` in InfluxQL is limited when compared to the equivalent @@ -187,7 +297,7 @@ pub enum OrderByClause { /// ``` /// /// [EBNF]: https://www.w3.org/TR/2010/REC-xquery-20101214/#EBNFNotation -pub fn order_by_clause(i: &str) -> ParseResult<&str, OrderByClause> { +pub(crate) fn order_by_clause(i: &str) -> ParseResult<&str, OrderByClause> { let order = || { preceded( multispace1, @@ -221,29 +331,18 @@ pub fn order_by_clause(i: &str) -> ParseResult<&str, OrderByClause> { /// Parser is a trait that allows a type to parse itself. pub trait Parser: Sized { + /// Parse this type from the string `i`. fn parse(i: &str) -> ParseResult<&str, Self>; } /// `OneOrMore` is a container for representing a minimum of one `T`. -/// -/// `OneOrMore` provides a default implementation of [`fmt::Display`], -/// which displays the contents separated by commas. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct OneOrMore<T: Display + Parser> { - contents: Vec<T>, +pub struct OneOrMore<T> { + pub(crate) contents: Vec<T>, } -impl<T: Display + Parser> Display for OneOrMore<T> { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - Display::fmt(self.first(), f)?; - for arg in self.rest() { - write!(f, ", {}", arg)?; - } - Ok(()) - } -} - -impl<T: Display + Parser> OneOrMore<T> { +#[allow(clippy::len_without_is_empty)] +impl<T> OneOrMore<T> { /// Construct a new `OneOrMore<T>` with `contents`. /// /// **NOTE:** that `new` panics if contents is empty. @@ -272,11 +371,11 @@ impl<T: Display + Parser> OneOrMore<T> { } } -impl<T: Display + Parser> OneOrMore<T> { +impl<T: Parser> OneOrMore<T> { /// Parse a list of one or more `T`, separated by commas. /// /// Returns an error using `msg` if `separated_list1` fails to parse any elements. - pub fn separated_list1<'a>( + pub(crate) fn separated_list1<'a>( msg: &'static str, ) -> impl FnMut(&'a str) -> ParseResult<&'a str, Self> { move |i: &str| { @@ -303,7 +402,7 @@ mod tests { impl From<&str> for MeasurementName { /// Convert a `str` to [`MeasurementName::Name`]. 
fn from(s: &str) -> Self { - Self::Name(Identifier(s.into())) + Self::Name(Identifier::new(s.into())) } } @@ -408,15 +507,15 @@ mod tests { #[test] fn test_limit_clause() { let (_, got) = limit_clause("LIMIT 587").unwrap(); - assert_eq!(got, 587); + assert_eq!(*got, 587); // case insensitive let (_, got) = limit_clause("limit 587").unwrap(); - assert_eq!(got, 587); + assert_eq!(*got, 587); // extra spaces between tokens let (_, got) = limit_clause("LIMIT 123").unwrap(); - assert_eq!(got, 123); + assert_eq!(*got, 123); // not digits assert_expect_error!( @@ -440,15 +539,15 @@ mod tests { #[test] fn test_offset_clause() { let (_, got) = offset_clause("OFFSET 587").unwrap(); - assert_eq!(got, 587); + assert_eq!(*got, 587); // case insensitive let (_, got) = offset_clause("offset 587").unwrap(); - assert_eq!(got, 587); + assert_eq!(*got, 587); // extra spaces between tokens let (_, got) = offset_clause("OFFSET 123").unwrap(); - assert_eq!(got, 123); + assert_eq!(*got, 123); // not digits assert_expect_error!( @@ -537,6 +636,16 @@ mod tests { type OneOrMoreString = OneOrMore<String>; + impl Display for OneOrMoreString { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Display::fmt(self.first(), f)?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } + } + #[test] #[should_panic(expected = "OneOrMore requires elements")] fn test_one_or_more() { diff --git a/influxdb_influxql_parser/src/delete.rs b/influxdb_influxql_parser/src/delete.rs index 6d8a8c7cad..464f30c563 100644 --- a/influxdb_influxql_parser/src/delete.rs +++ b/influxdb_influxql_parser/src/delete.rs @@ -1,5 +1,4 @@ -use crate::common::where_clause; -use crate::expression::conditional::ConditionalExpression; +use crate::common::{where_clause, WhereClause}; use crate::internal::{expect, ParseResult}; use crate::simple_from_clause::{delete_from_clause, DeleteFromClause}; use nom::branch::alt; @@ -9,17 +8,21 @@ use nom::combinator::{map, opt}; use nom::sequence::{pair, preceded}; use std::fmt::{Display, Formatter}; +/// Represents a `DELETE` statement. #[derive(Clone, Debug, PartialEq)] pub enum DeleteStatement { - /// A DELETE with a measurement or measurements and an optional conditional expression - /// to restrict which series are deleted. + /// A DELETE with a `FROM` clause specifying one or more measurements + /// and an optional `WHERE` clause to restrict which series are deleted. FromWhere { + /// Represents the `FROM` clause. from: DeleteFromClause, - condition: Option<ConditionalExpression>, + + /// Represents the optional `WHERE` clause. + condition: Option<WhereClause>, }, - /// A `DELETE` with a conditional expression to restrict which series are deleted. - Where(ConditionalExpression), + /// A `DELETE` with a `WHERE` clause to restrict which series are deleted. + Where(WhereClause), } impl Display for DeleteStatement { @@ -28,12 +31,12 @@ impl Display for DeleteStatement { match self { Self::FromWhere { from, condition } => { - write!(f, " FROM {}", from)?; - if let Some(condition) = condition { - write!(f, " WHERE {}", condition)?; + write!(f, " {}", from)?; + if let Some(where_clause) = condition { + write!(f, " {}", where_clause)?; } } - Self::Where(condition) => write!(f, " WHERE {}", condition)?, + Self::Where(where_clause) => write!(f, " {}", where_clause)?, }; Ok(()) @@ -41,7 +44,7 @@ impl Display for DeleteStatement { } /// Parse a `DELETE` statement. 
-pub fn delete_statement(i: &str) -> ParseResult<&str, DeleteStatement> { +pub(crate) fn delete_statement(i: &str) -> ParseResult<&str, DeleteStatement> { // delete ::= "DELETE" ( from_clause where_clause? | where_clause ) preceded( tag_no_case("DELETE"), diff --git a/influxdb_influxql_parser/src/drop.rs b/influxdb_influxql_parser/src/drop.rs index 2d83fd0763..afef146fff 100644 --- a/influxdb_influxql_parser/src/drop.rs +++ b/influxdb_influxql_parser/src/drop.rs @@ -19,7 +19,7 @@ impl Display for DropMeasurementStatement { } } -pub fn drop_statement(i: &str) -> ParseResult<&str, DropMeasurementStatement> { +pub(crate) fn drop_statement(i: &str) -> ParseResult<&str, DropMeasurementStatement> { preceded( pair(tag_no_case("DROP"), multispace1), expect( diff --git a/influxdb_influxql_parser/src/explain.rs b/influxdb_influxql_parser/src/explain.rs index c9576aa3e8..5c3923264e 100644 --- a/influxdb_influxql_parser/src/explain.rs +++ b/influxdb_influxql_parser/src/explain.rs @@ -10,7 +10,7 @@ use nom::sequence::{preceded, tuple}; use std::fmt::{Display, Formatter}; /// Represents various options for an `EXPLAIN` statement. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ExplainOption { /// `EXPLAIN VERBOSE statement` Verbose, @@ -38,8 +38,11 @@ impl Display for ExplainOption { /// ``` #[derive(Debug, Clone, PartialEq)] pub struct ExplainStatement { - options: Option<ExplainOption>, - select: Box<SelectStatement>, + /// Represents any options specified for the `EXPLAIN` statement. + pub options: Option<ExplainOption>, + + /// Represents the `SELECT` statement to be explained and / or analyzed. + pub select: Box<SelectStatement>, } impl Display for ExplainStatement { @@ -53,7 +56,7 @@ impl Display for ExplainStatement { } /// Parse an `EXPLAIN` statement. -pub fn explain_statement(i: &str) -> ParseResult<&str, ExplainStatement> { +pub(crate) fn explain_statement(i: &str) -> ParseResult<&str, ExplainStatement> { map( tuple(( tag_no_case("EXPLAIN"), diff --git a/influxdb_influxql_parser/src/expression.rs b/influxdb_influxql_parser/src/expression.rs index 250bbf2058..8292984e28 100644 --- a/influxdb_influxql_parser/src/expression.rs +++ b/influxdb_influxql_parser/src/expression.rs @@ -1,4 +1,9 @@ +pub use arithmetic::*; +pub use conditional::*; + +/// Provides arithmetic expression parsing. pub mod arithmetic; +/// Provides conditional expression parsing. pub mod conditional; #[cfg(test)] diff --git a/influxdb_influxql_parser/src/expression/arithmetic.rs b/influxdb_influxql_parser/src/expression/arithmetic.rs index a3aa7d0eed..6b2a8304f8 100644 --- a/influxdb_influxql_parser/src/expression/arithmetic.rs +++ b/influxdb_influxql_parser/src/expression/arithmetic.rs @@ -68,12 +68,21 @@ pub enum Expr { UnaryOp(UnaryOperator, Box<Expr>), /// Function call - Call { name: String, args: Vec<Expr> }, + Call { + /// Represents the name of the function call. + name: String, + + /// Represents the list of arguments to the function call. + args: Vec<Expr>, + }, /// Binary operations, such as `1 + 2`. Binary { + /// Represents the left-hand side of the binary expression. lhs: Box<Expr>, + /// Represents the operator to apply to the binary expression. op: BinaryOperator, + /// Represents the right-hand side of the binary expression. rhs: Box<Expr>, }, @@ -142,7 +151,10 @@ impl Display for Expr { /// Specifies the data type of a wildcard (`*`) when using the `::` operator. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WildcardType { + /// Indicates the wildcard refers to tags only. Tag, + + /// Indicates the wildcard refers to fields only. Field, } @@ -163,11 +175,17 @@ impl Display for WildcardType { /// [cast]: https://docs.influxdata.com/influxdb/v1.8/query_language/explore-data/#cast-operations #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum VarRefDataType { + /// Represents a 64-bit float. Float, + /// Represents a 64-bit integer. Integer, + /// Represents a UTF-8 string. String, + /// Represents a boolean. Boolean, + /// Represents a tag. Tag, + /// Represents a field. Field, } @@ -187,7 +205,9 @@ impl Display for VarRefDataType { /// An InfluxQL unary operator. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UnaryOperator { + /// Represents the unary `+` operator. Plus, + /// Represents the unary `-` operator. Minus, } @@ -203,14 +223,22 @@ impl Display for UnaryOperator { /// An InfluxQL binary operators. #[derive(Clone, Debug, Copy, PartialEq, Eq)] pub enum BinaryOperator { - Add, // + - Sub, // - - Mul, // * - Div, // / - Mod, // % - BitwiseAnd, // & - BitwiseOr, // | - BitwiseXor, // ^ + /// Represents the `+` operator. + Add, + /// Represents the `-` operator. + Sub, + /// Represents the `*` operator. + Mul, + /// Represents the `/` operator. + Div, + /// Represents the `%` or modulus operator. + Mod, + /// Represents the `&` or bitwise-and operator. + BitwiseAnd, + /// Represents the `|` or bitwise-or operator. + BitwiseOr, + /// Represents the `^` or bitwise-xor operator. + BitwiseXor, } impl Display for BinaryOperator { @@ -259,7 +287,7 @@ where } /// Parse a function call expression -pub fn call_expression<T>(i: &str) -> ParseResult<&str, Expr> +pub(crate) fn call_expression<T>(i: &str) -> ParseResult<&str, Expr> where T: ArithmeticParsers, { @@ -283,7 +311,7 @@ where } /// Parse a variable reference, which is an identifier followed by an optional cast expression. -pub fn var_ref(i: &str) -> ParseResult<&str, Expr> { +pub(crate) fn var_ref(i: &str) -> ParseResult<&str, Expr> { map( pair( identifier, @@ -342,7 +370,7 @@ where /// Parse an arithmetic expression. /// /// This includes the addition, subtraction, bitwise or, and bitwise xor operators. -pub fn arithmetic<T>(i: &str) -> ParseResult<&str, Expr> +pub(crate) fn arithmetic<T>(i: &str) -> ParseResult<&str, Expr> where T: ArithmeticParsers, { @@ -363,7 +391,7 @@ where } /// A trait for customizing arithmetic parsers. -pub trait ArithmeticParsers { +pub(crate) trait ArithmeticParsers { /// Parse an operand of an arithmetic expression. fn operand(i: &str) -> ParseResult<&str, Expr>; } diff --git a/influxdb_influxql_parser/src/expression/conditional.rs b/influxdb_influxql_parser/src/expression/conditional.rs index 991966ce4b..15b665d5a1 100644 --- a/influxdb_influxql_parser/src/expression/conditional.rs +++ b/influxdb_influxql_parser/src/expression/conditional.rs @@ -13,19 +13,31 @@ use nom::sequence::{delimited, preceded, tuple}; use std::fmt; use std::fmt::{Display, Formatter, Write}; +/// Represents on of the conditional operators supported by [`ConditionalExpression::Binary`]. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ConditionalOperator { - Eq, // = - NotEq, // != - EqRegex, // =~ - NotEqRegex, // !~ - Lt, // < - LtEq, // <= - Gt, // > - GtEq, // >= - In, // IN - And, // AND - Or, // OR + /// Represents the `=` operator. + Eq, + /// Represents the `!=` operator. + NotEq, + /// Represents the `=~` (regular expression equals) operator. 
+ EqRegex, + /// Represents the `!~` (regular expression not equals) operator. + NotEqRegex, + /// Represents the `<` operator. + Lt, + /// Represents the `<=` operator. + LtEq, + /// Represents the `>` operator. + Gt, + /// Represents the `>=` operator. + GtEq, + /// Represents the `IN` operator. + In, + /// Represents the `AND` operator. + And, + /// Represents the `OR` operator. + Or, } impl Display for ConditionalOperator { @@ -46,6 +58,7 @@ impl Display for ConditionalOperator { } } +/// Represents a conditional expression. #[derive(Debug, Clone, PartialEq)] pub enum ConditionalExpression { /// Represents an arithmetic expression. @@ -53,8 +66,11 @@ pub enum ConditionalExpression { /// Binary operations, such as `foo = 'bar'` or `true AND false`. Binary { + /// Represents the left-hand side of the conditional binary expression. lhs: Box<ConditionalExpression>, + /// Represents the operator to apply to the conditional binary expression. op: ConditionalOperator, + /// Represents the right-hand side of the conditional binary expression. rhs: Box<ConditionalExpression>, }, @@ -162,7 +178,7 @@ fn disjunction(i: &str) -> ParseResult<&str, ConditionalExpression> { } /// Parse an InfluxQL conditional expression. -pub fn conditional_expression(i: &str) -> ParseResult<&str, ConditionalExpression> { +pub(crate) fn conditional_expression(i: &str) -> ParseResult<&str, ConditionalExpression> { disjunction(i) } @@ -181,7 +197,7 @@ fn reduce_expr( } /// Returns true if `expr` is a valid [`Expr::Call`] expression for the `now` function. -pub fn is_valid_now_call(expr: &Expr) -> bool { +pub(crate) fn is_valid_now_call(expr: &Expr) -> bool { match expr { Expr::Call { name, args } => name.to_lowercase() == "now" && args.is_empty(), _ => false, diff --git a/influxdb_influxql_parser/src/expression/test_util.rs b/influxdb_influxql_parser/src/expression/test_util.rs index ec39ee36ae..1d1d16c77a 100644 --- a/influxdb_influxql_parser/src/expression/test_util.rs +++ b/influxdb_influxql_parser/src/expression/test_util.rs @@ -33,7 +33,7 @@ macro_rules! regex { macro_rules! param { ($EXPR: expr) => { $crate::expression::arithmetic::Expr::BindParameter( - $crate::parameter::BindParameter($EXPR.into()).into(), + $crate::parameter::BindParameter::new($EXPR.into()).into(), ) }; } diff --git a/influxdb_influxql_parser/src/identifier.rs b/influxdb_influxql_parser/src/identifier.rs index 895a5bbdad..2564978aab 100644 --- a/influxdb_influxql_parser/src/identifier.rs +++ b/influxdb_influxql_parser/src/identifier.rs @@ -14,7 +14,7 @@ use crate::internal::ParseResult; use crate::keywords::sql_keyword; use crate::string::double_quoted_string; -use crate::write_quoted_string; +use crate::{impl_tuple_clause, write_quoted_string}; use nom::branch::alt; use nom::bytes::complete::tag; use nom::character::complete::{alpha1, alphanumeric1}; @@ -25,7 +25,7 @@ use std::fmt; use std::fmt::{Display, Formatter, Write}; /// Parse an unquoted InfluxQL identifier. -pub fn unquoted_identifier(i: &str) -> ParseResult<&str, &str> { +pub(crate) fn unquoted_identifier(i: &str) -> ParseResult<&str, &str> { preceded( not(sql_keyword), recognize(pair( @@ -37,13 +37,9 @@ pub fn unquoted_identifier(i: &str) -> ParseResult<&str, &str> { /// A type that represents an InfluxQL identifier. 
#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct Identifier(pub String); +pub struct Identifier(pub(crate) String); -impl From<String> for Identifier { - fn from(s: String) -> Self { - Self(s) - } -} +impl_tuple_clause!(Identifier, String); impl From<&str> for Identifier { fn from(s: &str) -> Self { @@ -59,7 +55,7 @@ impl Display for Identifier { } /// Parses an InfluxQL [Identifier]. -pub fn identifier(i: &str) -> ParseResult<&str, Identifier> { +pub(crate) fn identifier(i: &str) -> ParseResult<&str, Identifier> { // See: https://github.com/influxdata/influxql/blob/df51a45762be9c1b578f01718fa92d286a843fe9/scanner.go#L358-L362 alt(( map(unquoted_identifier, Into::into), diff --git a/influxdb_influxql_parser/src/internal.rs b/influxdb_influxql_parser/src/internal.rs index a18c6f5a10..a4367e0ae7 100644 --- a/influxdb_influxql_parser/src/internal.rs +++ b/influxdb_influxql_parser/src/internal.rs @@ -109,4 +109,4 @@ impl<I> NomParseError<I> for Error<I> { /// ParseResult is a type alias for [`nom::IResult`] used by nom combinator /// functions for parsing InfluxQL. -pub type ParseResult<I, T, E = Error<I>> = nom::IResult<I, T, E>; +pub(crate) type ParseResult<I, T, E = Error<I>> = nom::IResult<I, T, E>; diff --git a/influxdb_influxql_parser/src/keywords.rs b/influxdb_influxql_parser/src/keywords.rs index 4648920c34..3af1146c9d 100644 --- a/influxdb_influxql_parser/src/keywords.rs +++ b/influxdb_influxql_parser/src/keywords.rs @@ -126,7 +126,7 @@ fn keyword_show_to_write(i: &str) -> ParseResult<&str, &str> { } /// Matches any InfluxQL reserved keyword. -pub fn sql_keyword(i: &str) -> ParseResult<&str, &str> { +pub(crate) fn sql_keyword(i: &str) -> ParseResult<&str, &str> { // NOTE that the alt function takes a tuple with a maximum arity of 21, hence // the reason these are broken into groups alt(( diff --git a/influxdb_influxql_parser/src/lib.rs b/influxdb_influxql_parser/src/lib.rs index 231e3fe0e9..9df8080544 100644 --- a/influxdb_influxql_parser/src/lib.rs +++ b/influxdb_influxql_parser/src/lib.rs @@ -14,10 +14,29 @@ clippy::dbg_macro )] +pub use crate::common::*; +pub use crate::delete::*; +pub use crate::drop::*; +pub use crate::explain::*; +pub use crate::expression::*; +pub use crate::identifier::*; +pub use crate::literal::*; +pub use crate::parameter::*; +pub use crate::select::*; +pub use crate::show::*; +pub use crate::show_field_keys::*; +pub use crate::show_measurements::*; +pub use crate::show_retention_policies::*; +pub use crate::show_tag_keys::*; +pub use crate::show_tag_values::*; +pub use crate::simple_from_clause::*; +pub use crate::statement::*; +pub use crate::string::*; +pub use crate::visit::*; + use crate::common::statement_terminator; use crate::internal::Error as InternalError; use crate::statement::statement; -pub use crate::statement::Statement; use nom::character::complete::multispace0; use nom::combinator::eof; use nom::Offset; @@ -46,6 +65,7 @@ mod show_tag_values; mod simple_from_clause; mod statement; mod string; +mod visit; /// A error returned when parsing an InfluxQL query using /// [`parse_statements`] fails. 
diff --git a/influxdb_influxql_parser/src/literal.rs b/influxdb_influxql_parser/src/literal.rs index 8fda175130..5497521a30 100644 --- a/influxdb_influxql_parser/src/literal.rs +++ b/influxdb_influxql_parser/src/literal.rs @@ -1,6 +1,6 @@ use crate::internal::{map_fail, ParseResult}; use crate::string::{regex, single_quoted_string, Regex}; -use crate::write_escaped; +use crate::{impl_tuple_clause, write_escaped}; use nom::branch::alt; use nom::bytes::complete::{tag, tag_no_case}; use nom::character::complete::{char, digit1, multispace0}; @@ -25,7 +25,7 @@ const NANOS_PER_DAY: i64 = 24 * NANOS_PER_HOUR; /// Number of nanoseconds in a week. const NANOS_PER_WEEK: i64 = 7 * NANOS_PER_DAY; -// Primitive InfluxQL literal values, such as strings and regular expressions. +/// Primitive InfluxQL literal values, such as strings and regular expressions. #[derive(Clone, Debug, PartialEq)] pub enum Literal { /// Unsigned integer literal. @@ -118,7 +118,7 @@ fn integer(i: &str) -> ParseResult<&str, i64> { /// ```text /// INTEGER ::= [0-9]+ /// ``` -pub fn unsigned_integer(i: &str) -> ParseResult<&str, u64> { +pub(crate) fn unsigned_integer(i: &str) -> ParseResult<&str, u64> { map_fail("unable to parse unsigned integer", digit1, &str::parse)(i) } @@ -141,7 +141,9 @@ fn float(i: &str) -> ParseResult<&str, f64> { /// Represents any signed number. #[derive(Debug, Clone, Copy, PartialEq)] pub enum Number { + /// Contains a 64-bit integer. Integer(i64), + /// Contains a 64-bit float. Float(f64), } @@ -167,7 +169,7 @@ impl From<i64> for Number { } /// Parse a signed [`Number`]. -pub fn number(i: &str) -> ParseResult<&str, Number> { +pub(crate) fn number(i: &str) -> ParseResult<&str, Number> { let (remaining, sign) = opt(alt((char('-'), char('+'))))(i)?; preceded( multispace0, @@ -204,13 +206,9 @@ enum DurationUnit { /// Represents an InfluxQL duration in nanoseconds. #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct Duration(i64); +pub struct Duration(pub(crate) i64); -impl From<i64> for Duration { - fn from(v: i64) -> Self { - Self(v) - } -} +impl_tuple_clause!(Duration, i64); static DIVISORS: [(i64, &str); 8] = [ (NANOS_PER_WEEK, "w"), @@ -276,7 +274,7 @@ fn single_duration(i: &str) -> ParseResult<&str, i64> { } /// Parse the input for an InfluxQL duration. -pub fn duration(i: &str) -> ParseResult<&str, Duration> { +pub(crate) fn duration(i: &str) -> ParseResult<&str, Duration> { map( fold_many1(single_duration, || 0, |acc, fragment| acc + fragment), Duration, @@ -286,7 +284,7 @@ pub fn duration(i: &str) -> ParseResult<&str, Duration> { /// Parse an InfluxQL literal, except a [`Regex`]. /// /// Use [`literal`] for parsing any literals, excluding regular expressions. -pub fn literal_no_regex(i: &str) -> ParseResult<&str, Literal> { +pub(crate) fn literal_no_regex(i: &str) -> ParseResult<&str, Literal> { alt(( // NOTE: order is important, as floats should be tested before durations and integers. map(float, Literal::Float), @@ -298,12 +296,12 @@ pub fn literal_no_regex(i: &str) -> ParseResult<&str, Literal> { } /// Parse any InfluxQL literal. -pub fn literal(i: &str) -> ParseResult<&str, Literal> { +pub(crate) fn literal(i: &str) -> ParseResult<&str, Literal> { alt((literal_no_regex, map(regex, Literal::Regex)))(i) } /// Parse an InfluxQL literal regular expression. 
-pub fn literal_regex(i: &str) -> ParseResult<&str, Literal> { +pub(crate) fn literal_regex(i: &str) -> ParseResult<&str, Literal> { map(regex, Literal::Regex)(i) } diff --git a/influxdb_influxql_parser/src/parameter.rs b/influxdb_influxql_parser/src/parameter.rs index ea62861ec8..599e38ff60 100644 --- a/influxdb_influxql_parser/src/parameter.rs +++ b/influxdb_influxql_parser/src/parameter.rs @@ -9,7 +9,7 @@ use crate::internal::ParseResult; use crate::string::double_quoted_string; -use crate::write_quoted_string; +use crate::{impl_tuple_clause, write_quoted_string}; use nom::branch::alt; use nom::bytes::complete::tag; use nom::character::complete::{alphanumeric1, char}; @@ -26,13 +26,9 @@ fn unquoted_parameter(i: &str) -> ParseResult<&str, &str> { /// A type that represents an InfluxQL bind parameter. #[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct BindParameter(pub String); +pub struct BindParameter(pub(crate) String); -impl From<String> for BindParameter { - fn from(s: String) -> Self { - Self(s) - } -} +impl_tuple_clause!(BindParameter, String); impl From<&str> for BindParameter { fn from(s: &str) -> Self { @@ -49,7 +45,7 @@ impl Display for BindParameter { } /// Parses an InfluxQL [BindParameter]. -pub fn parameter(i: &str) -> ParseResult<&str, BindParameter> { +pub(crate) fn parameter(i: &str) -> ParseResult<&str, BindParameter> { // See: https://github.com/influxdata/influxql/blob/df51a45762be9c1b578f01718fa92d286a843fe9/scanner.go#L358-L362 preceded( char('$'), diff --git a/influxdb_influxql_parser/src/select.rs b/influxdb_influxql_parser/src/select.rs index 7b9764c182..b23c786d23 100644 --- a/influxdb_influxql_parser/src/select.rs +++ b/influxdb_influxql_parser/src/select.rs @@ -1,19 +1,20 @@ use crate::common::{ limit_clause, offset_clause, order_by_clause, qualified_measurement_name, where_clause, - OneOrMore, OrderByClause, Parser, QualifiedMeasurementName, + LimitClause, OffsetClause, OneOrMore, OrderByClause, Parser, QualifiedMeasurementName, + WhereClause, }; use crate::expression::arithmetic::Expr::Wildcard; use crate::expression::arithmetic::{ arithmetic, call_expression, var_ref, ArithmeticParsers, Expr, WildcardType, }; -use crate::expression::conditional::{is_valid_now_call, ConditionalExpression}; +use crate::expression::conditional::is_valid_now_call; use crate::identifier::{identifier, Identifier}; use crate::internal::{expect, verify, ParseResult}; use crate::literal::{duration, literal, number, unsigned_integer, Literal, Number}; use crate::parameter::parameter; use crate::select::MeasurementSelection::Subquery; use crate::string::{regex, single_quoted_string, Regex}; -use crate::write_escaped; +use crate::{impl_tuple_clause, write_escaped}; use nom::branch::alt; use nom::bytes::complete::{tag, tag_no_case}; use nom::character::complete::{char, multispace0, multispace1}; @@ -22,6 +23,7 @@ use nom::sequence::{delimited, pair, preceded, tuple}; use std::fmt; use std::fmt::{Display, Formatter, Write}; +/// Represents a `SELECT` statement. #[derive(Clone, Debug, PartialEq)] pub struct SelectStatement { /// Expressions returned by the selection. @@ -31,85 +33,83 @@ pub struct SelectStatement { pub from: FromMeasurementClause, /// A conditional expression to filter the selection. - pub condition: Option<ConditionalExpression>, + pub condition: Option<WhereClause>, /// Expressions used for grouping the selection. - pub group_by: Option<GroupByList>, + pub group_by: Option<GroupByClause>, - /// The [fill option][fill] specified for the selection. 
If the value is [`None`], + /// The [fill clause] specifies the fill behaviour for the selection. If the value is [`None`], /// it is the same behavior as `fill(none)`. /// /// [fill]: https://docs.influxdata.com/influxdb/v1.8/query_language/explore-data/#group-by-time-intervals-and-fill - pub fill_option: Option<FillOption>, + pub fill: Option<FillClause>, /// Configures the ordering of the selection by time. pub order_by: Option<OrderByClause>, /// A value to restrict the number of rows returned. - pub limit: Option<u64>, + pub limit: Option<LimitClause>, /// A value to specify an offset to start retrieving rows. - pub offset: Option<u64>, + pub offset: Option<OffsetClause>, /// A value to restrict the number of series returned. - pub series_limit: Option<u64>, + pub series_limit: Option<SLimitClause>, /// A value to specify an offset to start retrieving series. - pub series_offset: Option<u64>, + pub series_offset: Option<SOffsetClause>, /// The timezone for the query, specified as [`tz('<time zone>')`][time_zone_clause]. /// /// [time_zone_clause]: https://docs.influxdata.com/influxdb/v1.8/query_language/explore-data/#the-time-zone-clause - pub timezone: Option<String>, + pub timezone: Option<TimeZoneClause>, } impl Display for SelectStatement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "SELECT {} FROM {}", self.fields, self.from)?; + write!(f, "SELECT {} {}", self.fields, self.from)?; - if let Some(condition) = &self.condition { - write!(f, " WHERE {}", condition)?; + if let Some(where_clause) = &self.condition { + write!(f, " {}", where_clause)?; } if let Some(group_by) = &self.group_by { - write!(f, " GROUP BY {}", group_by)?; + write!(f, " {}", group_by)?; } - if let Some(fill_option) = &self.fill_option { - write!(f, " FILL({})", fill_option)?; + if let Some(fill_clause) = &self.fill { + write!(f, " {}", fill_clause)?; } - if let Some(OrderByClause::Descending) = &self.order_by { - write!(f, " ORDER BY TIME DESC")?; + if let Some(order_by) = &self.order_by { + write!(f, " {}", order_by)?; } if let Some(limit) = &self.limit { - write!(f, " LIMIT {}", limit)?; + write!(f, " {}", limit)?; } if let Some(offset) = &self.offset { - write!(f, " OFFSET {}", offset)?; + write!(f, " {}", offset)?; } if let Some(slimit) = &self.series_limit { - write!(f, " SLIMIT {}", slimit)?; + write!(f, " {}", slimit)?; } if let Some(soffset) = &self.series_offset { - write!(f, " SOFFSET {}", soffset)?; + write!(f, " {}", soffset)?; } - if let Some(tz) = &self.timezone { - f.write_str(" TZ('")?; - write_escaped!(f, tz, '\n' => "\\n", '\\' => "\\\\", '\'' => "\\'", '"' => "\\\""); - f.write_str("')")?; + if let Some(tz_clause) = &self.timezone { + write!(f, " {}", tz_clause)?; } Ok(()) } } -pub fn select_statement(i: &str) -> ParseResult<&str, SelectStatement> { +pub(crate) fn select_statement(i: &str) -> ParseResult<&str, SelectStatement> { let ( remaining, ( @@ -150,7 +150,7 @@ pub fn select_statement(i: &str) -> ParseResult<&str, SelectStatement> { from, condition, group_by, - fill_option, + fill: fill_option, order_by, limit, offset, @@ -161,10 +161,13 @@ pub fn select_statement(i: &str) -> ParseResult<&str, SelectStatement> { )) } -/// Represents a single measurement selection found in a `FROM` clause. +/// Represents a single measurement selection for a `FROM` clause. #[derive(Clone, Debug, PartialEq)] pub enum MeasurementSelection { + /// The measurement selection is measurement name or regular expression. 
Name(QualifiedMeasurementName), + + /// The measurement selection is a subquery. Subquery(Box<SelectStatement>), } @@ -196,6 +199,16 @@ impl Parser for MeasurementSelection { /// Represents a `FROM` clause for a `SELECT` statement. pub type FromMeasurementClause = OneOrMore<MeasurementSelection>; +impl Display for FromMeasurementClause { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "FROM {}", self.first())?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } +} + fn from_clause(i: &str) -> ParseResult<&str, FromMeasurementClause> { preceded( pair(tag_no_case("FROM"), multispace1), @@ -205,7 +218,18 @@ fn from_clause(i: &str) -> ParseResult<&str, FromMeasurementClause> { )(i) } -pub type GroupByList = OneOrMore<Dimension>; +/// Represents the collection of dimensions for a `GROUP BY` clause. +pub type GroupByClause = OneOrMore<Dimension>; + +impl Display for GroupByClause { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "GROUP BY {}", self.first())?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } +} /// Used to parse the interval argument of the TIME function struct TimeCallIntervalArgument; @@ -251,11 +275,14 @@ impl ArithmeticParsers for TimeCallOffsetArgument { } } +/// Represents a dimension of a `GROUP BY` clause. #[derive(Clone, Debug, PartialEq)] pub enum Dimension { /// Represents a `TIME` call in a `GROUP BY` clause. Time { + /// The first argument of the `TIME` call. interval: Expr, + /// An optional second argument to specify the offset applied to the `TIME` call. offset: Option<Expr>, }, @@ -336,7 +363,7 @@ fn time_call_expression(i: &str) -> ParseResult<&str, Dimension> { /// ```text /// group_by_clause ::= dimension ( "," dimension )* /// ``` -fn group_by_clause(i: &str) -> ParseResult<&str, GroupByList> { +fn group_by_clause(i: &str) -> ParseResult<&str, GroupByClause> { preceded( tuple(( tag_no_case("GROUP"), @@ -344,15 +371,15 @@ fn group_by_clause(i: &str) -> ParseResult<&str, GroupByList> { expect("invalid GROUP BY clause, expected BY", tag_no_case("BY")), multispace1, )), - GroupByList::separated_list1( + GroupByClause::separated_list1( "invalid GROUP BY clause, expected wildcard, TIME, identifier or regular expression", ), )(i) } -/// Represents all cases of an option argument of a `FILL` clause. +/// Represents a `FILL` clause, and specifies all possible cases of the argument to the `FILL` clause. #[derive(Debug, Clone, Copy, PartialEq)] -pub enum FillOption { +pub enum FillClause { /// Empty aggregate windows will contain null values and is specified as `fill(null)` Null, @@ -372,22 +399,27 @@ pub enum FillOption { Linear, } -impl Display for FillOption { +impl Display for FillClause { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("FILL(")?; match self { - Self::Null => f.write_str("NULL"), - Self::None => f.write_str("NONE"), - Self::Value(v) => fmt::Display::fmt(v, f), - Self::Previous => f.write_str("PREVIOUS"), - Self::Linear => f.write_str("LINEAR"), + Self::Null => f.write_str("NULL")?, + Self::None => f.write_str("NONE")?, + Self::Value(v) => fmt::Display::fmt(v, f)?, + Self::Previous => f.write_str("PREVIOUS")?, + Self::Linear => f.write_str("LINEAR")?, } + f.write_str(")") } } /// Represents an expression specified in the projection list of a `SELECT` statement. #[derive(Debug, Clone, PartialEq)] pub struct Field { + /// The expression which represents the field projection. pub expr: Expr, + + /// An optional alias for the field projection. 
pub alias: Option<Identifier>, } @@ -440,8 +472,19 @@ fn wildcard(i: &str) -> ParseResult<&str, Option<WildcardType>> { )(i) } +/// Represents the field projection list of a `SELECT` statement. pub type FieldList = OneOrMore<Field>; +impl Display for FieldList { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(self.first(), f)?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } +} + /// Parse a field expression. /// /// A field expression is an arithmetic expression accepting @@ -495,7 +538,7 @@ fn field_list(i: &str) -> ParseResult<&str, FieldList> { /// fill_option ::= "NULL" | "NONE" | "PREVIOUS" | "LINEAR" | number /// number ::= signed_integer | signed_float /// ``` -fn fill_clause(i: &str) -> ParseResult<&str, FillOption> { +fn fill_clause(i: &str) -> ParseResult<&str, FillClause> { preceded( tag_no_case("FILL"), delimited( @@ -505,11 +548,11 @@ fn fill_clause(i: &str) -> ParseResult<&str, FillOption> { preceded( multispace0, alt(( - value(FillOption::Null, tag_no_case("NULL")), - value(FillOption::None, tag_no_case("NONE")), - map(number, FillOption::Value), - value(FillOption::Previous, tag_no_case("PREVIOUS")), - value(FillOption::Linear, tag_no_case("LINEAR")), + value(FillClause::Null, tag_no_case("NULL")), + value(FillClause::None, tag_no_case("NONE")), + map(number, FillClause::Value), + value(FillClause::Previous, tag_no_case("PREVIOUS")), + value(FillClause::Linear, tag_no_case("LINEAR")), )), ), ), @@ -518,49 +561,87 @@ fn fill_clause(i: &str) -> ParseResult<&str, FillOption> { )(i) } +/// Represents the value for a `SLIMIT` clause. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct SLimitClause(pub(crate) u64); + +impl_tuple_clause!(SLimitClause, u64); + +impl Display for SLimitClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "SLIMIT {}", self.0) + } +} + /// Parse a series limit (`SLIMIT <n>`) clause. /// /// ```text /// slimit_clause ::= "SLIMIT" unsigned_integer /// ``` -fn slimit_clause(i: &str) -> ParseResult<&str, u64> { +fn slimit_clause(i: &str) -> ParseResult<&str, SLimitClause> { preceded( pair(tag_no_case("SLIMIT"), multispace1), expect( "invalid SLIMIT clause, expected unsigned integer", - unsigned_integer, + map(unsigned_integer, SLimitClause), ), )(i) } +/// Represents the value for a `SOFFSET` clause. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct SOffsetClause(pub(crate) u64); + +impl_tuple_clause!(SOffsetClause, u64); + +impl Display for SOffsetClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "SOFFSET {}", self.0) + } +} + /// Parse a series offset (`SOFFSET <n>`) clause. /// /// ```text /// soffset_clause ::= "SOFFSET" unsigned_integer /// ``` -fn soffset_clause(i: &str) -> ParseResult<&str, u64> { +fn soffset_clause(i: &str) -> ParseResult<&str, SOffsetClause> { preceded( pair(tag_no_case("SOFFSET"), multispace1), expect( "invalid SLIMIT clause, expected unsigned integer", - unsigned_integer, + map(unsigned_integer, SOffsetClause), ), )(i) } +/// Represents the value of the time zone string of a `TZ` clause. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TimeZoneClause(pub(crate) String); + +impl_tuple_clause!(TimeZoneClause, String); + +impl Display for TimeZoneClause { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("TZ('")?; + write_escaped!(f, self.0, '\n' => "\\n", '\\' => "\\\\", '\'' => "\\'", '"' => "\\\""); + f.write_str("')") + } +} + /// Parse a timezone clause. 
/// /// ```text /// timezone_clause ::= "TZ" "(" single_quoted_string ")" /// ``` -fn timezone_clause(i: &str) -> ParseResult<&str, String> { +fn timezone_clause(i: &str) -> ParseResult<&str, TimeZoneClause> { preceded( tag_no_case("TZ"), delimited( preceded(multispace0, char('(')), expect( "invalid TZ clause, expected string", - preceded(multispace0, single_quoted_string), + preceded(multispace0, map(single_quoted_string, TimeZoneClause)), ), preceded(multispace0, char(')')), ), @@ -608,7 +689,10 @@ mod test { ); let (_, got) = select_statement("SELECT value FROM foo ORDER BY TIME ASC").unwrap(); - assert_eq!(format!("{}", got), r#"SELECT value FROM foo"#); + assert_eq!( + format!("{}", got), + r#"SELECT value FROM foo ORDER BY TIME ASC"# + ); let (_, got) = select_statement("SELECT value FROM foo LIMIT 5").unwrap(); assert_eq!(format!("{}", got), r#"SELECT value FROM foo LIMIT 5"#); @@ -958,26 +1042,26 @@ mod test { #[test] fn test_fill_clause() { let (_, got) = fill_clause("FILL(null)").unwrap(); - assert_matches!(got, FillOption::Null); + assert_matches!(got, FillClause::Null); let (_, got) = fill_clause("FILL(NONE)").unwrap(); - assert_matches!(got, FillOption::None); + assert_matches!(got, FillClause::None); let (_, got) = fill_clause("FILL(53)").unwrap(); - assert_matches!(got, FillOption::Value(v) if v == 53.into()); + assert_matches!(got, FillClause::Value(v) if v == 53.into()); let (_, got) = fill_clause("FILL(-18.9)").unwrap(); - assert_matches!(got, FillOption::Value(v) if v == (-18.9).into()); + assert_matches!(got, FillClause::Value(v) if v == (-18.9).into()); let (_, got) = fill_clause("FILL(previous)").unwrap(); - assert_matches!(got, FillOption::Previous); + assert_matches!(got, FillClause::Previous); let (_, got) = fill_clause("FILL(linear)").unwrap(); - assert_matches!(got, FillOption::Linear); + assert_matches!(got, FillClause::Linear); // unnecessary whitespace let (_, got) = fill_clause("FILL ( null )").unwrap(); - assert_matches!(got, FillOption::Null); + assert_matches!(got, FillClause::Null); // Fallible cases @@ -990,7 +1074,7 @@ mod test { #[test] fn test_timezone_clause() { let (_, got) = timezone_clause("TZ('Australia/Hobart')").unwrap(); - assert_eq!(got, "Australia/Hobart"); + assert_eq!(*got, "Australia/Hobart"); // Fallible cases assert_expect_error!( diff --git a/influxdb_influxql_parser/src/show.rs b/influxdb_influxql_parser/src/show.rs index 4358149622..6571ceb6af 100644 --- a/influxdb_influxql_parser/src/show.rs +++ b/influxdb_influxql_parser/src/show.rs @@ -5,7 +5,7 @@ use crate::show_measurements::show_measurements; use crate::show_retention_policies::show_retention_policies; use crate::show_tag_keys::show_tag_keys; use crate::show_tag_values::show_tag_values; -use crate::Statement; +use crate::{impl_tuple_clause, Statement}; use nom::branch::alt; use nom::bytes::complete::tag_no_case; use nom::character::complete::multispace1; @@ -14,7 +14,7 @@ use nom::sequence::{pair, preceded}; use std::fmt::{Display, Formatter}; /// Parse a SHOW statement. -pub fn show_statement(i: &str) -> ParseResult<&str, Statement> { +pub(crate) fn show_statement(i: &str) -> ParseResult<&str, Statement> { preceded( pair(tag_no_case("SHOW"), multispace1), expect( @@ -54,12 +54,26 @@ fn show_databases(i: &str) -> ParseResult<&str, ShowDatabasesStatement> { value(ShowDatabasesStatement, tag_no_case("DATABASES"))(i) } -/// Parse an `ON` clause for `SHOW TAG KEYS`, `SHOW TAG VALUES` and `SHOW FIELD KEYS` -/// statements. 
-pub fn on_clause(i: &str) -> ParseResult<&str, Identifier> { +/// Represents an `ON` clause for the case where the database is a single [`Identifier`]. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct OnClause(pub(crate) Identifier); + +impl_tuple_clause!(OnClause, Identifier); + +impl Display for OnClause { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "ON {}", self.0) + } +} + +/// Parse an `ON` clause for statements such as `SHOW TAG KEYS` and `SHOW FIELD KEYS`. +pub(crate) fn on_clause(i: &str) -> ParseResult<&str, OnClause> { preceded( pair(tag_no_case("ON"), multispace1), - expect("invalid ON clause, expected identifier", identifier), + expect( + "invalid ON clause, expected identifier", + map(identifier, OnClause), + ), )(i) } diff --git a/influxdb_influxql_parser/src/show_field_keys.rs b/influxdb_influxql_parser/src/show_field_keys.rs index 01d722218f..d2846cd7be 100644 --- a/influxdb_influxql_parser/src/show_field_keys.rs +++ b/influxdb_influxql_parser/src/show_field_keys.rs @@ -1,7 +1,6 @@ -use crate::common::{limit_clause, offset_clause}; -use crate::identifier::Identifier; +use crate::common::{limit_clause, offset_clause, LimitClause, OffsetClause}; use crate::internal::{expect, ParseResult}; -use crate::show::on_clause; +use crate::show::{on_clause, OnClause}; use crate::simple_from_clause::{show_from_clause, ShowFromClause}; use nom::bytes::complete::tag_no_case; use nom::character::complete::multispace1; @@ -15,37 +14,37 @@ use std::fmt::Formatter; pub struct ShowFieldKeysStatement { /// The name of the database to query. If `None`, a default /// database will be used. - pub database: Option<Identifier>, + pub database: Option<OnClause>, /// The measurement or measurements to restrict which field keys /// are retrieved. pub from: Option<ShowFromClause>, /// A value to restrict the number of field keys returned. - pub limit: Option<u64>, + pub limit: Option<LimitClause>, /// A value to specify an offset to start retrieving field keys. - pub offset: Option<u64>, + pub offset: Option<OffsetClause>, } impl fmt::Display for ShowFieldKeysStatement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "SHOW FIELD KEYS")?; + f.write_str("SHOW FIELD KEYS")?; - if let Some(ref expr) = self.database { - write!(f, " ON {}", expr)?; + if let Some(ref on_clause) = self.database { + write!(f, " {}", on_clause)?; } if let Some(ref expr) = self.from { - write!(f, " FROM {}", expr)?; + write!(f, " {}", expr)?; } - if let Some(limit) = self.limit { - write!(f, " LIMIT {}", limit)?; + if let Some(ref limit) = self.limit { + write!(f, " {}", limit)?; } - if let Some(offset) = self.offset { - write!(f, " OFFSET {}", offset)?; + if let Some(ref offset) = self.offset { + write!(f, " {}", offset)?; } Ok(()) @@ -53,7 +52,7 @@ impl fmt::Display for ShowFieldKeysStatement { } /// Parse a `SHOW FIELD KEYS` statement, starting from the `FIELD` token. -pub fn show_field_keys(i: &str) -> ParseResult<&str, ShowFieldKeysStatement> { +pub(crate) fn show_field_keys(i: &str) -> ParseResult<&str, ShowFieldKeysStatement> { let ( remaining_input, ( diff --git a/influxdb_influxql_parser/src/show_measurements.rs b/influxdb_influxql_parser/src/show_measurements.rs index d5277fad9b..81f8b9e8b2 100644 --- a/influxdb_influxql_parser/src/show_measurements.rs +++ b/influxdb_influxql_parser/src/show_measurements.rs @@ -3,9 +3,9 @@ //! 
[sql]: https://docs.influxdata.com/influxdb/v1.8/query_language/explore-schema/#show-measurements use crate::common::{ - limit_clause, offset_clause, qualified_measurement_name, where_clause, QualifiedMeasurementName, + limit_clause, offset_clause, qualified_measurement_name, where_clause, LimitClause, + OffsetClause, QualifiedMeasurementName, WhereClause, }; -use crate::expression::conditional::ConditionalExpression; use crate::identifier::{identifier, Identifier}; use crate::internal::{expect, ParseResult}; use nom::branch::alt; @@ -17,18 +17,24 @@ use nom::sequence::{pair, preceded, terminated}; use std::fmt; use std::fmt::Formatter; -/// OnExpression represents an InfluxQL database or retention policy name -/// or a wildcard. +/// Represents an `ON` clause for a `SHOW MEASUREMENTS` statement to specify +/// which database the statement applies to. #[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub enum OnExpression { +pub enum ExtendedOnClause { + /// Represents a specific database and the default retention policy. Database(Identifier), + /// Represents a specific database and retention policy. DatabaseRetentionPolicy(Identifier, Identifier), + /// Represents all databases and their default retention policies. AllDatabases, + /// Represents all databases and all their retention policies. AllDatabasesAndRetentionPolicies, } -impl fmt::Display for OnExpression { +impl fmt::Display for ExtendedOnClause { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("ON ")?; + match self { Self::Database(db) => write!(f, "{}", db), Self::DatabaseRetentionPolicy(db, rp) => write!(f, "{}.{}", db, rp), @@ -39,19 +45,22 @@ impl fmt::Display for OnExpression { } /// Parse the `ON` clause of the `SHOW MEASUREMENTS` statement. -fn on_clause(i: &str) -> ParseResult<&str, OnExpression> { +fn extended_on_clause(i: &str) -> ParseResult<&str, ExtendedOnClause> { preceded( pair(tag_no_case("ON"), multispace1), expect( "invalid ON clause, expected wildcard or identifier", alt(( - value(OnExpression::AllDatabasesAndRetentionPolicies, tag("*.*")), - value(OnExpression::AllDatabases, tag("*")), + value( + ExtendedOnClause::AllDatabasesAndRetentionPolicies, + tag("*.*"), + ), + value(ExtendedOnClause::AllDatabases, tag("*")), map( pair(opt(terminated(identifier, tag("."))), identifier), |tup| match tup { - (None, db) => OnExpression::Database(db), - (Some(db), rp) => OnExpression::DatabaseRetentionPolicy(db, rp), + (None, db) => ExtendedOnClause::Database(db), + (Some(db), rp) => ExtendedOnClause::DatabaseRetentionPolicy(db, rp), }, ), )), @@ -59,60 +68,72 @@ fn on_clause(i: &str) -> ParseResult<&str, OnExpression> { )(i) } +/// Represents a `SHOW MEASUREMENTS` statement. #[derive(Clone, Debug, Default, PartialEq)] pub struct ShowMeasurementsStatement { - /// Limit the search to databases matching the expression. - pub on_expression: Option<OnExpression>, - - /// Limit the search to measurements matching the expression. - pub measurement_expression: Option<MeasurementExpression>, - - /// A conditional expression to filter the measurement list. - pub condition: Option<ConditionalExpression>, - - /// A value to restrict the number of tag keys returned. - pub limit: Option<u64>, - - /// A value to specify an offset to start retrieving tag keys. - pub offset: Option<u64>, + /// Represents the `ON` clause, which limits the search + /// to databases matching the expression. 
+ pub on: Option<ExtendedOnClause>, + + /// Represents the `WITH MEASUREMENT` clause, which limits + /// the search to measurements matching the expression. + pub with_measurement: Option<WithMeasurementClause>, + + /// Represents the `WHERE` clause, which holds a conditional + /// expression to filter the measurement list. + pub condition: Option<WhereClause>, + + /// Represents the `LIMIT` clause, which holds a value to + /// restrict the number of tag keys returned. + pub limit: Option<LimitClause>, + + /// Represents the `OFFSET` clause. + /// + /// The `OFFSET` clause holds value to specify an offset to start retrieving tag keys. + pub offset: Option<OffsetClause>, } impl fmt::Display for ShowMeasurementsStatement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "SHOW MEASUREMENTS")?; - if let Some(ref expr) = self.on_expression { - write!(f, " ON {}", expr)?; + if let Some(ref on_clause) = self.on { + write!(f, " {}", on_clause)?; } - if let Some(ref expr) = self.measurement_expression { - write!(f, " WITH MEASUREMENT {}", expr)?; + if let Some(ref with_clause) = self.with_measurement { + write!(f, " {}", with_clause)?; } - if let Some(ref cond) = self.condition { - write!(f, " WHERE {}", cond)?; + if let Some(ref where_clause) = self.condition { + write!(f, " {}", where_clause)?; } - if let Some(limit) = self.limit { - write!(f, " LIMIT {}", limit)?; + if let Some(ref limit) = self.limit { + write!(f, " {}", limit)?; } - if let Some(offset) = self.offset { - write!(f, " OFFSET {}", offset)?; + if let Some(ref offset) = self.offset { + write!(f, " {}", offset)?; } Ok(()) } } +/// Represents the expression of a `WITH MEASUREMENT` clause. #[derive(Clone, Debug, Eq, PartialEq)] -pub enum MeasurementExpression { +pub enum WithMeasurementClause { + /// Limit the measurements identified by the measurement name using an equals operator. Equals(QualifiedMeasurementName), + /// Limit the measurements identified by the measurement name using a + /// regular expression equals operator. Regex(QualifiedMeasurementName), } -impl fmt::Display for MeasurementExpression { +impl fmt::Display for WithMeasurementClause { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("WITH MEASUREMENT ")?; match self { Self::Equals(ref name) => write!(f, "= {}", name), Self::Regex(ref re) => write!(f, "=~ {}", re), @@ -120,7 +141,7 @@ impl fmt::Display for MeasurementExpression { } } -fn with_measurement_clause(i: &str) -> ParseResult<&str, MeasurementExpression> { +fn with_measurement_clause(i: &str) -> ParseResult<&str, WithMeasurementClause> { preceded( tuple(( tag_no_case("WITH"), @@ -136,14 +157,14 @@ fn with_measurement_clause(i: &str) -> ParseResult<&str, MeasurementExpression> alt(( map( preceded(pair(tag("=~"), multispace0), qualified_measurement_name), - MeasurementExpression::Regex, + WithMeasurementClause::Regex, ), map( preceded( pair(tag("="), multispace0), expect("expected measurement name", qualified_measurement_name), ), - MeasurementExpression::Equals, + WithMeasurementClause::Equals, ), )), ), @@ -151,7 +172,7 @@ fn with_measurement_clause(i: &str) -> ParseResult<&str, MeasurementExpression> } /// Parse a `SHOW MEASUREMENTS` statement after `SHOW` and any whitespace has been consumed. 
-pub fn show_measurements(i: &str) -> ParseResult<&str, ShowMeasurementsStatement> { +pub(crate) fn show_measurements(i: &str) -> ParseResult<&str, ShowMeasurementsStatement> { let ( remaining_input, ( @@ -164,7 +185,7 @@ pub fn show_measurements(i: &str) -> ParseResult<&str, ShowMeasurementsStatement ), ) = tuple(( tag_no_case("MEASUREMENTS"), - opt(preceded(multispace1, on_clause)), + opt(preceded(multispace1, extended_on_clause)), opt(preceded(multispace1, with_measurement_clause)), opt(preceded(multispace1, where_clause)), opt(preceded(multispace1, limit_clause)), @@ -174,8 +195,8 @@ pub fn show_measurements(i: &str) -> ParseResult<&str, ShowMeasurementsStatement Ok(( remaining_input, ShowMeasurementsStatement { - on_expression, - measurement_expression, + on: on_expression, + with_measurement: measurement_expression, condition, limit, offset, @@ -197,7 +218,7 @@ mod test { assert_eq!( got, ShowMeasurementsStatement { - on_expression: None, + on: None, ..Default::default() }, ); @@ -206,7 +227,7 @@ mod test { assert_eq!( got, ShowMeasurementsStatement { - on_expression: Some(OnExpression::Database("foo".into())), + on: Some(ExtendedOnClause::Database("foo".into())), ..Default::default() }, ); @@ -218,17 +239,15 @@ mod test { assert_eq!( got, ShowMeasurementsStatement { - on_expression: Some(OnExpression::Database("foo".into())), - measurement_expression: Some(MeasurementExpression::Equals( - QualifiedMeasurementName { - database: None, - retention_policy: None, - name: "bar".into(), - } - )), - condition: Some(Expr::Literal(true.into()).into()), - limit: Some(10), - offset: Some(20) + on: Some(ExtendedOnClause::Database("foo".into())), + with_measurement: Some(WithMeasurementClause::Equals(QualifiedMeasurementName { + database: None, + retention_policy: None, + name: "bar".into(), + })), + condition: Some(WhereClause::new(Expr::Literal(true.into()).into())), + limit: Some(10.into()), + offset: Some(20.into()) }, ); assert_eq!( @@ -242,11 +261,11 @@ mod test { assert_eq!( got, ShowMeasurementsStatement { - on_expression: Some(OnExpression::Database("foo".into())), - measurement_expression: Some(MeasurementExpression::Regex( + on: Some(ExtendedOnClause::Database("foo".into())), + with_measurement: Some(WithMeasurementClause::Regex( QualifiedMeasurementName::new(MeasurementName::Regex("bar".into())) )), - condition: Some(Expr::Literal(true.into()).into()), + condition: Some(WhereClause::new(Expr::Literal(true.into()).into())), limit: None, offset: None }, @@ -262,7 +281,7 @@ mod test { let got = format!( "{}", ShowMeasurementsStatement { - on_expression: None, + on: None, ..Default::default() } ); @@ -271,7 +290,7 @@ mod test { let got = format!( "{}", ShowMeasurementsStatement { - on_expression: Some(OnExpression::Database("foo".into())), + on: Some(ExtendedOnClause::Database("foo".into())), ..Default::default() } ); @@ -280,7 +299,7 @@ mod test { let got = format!( "{}", ShowMeasurementsStatement { - on_expression: Some(OnExpression::DatabaseRetentionPolicy( + on: Some(ExtendedOnClause::DatabaseRetentionPolicy( "foo".into(), "bar".into() )), @@ -292,7 +311,7 @@ mod test { let got = format!( "{}", ShowMeasurementsStatement { - on_expression: Some(OnExpression::AllDatabases), + on: Some(ExtendedOnClause::AllDatabases), ..Default::default() } ); @@ -301,7 +320,7 @@ mod test { let got = format!( "{}", ShowMeasurementsStatement { - on_expression: Some(OnExpression::AllDatabasesAndRetentionPolicies), + on: Some(ExtendedOnClause::AllDatabasesAndRetentionPolicies), ..Default::default() } ); @@ 
-309,24 +328,24 @@ mod test { } #[test] - fn test_on_clause() { - let (_, got) = on_clause("ON cpu").unwrap(); - assert_eq!(got, OnExpression::Database("cpu".into())); + fn test_extended_on_clause() { + let (_, got) = extended_on_clause("ON cpu").unwrap(); + assert_eq!(got, ExtendedOnClause::Database("cpu".into())); - let (_, got) = on_clause("ON cpu.autogen").unwrap(); + let (_, got) = extended_on_clause("ON cpu.autogen").unwrap(); assert_eq!( got, - OnExpression::DatabaseRetentionPolicy("cpu".into(), "autogen".into()) + ExtendedOnClause::DatabaseRetentionPolicy("cpu".into(), "autogen".into()) ); - let (_, got) = on_clause("ON *").unwrap(); - assert_matches!(got, OnExpression::AllDatabases); + let (_, got) = extended_on_clause("ON *").unwrap(); + assert_matches!(got, ExtendedOnClause::AllDatabases); - let (_, got) = on_clause("ON *.*").unwrap(); - assert_matches!(got, OnExpression::AllDatabasesAndRetentionPolicies); + let (_, got) = extended_on_clause("ON *.*").unwrap(); + assert_matches!(got, ExtendedOnClause::AllDatabasesAndRetentionPolicies); assert_expect_error!( - on_clause("ON WHERE cpu = 'test'"), + extended_on_clause("ON WHERE cpu = 'test'"), "invalid ON clause, expected wildcard or identifier" ) } @@ -338,13 +357,13 @@ mod test { let (_, got) = with_measurement_clause("WITH measurement = foo").unwrap(); assert_eq!( got, - MeasurementExpression::Equals(QualifiedMeasurementName::new(Name("foo".into()))) + WithMeasurementClause::Equals(QualifiedMeasurementName::new(Name("foo".into()))) ); let (_, got) = with_measurement_clause("WITH measurement =~ /foo/").unwrap(); assert_eq!( got, - MeasurementExpression::Regex(QualifiedMeasurementName::new(Regex("foo".into()))) + WithMeasurementClause::Regex(QualifiedMeasurementName::new(Regex("foo".into()))) ); // Expressions are still valid when whitespace is omitted @@ -352,7 +371,7 @@ mod test { let (_, got) = with_measurement_clause("WITH measurement=foo..bar").unwrap(); assert_eq!( got, - MeasurementExpression::Equals(QualifiedMeasurementName::new_db( + WithMeasurementClause::Equals(QualifiedMeasurementName::new_db( Name("bar".into()), "foo".into() )) @@ -361,7 +380,7 @@ mod test { let (_, got) = with_measurement_clause("WITH measurement=~/foo/").unwrap(); assert_eq!( got, - MeasurementExpression::Regex(QualifiedMeasurementName::new(Regex("foo".into()))) + WithMeasurementClause::Regex(QualifiedMeasurementName::new(Regex("foo".into()))) ); // Quirks of InfluxQL per https://github.com/influxdata/influxdb_iox/issues/5662 @@ -369,13 +388,13 @@ mod test { let (_, got) = with_measurement_clause("WITH measurement =~ foo").unwrap(); assert_eq!( got, - MeasurementExpression::Regex(QualifiedMeasurementName::new(Name("foo".into()))) + WithMeasurementClause::Regex(QualifiedMeasurementName::new(Name("foo".into()))) ); let (_, got) = with_measurement_clause("WITH measurement = /foo/").unwrap(); assert_eq!( got, - MeasurementExpression::Equals(QualifiedMeasurementName::new(Regex("foo".into()))) + WithMeasurementClause::Equals(QualifiedMeasurementName::new(Regex("foo".into()))) ); // Fallible cases diff --git a/influxdb_influxql_parser/src/show_retention_policies.rs b/influxdb_influxql_parser/src/show_retention_policies.rs index 2778e3630e..f4ebbeea46 100644 --- a/influxdb_influxql_parser/src/show_retention_policies.rs +++ b/influxdb_influxql_parser/src/show_retention_policies.rs @@ -1,36 +1,31 @@ -use crate::identifier::{identifier, Identifier}; use crate::internal::{expect, ParseResult}; +use crate::show::{on_clause, OnClause}; use 
nom::bytes::complete::tag_no_case; use nom::character::complete::multispace1; use nom::combinator::opt; -use nom::sequence::{pair, preceded, tuple}; +use nom::sequence::{preceded, tuple}; use std::fmt::{Display, Formatter}; /// Represents a `SHOW RETENTION POLICIES` statement. #[derive(Debug, Clone, PartialEq, Eq)] pub struct ShowRetentionPoliciesStatement { /// Name of the database to list the retention policies, or all if this is `None`. - database: Option<Identifier>, + pub database: Option<OnClause>, } impl Display for ShowRetentionPoliciesStatement { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "SHOW RETENTION POLICIES")?; if let Some(ref database) = self.database { - write!(f, " ON {}", database)?; + write!(f, " {}", database)?; } Ok(()) } } -fn on_clause(i: &str) -> ParseResult<&str, Identifier> { - preceded( - pair(tag_no_case("ON"), multispace1), - expect("invalid ON clause, expected identifier", identifier), - )(i) -} - -pub fn show_retention_policies(i: &str) -> ParseResult<&str, ShowRetentionPoliciesStatement> { +pub(crate) fn show_retention_policies( + i: &str, +) -> ParseResult<&str, ShowRetentionPoliciesStatement> { let (remaining, (_, _, _, database)) = tuple(( tag_no_case("RETENTION"), multispace1, diff --git a/influxdb_influxql_parser/src/show_tag_keys.rs b/influxdb_influxql_parser/src/show_tag_keys.rs index 0067e1ff31..367691b761 100644 --- a/influxdb_influxql_parser/src/show_tag_keys.rs +++ b/influxdb_influxql_parser/src/show_tag_keys.rs @@ -1,8 +1,8 @@ -use crate::common::{limit_clause, offset_clause, where_clause}; -use crate::expression::conditional::ConditionalExpression; -use crate::identifier::Identifier; +use crate::common::{ + limit_clause, offset_clause, where_clause, LimitClause, OffsetClause, WhereClause, +}; use crate::internal::ParseResult; -use crate::show::on_clause; +use crate::show::{on_clause, OnClause}; use crate::simple_from_clause::{show_from_clause, ShowFromClause}; use nom::bytes::complete::tag_no_case; use nom::character::complete::multispace1; @@ -16,44 +16,44 @@ use std::fmt::Formatter; pub struct ShowTagKeysStatement { /// The name of the database to query. If `None`, a default /// database will be used. - pub database: Option<Identifier>, + pub database: Option<OnClause>, /// The measurement or measurements to restrict which tag keys /// are retrieved. pub from: Option<ShowFromClause>, /// A conditional expression to filter the tag keys. - pub condition: Option<ConditionalExpression>, + pub condition: Option<WhereClause>, /// A value to restrict the number of tag keys returned. - pub limit: Option<u64>, + pub limit: Option<LimitClause>, /// A value to specify an offset to start retrieving tag keys. 
- pub offset: Option<u64>, + pub offset: Option<OffsetClause>, } impl fmt::Display for ShowTagKeysStatement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "SHOW TAG KEYS")?; - if let Some(ref expr) = self.database { - write!(f, " ON {}", expr)?; + if let Some(ref on_clause) = self.database { + write!(f, " {}", on_clause)?; } if let Some(ref expr) = self.from { - write!(f, " FROM {}", expr)?; + write!(f, " {}", expr)?; } if let Some(ref cond) = self.condition { - write!(f, " WHERE {}", cond)?; + write!(f, " {}", cond)?; } - if let Some(limit) = self.limit { - write!(f, " LIMIT {}", limit)?; + if let Some(ref limit) = self.limit { + write!(f, " {}", limit)?; } - if let Some(offset) = self.offset { - write!(f, " OFFSET {}", offset)?; + if let Some(ref offset) = self.offset { + write!(f, " {}", offset)?; } Ok(()) @@ -61,7 +61,7 @@ impl fmt::Display for ShowTagKeysStatement { } /// Parse a `SHOW TAG KEYS` statement, starting from the `KEYS` token. -pub fn show_tag_keys(i: &str) -> ParseResult<&str, ShowTagKeysStatement> { +pub(crate) fn show_tag_keys(i: &str) -> ParseResult<&str, ShowTagKeysStatement> { let ( remaining_input, ( diff --git a/influxdb_influxql_parser/src/show_tag_values.rs b/influxdb_influxql_parser/src/show_tag_values.rs index 0c565d1542..b0558a59d2 100644 --- a/influxdb_influxql_parser/src/show_tag_values.rs +++ b/influxdb_influxql_parser/src/show_tag_values.rs @@ -1,8 +1,9 @@ -use crate::common::{limit_clause, offset_clause, where_clause, OneOrMore}; -use crate::expression::conditional::ConditionalExpression; +use crate::common::{ + limit_clause, offset_clause, where_clause, LimitClause, OffsetClause, OneOrMore, WhereClause, +}; use crate::identifier::{identifier, Identifier}; use crate::internal::{expect, ParseResult}; -use crate::show::on_clause; +use crate::show::{on_clause, OnClause}; use crate::simple_from_clause::{show_from_clause, ShowFromClause}; use crate::string::{regex, Regex}; use nom::branch::alt; @@ -18,50 +19,50 @@ use std::fmt::{Display, Formatter}; pub struct ShowTagValuesStatement { /// The name of the database to query. If `None`, a default /// database will be used. - pub database: Option<Identifier>, + pub database: Option<OnClause>, /// The measurement or measurements to restrict which tag keys /// are retrieved. pub from: Option<ShowFromClause>, - /// `WITH KEY` expression, to limit the values retrieved to + /// Represents the `WITH KEY` clause, to restrict the tag values to /// the matching tag keys. - pub with_key: WithKeyExpression, + pub with_key: WithKeyClause, /// A conditional expression to filter the tag keys. - pub condition: Option<ConditionalExpression>, + pub condition: Option<WhereClause>, /// A value to restrict the number of tag keys returned. - pub limit: Option<u64>, + pub limit: Option<LimitClause>, /// A value to specify an offset to start retrieving tag keys. 
- pub offset: Option<u64>, + pub offset: Option<OffsetClause>, } impl Display for ShowTagValuesStatement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "SHOW TAG VALUES")?; - if let Some(ref expr) = self.database { - write!(f, " ON {}", expr)?; + if let Some(ref on_clause) = self.database { + write!(f, " {}", on_clause)?; } - if let Some(ref expr) = self.from { - write!(f, " FROM {}", expr)?; + if let Some(ref from_clause) = self.from { + write!(f, " {}", from_clause)?; } write!(f, " {}", self.with_key)?; - if let Some(ref cond) = self.condition { - write!(f, " WHERE {}", cond)?; + if let Some(ref where_clause) = self.condition { + write!(f, " {}", where_clause)?; } - if let Some(limit) = self.limit { - write!(f, " LIMIT {}", limit)?; + if let Some(ref limit) = self.limit { + write!(f, " {}", limit)?; } - if let Some(offset) = self.offset { - write!(f, " OFFSET {}", offset)?; + if let Some(ref offset) = self.offset { + write!(f, " {}", offset)?; } Ok(()) @@ -69,7 +70,7 @@ impl Display for ShowTagValuesStatement { } /// Parse a `SHOW TAG VALUES` statement, starting from the `VALUES` token. -pub fn show_tag_values(i: &str) -> ParseResult<&str, ShowTagValuesStatement> { +pub(crate) fn show_tag_values(i: &str) -> ParseResult<&str, ShowTagValuesStatement> { let ( remaining_input, ( @@ -107,19 +108,36 @@ pub fn show_tag_values(i: &str) -> ParseResult<&str, ShowTagValuesStatement> { )) } +/// Represents a list of identifiers when the `WITH KEY` clause +/// specifies the `IN` operator. pub type InList = OneOrMore<Identifier>; +impl Display for InList { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(self.first(), f)?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } +} + +/// Represents a `WITH KEY` clause. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum WithKeyExpression { +pub enum WithKeyClause { + /// Select a single tag key that equals the identifier. Eq(Identifier), + /// Select all tag keys that do not equal the identifier. NotEq(Identifier), + /// Select any tag keys that pass the regular expression. EqRegex(Regex), + /// Select the tag keys that do not pass the regular expression. NotEqRegex(Regex), - /// IN expression + /// Select the tag keys matching each of the identifiers in the list. 
In(InList), } -impl Display for WithKeyExpression { +impl Display for WithKeyClause { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.write_str("WITH KEY ")?; @@ -145,7 +163,7 @@ fn identifier_list(i: &str) -> ParseResult<&str, InList> { )(i) } -fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyExpression> { +fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyClause> { preceded( tuple(( tag_no_case("WITH"), @@ -163,7 +181,7 @@ fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyExpression> { regex, ), ), - WithKeyExpression::EqRegex, + WithKeyClause::EqRegex, ), map( preceded( @@ -173,7 +191,7 @@ fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyExpression> { regex, ), ), - WithKeyExpression::NotEqRegex, + WithKeyClause::NotEqRegex, ), map( preceded( @@ -183,7 +201,7 @@ fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyExpression> { identifier, ), ), - WithKeyExpression::Eq, + WithKeyClause::Eq, ), map( preceded( @@ -193,7 +211,7 @@ fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyExpression> { identifier, ), ), - WithKeyExpression::NotEq, + WithKeyClause::NotEq, ), map( preceded( @@ -203,7 +221,7 @@ fn with_key_clause(i: &str) -> ParseResult<&str, WithKeyExpression> { identifier_list, ), ), - WithKeyExpression::In, + WithKeyClause::In, ), )), ), @@ -286,24 +304,24 @@ mod test { #[test] fn test_with_key_clause() { let (_, got) = with_key_clause("WITH KEY = foo").unwrap(); - assert_eq!(got, WithKeyExpression::Eq("foo".into())); + assert_eq!(got, WithKeyClause::Eq("foo".into())); let (_, got) = with_key_clause("WITH KEY != foo").unwrap(); - assert_eq!(got, WithKeyExpression::NotEq("foo".into())); + assert_eq!(got, WithKeyClause::NotEq("foo".into())); let (_, got) = with_key_clause("WITH KEY =~ /foo/").unwrap(); - assert_eq!(got, WithKeyExpression::EqRegex("foo".into())); + assert_eq!(got, WithKeyClause::EqRegex("foo".into())); let (_, got) = with_key_clause("WITH KEY !~ /foo/").unwrap(); - assert_eq!(got, WithKeyExpression::NotEqRegex("foo".into())); + assert_eq!(got, WithKeyClause::NotEqRegex("foo".into())); let (_, got) = with_key_clause("WITH KEY IN (foo)").unwrap(); - assert_eq!(got, WithKeyExpression::In(InList::new(vec!["foo".into()]))); + assert_eq!(got, WithKeyClause::In(InList::new(vec!["foo".into()]))); let (_, got) = with_key_clause("WITH KEY IN (foo, bar, \"foo bar\")").unwrap(); assert_eq!( got, - WithKeyExpression::In(InList::new(vec![ + WithKeyClause::In(InList::new(vec![ "foo".into(), "bar".into(), "foo bar".into() @@ -312,7 +330,7 @@ mod test { // Expressions are still valid when whitespace is omitted let (_, got) = with_key_clause("WITH KEY=foo").unwrap(); - assert_eq!(got, WithKeyExpression::Eq("foo".into())); + assert_eq!(got, WithKeyClause::Eq("foo".into())); // Fallible cases diff --git a/influxdb_influxql_parser/src/simple_from_clause.rs b/influxdb_influxql_parser/src/simple_from_clause.rs index 07528a9fc2..e1e5dbabfd 100644 --- a/influxdb_influxql_parser/src/simple_from_clause.rs +++ b/influxdb_influxql_parser/src/simple_from_clause.rs @@ -7,6 +7,7 @@ use nom::bytes::complete::tag_no_case; use nom::character::complete::multispace1; use nom::sequence::{pair, preceded}; use std::fmt; +use std::fmt::{Display, Formatter}; /// Represents a `FROM` clause of a `DELETE` or `SHOW` statement. 
/// @@ -64,8 +65,18 @@ impl Parser for QualifiedMeasurementName { /// ``` pub type ShowFromClause = FromMeasurementClause<QualifiedMeasurementName>; +impl Display for ShowFromClause { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "FROM {}", self.first())?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } +} + /// Parse a `FROM` clause for various `SHOW` statements. -pub fn show_from_clause(i: &str) -> ParseResult<&str, ShowFromClause> { +pub(crate) fn show_from_clause(i: &str) -> ParseResult<&str, ShowFromClause> { from_clause(i) } @@ -78,8 +89,18 @@ impl Parser for Identifier { /// Represents a `FROM` clause for a `DELETE` statement. pub type DeleteFromClause = FromMeasurementClause<MeasurementName>; +impl Display for DeleteFromClause { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "FROM {}", self.first())?; + for arg in self.rest() { + write!(f, ", {}", arg)?; + } + Ok(()) + } +} + /// Parse a `FROM` clause for a `DELETE` statement. -pub fn delete_from_clause(i: &str) -> ParseResult<&str, DeleteFromClause> { +pub(crate) fn delete_from_clause(i: &str) -> ParseResult<&str, DeleteFromClause> { from_clause(i) } diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-2.snap new file mode 100644 index 0000000000..7e0285fec4 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-2.snap @@ -0,0 +1,21 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"DELETE WHERE 'foo bar' =~ /foo/\")" +--- +- "pre_visit_statement: Delete(Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })))" +- "pre_visit_delete_statement: Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }))" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }" +- "pre_visit_conditional_expression: Expr(Literal(String(\"foo bar\")))" +- "pre_visit_expr: Literal(String(\"foo bar\"))" +- "post_visit_expr: Literal(String(\"foo bar\"))" +- "post_visit_conditional_expression: Expr(Literal(String(\"foo bar\")))" +- "pre_visit_conditional_expression: Expr(Literal(Regex(Regex(\"foo\"))))" +- "pre_visit_expr: Literal(Regex(Regex(\"foo\")))" +- "post_visit_expr: Literal(Regex(Regex(\"foo\")))" +- "post_visit_conditional_expression: Expr(Literal(Regex(Regex(\"foo\"))))" +- "post_visit_conditional_expression: Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })" +- "post_visit_delete_statement: Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }))" +- "post_visit_statement: Delete(Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })))" + diff --git 
a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-3.snap new file mode 100644 index 0000000000..cda5aad9b2 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-3.snap @@ -0,0 +1,13 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"DELETE FROM cpu\")" +--- +- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None })" +- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None }" +- "pre_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }" +- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None }" +- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-4.snap new file mode 100644 index 0000000000..3a5562381c --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement-4.snap @@ -0,0 +1,13 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"DELETE FROM /^cpu/\")" +--- +- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None })" +- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None }" +- "pre_visit_delete_from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }" +- "pre_visit_measurement_name: Regex(Regex(\"^cpu\"))" +- "post_visit_measurement_name: Regex(Regex(\"^cpu\"))" +- "post_visit_delete_from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }" +- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None }" +- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement.snap new file mode 100644 index 0000000000..b3d379e33c --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__delete_statement.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"DELETE FROM a WHERE b = \\\"c\\\"\")" +--- +- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) })" +- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), 
data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) }" +- "pre_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"a\"))] }" +- "pre_visit_measurement_name: Name(Identifier(\"a\"))" +- "post_visit_measurement_name: Name(Identifier(\"a\"))" +- "post_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"a\"))] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"b\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"b\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"b\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"b\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"c\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"c\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"c\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"c\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })" +- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) }" +- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__drop_measurement_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__drop_measurement_statement.snap new file mode 100644 index 0000000000..01c4a5cb6a --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__drop_measurement_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"DROP MEASUREMENT cpu\")" +--- +- "pre_visit_statement: DropMeasurement(DropMeasurementStatement { name: Identifier(\"cpu\") })" +- "pre_visit_drop_measurement_statement: DropMeasurementStatement { name: Identifier(\"cpu\") }" +- "post_visit_drop_measurement_statement: DropMeasurementStatement { name: Identifier(\"cpu\") }" +- "post_visit_statement: DropMeasurement(DropMeasurementStatement { name: Identifier(\"cpu\") })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__explain_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__explain_statement.snap new file mode 100644 index 
0000000000..084a45e39d --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__explain_statement.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"EXPLAIN SELECT * FROM cpu\")" +--- +- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })" +- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "pre_visit_expr: Wildcard(None)" +- "post_visit_expr: Wildcard(None)" +- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_explain_statement: 
ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }" +- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-2.snap new file mode 100644 index 0000000000..a788e897cd --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-2.snap @@ -0,0 +1,23 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(r#\"SELECT DISTINCT value FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }" +- "pre_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }" +- "pre_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: 
Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-3.snap new file mode 100644 index 0000000000..1c3727ec04 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-3.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(r#\"SELECT COUNT(value) FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }" +- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }" +- "pre_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }" +- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: 
Identifier(\"value\"), data_type: None }] }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-4.snap new file mode 100644 index 0000000000..1b5876584d --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-4.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(r#\"SELECT COUNT(DISTINCT value) FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: 
None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }" +- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }" +- "pre_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }" +- "pre_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }" +- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-5.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-5.snap new file mode 100644 index 0000000000..b2d5e2b2e1 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-5.snap @@ -0,0 +1,29 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: 
"visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "pre_visit_expr: Wildcard(None)" +- "post_visit_expr: Wildcard(None)" +- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"memory\"))" +- "post_visit_measurement_name: Name(Identifier(\"memory\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }" +- "post_visit_select_statement: 
SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap new file mode 100644 index 0000000000..36890bb36f --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap @@ -0,0 +1,93 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: 
Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }" +- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { 
lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }" +- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }" +- "pre_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }" +- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"node1\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"node1\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })" +- "post_visit_select_statement: SelectStatement { fields: 
OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "post_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })" +- "pre_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"region\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"region\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"region\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"region\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(Regex(Regex(\"west\"))))" +- "pre_visit_expr: Literal(Regex(Regex(\"west\")))" +- "post_visit_expr: Literal(Regex(Regex(\"west\")))" +- "post_visit_conditional_expression: Expr(Literal(Regex(Regex(\"west\"))))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }" +- "pre_visit_conditional_expression: Binary { 
lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"value\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"value\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(Unsigned(5)))" +- "pre_visit_expr: Literal(Unsigned(5))" +- "post_visit_expr: Literal(Unsigned(5))" +- "post_visit_conditional_expression: Expr(Literal(Unsigned(5)))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }" +- "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })" +- "pre_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" +- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "pre_visit_expr: Literal(Duration(Duration(300000000000)))" +- "post_visit_expr: Literal(Duration(Duration(300000000000)))" +- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "pre_visit_select_dimension: Tag(Identifier(\"host\"))" +- "post_visit_select_dimension: Tag(Identifier(\"host\"))" +- "post_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" +- "pre_visit_fill_clause: Previous" +- "post_visit_fill_clause: Previous" +- "pre_visit_order_by_clause: Descending" +- "post_visit_order_by_clause: Descending" +- "pre_visit_limit_clause: LimitClause(1)" +- "post_visit_limit_clause: LimitClause(1)" +- "pre_visit_offset_clause: OffsetClause(2)" +- "post_visit_offset_clause: OffsetClause(2)" +- "pre_visit_slimit_clause: SLimitClause(3)" +- "post_visit_slimit_clause: SLimitClause(3)" +- "pre_visit_soffset_clause: SOffsetClause(4)" +- "post_visit_soffset_clause: SOffsetClause(4)" +- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" +- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: 
Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement.snap new file mode 100644 index 0000000000..1e6e58cee1 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement.snap @@ -0,0 +1,23 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(r#\"SELECT value FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: 
None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_databases_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_databases_statement.snap new file mode 100644 index 0000000000..ea2e2e8c3b --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_databases_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW DATABASES\")" +--- +- "pre_visit_statement: 
ShowDatabases(ShowDatabasesStatement)" +- "pre_visit_show_databases_statement: ShowDatabasesStatement" +- "post_visit_show_databases_statement: ShowDatabasesStatement" +- "post_visit_statement: ShowDatabases(ShowDatabasesStatement)" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-2.snap new file mode 100644 index 0000000000..4a2d7b7bca --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW FIELD KEYS ON telegraf\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-3.snap new file mode 100644 index 0000000000..95bc88aef0 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-3.snap @@ -0,0 +1,15 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW FIELD KEYS FROM cpu\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None }" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] 
}), limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-4.snap new file mode 100644 index 0000000000..d1f04dea5b --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement-4.snap @@ -0,0 +1,17 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW FIELD KEYS ON telegraf FROM /cpu/\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement.snap new file mode 100644 index 0000000000..076b6693f2 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_field_keys_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW FIELD KEYS\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: 
None, from: None, limit: None, offset: None }" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-2.snap new file mode 100644 index 0000000000..17c82c45d5 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS ON db.rp\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None }" +- "pre_visit_extended_on_clause: DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))" +- "post_visit_extended_on_clause: DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-3.snap new file mode 100644 index 0000000000..948da40328 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-3.snap @@ -0,0 +1,15 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS WITH MEASUREMENT = \\\"cpu\\\"\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None }" +- "pre_visit_with_measurement_clause: Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, 
name: Name(Identifier(\"cpu\")) }" +- "post_visit_with_measurement_clause: Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-4.snap new file mode 100644 index 0000000000..370670e974 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-4.snap @@ -0,0 +1,21 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS WHERE host = 'west'\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "pre_visit_expr: Literal(String(\"west\"))" +- "post_visit_expr: Literal(String(\"west\"))" +- "post_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: 
Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-5.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-5.snap new file mode 100644 index 0000000000..2006080fcb --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-5.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS LIMIT 5\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None }" +- "pre_visit_limit_clause: LimitClause(5)" +- "post_visit_limit_clause: LimitClause(5)" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-6.snap new file mode 100644 index 0000000000..ae40b59581 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-6.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS OFFSET 10\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) }" +- "pre_visit_offset_clause: OffsetClause(10)" +- "post_visit_offset_clause: OffsetClause(10)" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-7.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-7.snap new file mode 100644 index 0000000000..385531fd0a --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement-7.snap @@ -0,0 +1,33 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS ON * WITH MEASUREMENT =~ /foo/ WHERE host = 'west' LIMIT 10 OFFSET 20\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: 
Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) }" +- "pre_visit_extended_on_clause: AllDatabases" +- "post_visit_extended_on_clause: AllDatabases" +- "pre_visit_with_measurement_clause: Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) }" +- "pre_visit_measurement_name: Regex(Regex(\"foo\"))" +- "post_visit_measurement_name: Regex(Regex(\"foo\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) }" +- "post_visit_with_measurement_clause: Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "pre_visit_expr: Literal(String(\"west\"))" +- "post_visit_expr: Literal(String(\"west\"))" +- "post_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "pre_visit_limit_clause: LimitClause(10)" +- "post_visit_limit_clause: LimitClause(10)" +- "pre_visit_offset_clause: OffsetClause(20)" +- "post_visit_offset_clause: OffsetClause(20)" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: 
Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement.snap new file mode 100644 index 0000000000..f210c1011b --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_measurements_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None }" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_retention_policies_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_retention_policies_statement-2.snap new file mode 100644 index 0000000000..38b2a0a03b --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_retention_policies_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW RETENTION POLICIES ON telegraf\")" +--- +- "pre_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) })" +- "pre_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "post_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) }" +- "post_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_retention_policies_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_retention_policies_statement.snap new file mode 100644 index 0000000000..a853a45087 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_retention_policies_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW RETENTION POLICIES\")" +--- +- "pre_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: None })" +- "pre_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: None }" +- 
"post_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: None }" +- "post_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_keys_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_keys_statement-2.snap new file mode 100644 index 0000000000..0983fed299 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_keys_statement-2.snap @@ -0,0 +1,33 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW TAG KEYS ON telegraf FROM cpu WHERE host = \\\"west\\\" LIMIT 5 OFFSET 10\")" +--- +- "pre_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })" +- "pre_visit_show_tag_keys_statement: ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"west\"), 
data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_limit_clause: LimitClause(5)" +- "post_visit_limit_clause: LimitClause(5)" +- "pre_visit_offset_clause: OffsetClause(10)" +- "post_visit_offset_clause: OffsetClause(10)" +- "post_visit_show_tag_keys_statement: ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "post_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_keys_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_keys_statement.snap new file mode 100644 index 0000000000..df97550800 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_keys_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW TAG KEYS\")" +--- +- "pre_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_keys_statement: ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None }" +- "post_visit_show_tag_keys_statement: ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-2.snap new file mode 100644 index 0000000000..a3dd856c51 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY =~ /host|region/\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), 
condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None }" +- "pre_visit_with_key_clause: EqRegex(Regex(\"host|region\"))" +- "post_visit_with_key_clause: EqRegex(Regex(\"host|region\"))" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-3.snap new file mode 100644 index 0000000000..0e503bef56 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-3.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY IN (host, region)\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None }" +- "pre_visit_with_key_clause: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] })" +- "post_visit_with_key_clause: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] })" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-4.snap new file mode 100644 index 0000000000..4236ca3744 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement-4.snap @@ -0,0 +1,35 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW TAG VALUES ON telegraf FROM cpu WITH KEY = host WHERE host = \\\"west\\\" LIMIT 5 OFFSET 10\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), 
offset: Some(OffsetClause(10)) })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "post_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_limit_clause: LimitClause(5)" +- "post_visit_limit_clause: LimitClause(5)" +- "pre_visit_offset_clause: OffsetClause(10)" +- "post_visit_offset_clause: OffsetClause(10)" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef 
{ name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement.snap new file mode 100644 index 0000000000..b01ce0a49c --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__show_tag_values_statement.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit.rs +expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY = host\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None }" +- "pre_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "post_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/string.rs b/influxdb_influxql_parser/src/string.rs index e52b3fafb7..a81825e2ca 100644 --- a/influxdb_influxql_parser/src/string.rs +++ b/influxdb_influxql_parser/src/string.rs @@ -4,6 +4,7 @@ // Taken liberally from https://github.com/Geal/nom/blob/main/examples/string.rs and // amended for InfluxQL. +use crate::impl_tuple_clause; use crate::internal::{expect, ParseError, ParseResult}; use nom::branch::alt; use nom::bytes::complete::{is_not, tag}; @@ -61,7 +62,7 @@ enum StringFragment<'a> { } /// Parse a single-quoted literal string. -pub fn single_quoted_string(i: &str) -> ParseResult<&str, String> { +pub(crate) fn single_quoted_string(i: &str) -> ParseResult<&str, String> { let escaped = preceded( char('\\'), expect( @@ -79,7 +80,7 @@ pub fn single_quoted_string(i: &str) -> ParseResult<&str, String> { } /// Parse a double-quoted identifier string. -pub fn double_quoted_string(i: &str) -> ParseResult<&str, String> { +pub(crate) fn double_quoted_string(i: &str) -> ParseResult<&str, String> { let escaped = preceded( char('\\'), expect( @@ -158,7 +159,9 @@ fn regex_literal(i: &str) -> ParseResult<&str, &str> { /// An unescaped regular expression. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Regex(pub String); +pub struct Regex(pub(crate) String); + +impl_tuple_clause!(Regex, String); impl Display for Regex { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { @@ -168,12 +171,6 @@ impl Display for Regex { } } -impl From<String> for Regex { - fn from(v: String) -> Self { - Self(v) - } -} - impl From<&str> for Regex { fn from(v: &str) -> Self { Self(v.into()) @@ -181,7 +178,7 @@ impl From<&str> for Regex { } /// Parse a regular expression, delimited by `/`. -pub fn regex(i: &str) -> ParseResult<&str, Regex> { +pub(crate) fn regex(i: &str) -> ParseResult<&str, Regex> { map( string( '/', diff --git a/influxdb_influxql_parser/src/visit.rs b/influxdb_influxql_parser/src/visit.rs new file mode 100644 index 0000000000..1f4d1fbd7e --- /dev/null +++ b/influxdb_influxql_parser/src/visit.rs @@ -0,0 +1,1651 @@ +//! The visit module provides API for walking the AST. +//! +//! # Example +//! +//! ``` +//! use influxdb_influxql_parser::Visitor; +//! use influxdb_influxql_parser::Visitable; +//! use influxdb_influxql_parser::parse_statements; +//! +//! struct MyVisitor; +//! +//! impl Visitor for MyVisitor { +//! fn post_visit_where_clause(self, n: &influxdb_influxql_parser::WhereClause) -> influxdb_influxql_parser::VisitorResult<Self> { +//! println!("{}", n); +//! Ok(self) +//! } +//! } +//! +//! let statements = parse_statements("SELECT value FROM cpu WHERE host = 'west'").unwrap(); +//! let statement = statements.first().unwrap(); +//! let vis = MyVisitor; +//! statement.accept(vis); +//! ``` +use crate::common::{ + LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, + WhereClause, +}; +use crate::delete::DeleteStatement; +use crate::drop::DropMeasurementStatement; +use crate::explain::ExplainStatement; +use crate::expression::arithmetic::Expr; +use crate::expression::conditional::ConditionalExpression; +use crate::select::{ + Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, +}; +use crate::show::{OnClause, ShowDatabasesStatement}; +use crate::show_field_keys::ShowFieldKeysStatement; +use crate::show_measurements::{ + ExtendedOnClause, ShowMeasurementsStatement, WithMeasurementClause, +}; +use crate::show_retention_policies::ShowRetentionPoliciesStatement; +use crate::show_tag_keys::ShowTagKeysStatement; +use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; +use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; +use crate::visit::Recursion::*; +use crate::Statement; + +/// The result type for a [`Visitor`]. +pub type VisitorResult<T, E = &'static str> = Result<T, E>; + +/// Controls how the visitor recursion should proceed. +pub enum Recursion<V: Visitor> { + /// Attempt to visit all the children, recursively, of this expression. + Continue(V), + /// Do not visit the children of this expression, though the walk + /// of parents of this expression will not be affected + Stop(V), +} + +/// Encode the depth-first traversal of an InfluxQL statement. When passed to +/// any [`Visitable::accept`], `pre_visit` functions are invoked repeatedly +/// until a leaf node is reached or a `pre_visit` function returns [`Recursion::Stop`]. +pub trait Visitor: Sized { + /// Invoked before any children of the InfluxQL statement are visited. 
+ fn pre_visit_statement(self, _n: &Statement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the InfluxQL statement are visited. + fn post_visit_statement(self, _n: &Statement) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `DELETE` statement are visited. + fn pre_visit_delete_statement(self, _n: &DeleteStatement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `DELETE` statement are visited. + fn post_visit_delete_statement(self, _n: &DeleteStatement) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `FROM` clause of a `DELETE` statement are visited. + fn pre_visit_delete_from_clause(self, _n: &DeleteFromClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `FROM` clause of a `DELETE` statement are visited. + fn post_visit_delete_from_clause(self, _n: &DeleteFromClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the measurement name are visited. + fn pre_visit_measurement_name(self, _n: &MeasurementName) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the measurement name are visited. + fn post_visit_measurement_name(self, _n: &MeasurementName) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `DROP MEASUREMENT` statement are visited. + fn pre_visit_drop_measurement_statement( + self, + _n: &DropMeasurementStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `DROP MEASUREMENT` statement are visited. + fn post_visit_drop_measurement_statement( + self, + _n: &DropMeasurementStatement, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `EXPLAIN` statement are visited. + fn pre_visit_explain_statement(self, _n: &ExplainStatement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `EXPLAIN` statement are visited. + fn post_visit_explain_statement(self, _n: &ExplainStatement) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SELECT` statement are visited. + fn pre_visit_select_statement(self, _n: &SelectStatement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SELECT` statement are visited. + fn post_visit_select_statement(self, _n: &SelectStatement) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SHOW DATABASES` statement are visited. + fn pre_visit_show_databases_statement( + self, + _n: &ShowDatabasesStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SHOW DATABASES` statement are visited. + fn post_visit_show_databases_statement( + self, + _n: &ShowDatabasesStatement, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SHOW MEASUREMENTS` statement are visited. + fn pre_visit_show_measurements_statement( + self, + _n: &ShowMeasurementsStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SHOW MEASUREMENTS` statement are visited. 
+ fn post_visit_show_measurements_statement( + self, + _n: &ShowMeasurementsStatement, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SHOW RETENTION POLICIES` statement are visited. + fn pre_visit_show_retention_policies_statement( + self, + _n: &ShowRetentionPoliciesStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SHOW RETENTION POLICIES` statement are visited. + fn post_visit_show_retention_policies_statement( + self, + _n: &ShowRetentionPoliciesStatement, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SHOW TAG KEYS` statement are visited. + fn pre_visit_show_tag_keys_statement( + self, + _n: &ShowTagKeysStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SHOW TAG KEYS` statement are visited. + fn post_visit_show_tag_keys_statement(self, _n: &ShowTagKeysStatement) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SHOW TAG VALUES` statement are visited. + fn pre_visit_show_tag_values_statement( + self, + _n: &ShowTagValuesStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SHOW TAG VALUES` statement are visited. + fn post_visit_show_tag_values_statement( + self, + _n: &ShowTagValuesStatement, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SHOW FIELD KEYS` statement are visited. + fn pre_visit_show_field_keys_statement( + self, + _n: &ShowFieldKeysStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SHOW FIELD KEYS` statement are visited. + fn post_visit_show_field_keys_statement( + self, + _n: &ShowFieldKeysStatement, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the conditional expression are visited. + fn pre_visit_conditional_expression( + self, + _n: &ConditionalExpression, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the conditional expression are visited. + fn post_visit_conditional_expression(self, _n: &ConditionalExpression) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the arithmetic expression are visited. + fn pre_visit_expr(self, _n: &Expr) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the arithmetic expression are visited. + fn post_visit_expr(self, _n: &Expr) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any fields of the `SELECT` projection are visited. + fn pre_visit_select_field_list(self, _n: &FieldList) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all fields of the `SELECT` projection are visited. + fn post_visit_select_field_list(self, _n: &FieldList) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the field of a `SELECT` statement are visited. + fn pre_visit_select_field(self, _n: &Field) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the field of a `SELECT` statement are visited. + fn post_visit_select_field(self, _n: &Field) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `FROM` clause of a `SELECT` statement are visited. 
+ fn pre_visit_select_from_clause( + self, + _n: &FromMeasurementClause, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `FROM` clause of a `SELECT` statement are visited. + fn post_visit_select_from_clause(self, _n: &FromMeasurementClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the measurement selection of a `FROM` clause for a `SELECT` statement are visited. + fn pre_visit_select_measurement_selection( + self, + _n: &MeasurementSelection, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the measurement selection of a `FROM` clause for a `SELECT` statement are visited. + fn post_visit_select_measurement_selection( + self, + _n: &MeasurementSelection, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `GROUP BY` clause are visited. + fn pre_visit_group_by_clause(self, _n: &GroupByClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `GROUP BY` clause are visited. + fn post_visit_group_by_clause(self, _n: &GroupByClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `GROUP BY` dimension expression are visited. + fn pre_visit_select_dimension(self, _n: &Dimension) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `GROUP BY` dimension expression are visited. + fn post_visit_select_dimension(self, _n: &Dimension) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `WHERE` clause are visited. + fn pre_visit_where_clause(self, _n: &WhereClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `WHERE` clause are visited. + fn post_visit_where_clause(self, _n: &WhereClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `FROM` clause for any `SHOW` statement are visited. + fn pre_visit_show_from_clause(self, _n: &ShowFromClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `FROM` clause for any `SHOW` statement are visited. + fn post_visit_show_from_clause(self, _n: &ShowFromClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the qualified measurement name are visited. + fn pre_visit_qualified_measurement_name( + self, + _n: &QualifiedMeasurementName, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the qualified measurement name are visited. + fn post_visit_qualified_measurement_name( + self, + _n: &QualifiedMeasurementName, + ) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `FILL` clause are visited. + fn pre_visit_fill_clause(self, _n: &FillClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `FILL` clause are visited. + fn post_visit_fill_clause(self, _n: &FillClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `ORDER BY` clause are visited. + fn pre_visit_order_by_clause(self, _n: &OrderByClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `ORDER BY` clause are visited. 
+ fn post_visit_order_by_clause(self, _n: &OrderByClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `LIMIT` clause are visited. + fn pre_visit_limit_clause(self, _n: &LimitClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `LIMIT` clause are visited. + fn post_visit_limit_clause(self, _n: &LimitClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `OFFSET` clause are visited. + fn pre_visit_offset_clause(self, _n: &OffsetClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `OFFSET` clause are visited. + fn post_visit_offset_clause(self, _n: &OffsetClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SLIMIT` clause are visited. + fn pre_visit_slimit_clause(self, _n: &SLimitClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SLIMIT` clause are visited. + fn post_visit_slimit_clause(self, _n: &SLimitClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of the `SOFFSET` clause are visited. + fn pre_visit_soffset_clause(self, _n: &SOffsetClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of the `SOFFSET` clause are visited. + fn post_visit_soffset_clause(self, _n: &SOffsetClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of a `TZ` clause are visited. + fn pre_visit_timezone_clause(self, _n: &TimeZoneClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of a `TZ` clause are visited. + fn post_visit_timezone_clause(self, _n: &TimeZoneClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of an extended `ON` clause are visited. + fn pre_visit_extended_on_clause(self, _n: &ExtendedOnClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of an extended `ON` clause are visited. + fn post_visit_extended_on_clause(self, _n: &ExtendedOnClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of an `ON` clause are visited. + fn pre_visit_on_clause(self, _n: &OnClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of an `ON` clause are visited. + fn post_visit_on_clause(self, _n: &OnClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of a `WITH MEASUREMENT` clause are visited. + fn pre_visit_with_measurement_clause( + self, + _n: &WithMeasurementClause, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of a `WITH MEASUREMENT` clause are visited. + fn post_visit_with_measurement_clause(self, _n: &WithMeasurementClause) -> VisitorResult<Self> { + Ok(self) + } + + /// Invoked before any children of a `WITH KEY` clause are visited. + fn pre_visit_with_key_clause(self, _n: &WithKeyClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self)) + } + + /// Invoked after all children of a `WITH KEY` clause are visited. 
+ fn post_visit_with_key_clause(self, _n: &WithKeyClause) -> VisitorResult<Self> { + Ok(self) + } +} + +/// Trait for types that can be visited by [`Visitor`] +pub trait Visitable: Sized { + /// accept a visitor, calling `visit` on all children of this + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V>; +} + +impl Visitable for Statement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::Delete(s) => s.accept(visitor), + Self::DropMeasurement(s) => s.accept(visitor), + Self::Explain(s) => s.accept(visitor), + Self::Select(s) => s.accept(visitor), + Self::ShowDatabases(s) => s.accept(visitor), + Self::ShowMeasurements(s) => s.accept(visitor), + Self::ShowRetentionPolicies(s) => s.accept(visitor), + Self::ShowTagKeys(s) => s.accept(visitor), + Self::ShowTagValues(s) => s.accept(visitor), + Self::ShowFieldKeys(s) => s.accept(visitor), + }?; + + visitor.post_visit_statement(self) + } +} + +impl Visitable for DeleteStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_delete_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::FromWhere { from, condition } => { + let visitor = from.accept(visitor)?; + + if let Some(condition) = condition { + condition.accept(visitor) + } else { + Ok(visitor) + } + } + Self::Where(condition) => condition.accept(visitor), + }?; + + visitor.post_visit_delete_statement(self) + } +} + +impl Visitable for WhereClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_where_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.0.accept(visitor)?; + + visitor.post_visit_where_clause(self) + } +} + +impl Visitable for DeleteFromClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_delete_from_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.contents.iter().try_fold(visitor, |v, n| n.accept(v))?; + + visitor.post_visit_delete_from_clause(self) + } +} + +impl Visitable for MeasurementName { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_measurement_name(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_measurement_name(self) + } +} + +impl Visitable for DropMeasurementStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_drop_measurement_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_drop_measurement_statement(self) + } +} + +impl Visitable for ExplainStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_explain_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.select.accept(visitor)?; + + visitor.post_visit_explain_statement(self) + } +} + +impl Visitable for SelectStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_select_statement(self)? 
{ + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.fields.accept(visitor)?; + + let visitor = self.from.accept(visitor)?; + + let visitor = if let Some(condition) = &self.condition { + condition.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(group_by) = &self.group_by { + group_by.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(fill_clause) = &self.fill { + fill_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(order_by) = &self.order_by { + order_by.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(limit) = &self.limit { + limit.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(offset) = &self.offset { + offset.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(limit) = &self.series_limit { + limit.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(offset) = &self.series_offset { + offset.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(tz_clause) = &self.timezone { + tz_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + visitor.post_visit_select_statement(self) + } +} + +impl Visitable for TimeZoneClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_timezone_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_timezone_clause(self) + } +} + +impl Visitable for LimitClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_limit_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_limit_clause(self) + } +} + +impl Visitable for OffsetClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_offset_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_offset_clause(self) + } +} + +impl Visitable for SLimitClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_slimit_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_slimit_clause(self) + } +} + +impl Visitable for SOffsetClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_soffset_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_soffset_clause(self) + } +} + +impl Visitable for FillClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_fill_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_fill_clause(self) + } +} + +impl Visitable for OrderByClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_order_by_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_order_by_clause(self) + } +} + +impl Visitable for GroupByClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_group_by_clause(self)? 
{ + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.contents.iter().try_fold(visitor, |v, d| d.accept(v))?; + + visitor.post_visit_group_by_clause(self) + } +} + +impl Visitable for ShowMeasurementsStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_measurements_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = if let Some(on_clause) = &self.on { + on_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(with_clause) = &self.with_measurement { + with_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(condition) = &self.condition { + condition.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(limit) = &self.limit { + limit.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(offset) = &self.offset { + offset.accept(visitor) + } else { + Ok(visitor) + }?; + + visitor.post_visit_show_measurements_statement(self) + } +} + +impl Visitable for ExtendedOnClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_extended_on_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_extended_on_clause(self) + } +} + +impl Visitable for WithMeasurementClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_with_measurement_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::Equals(n) => n.accept(visitor), + Self::Regex(n) => n.accept(visitor), + }?; + + visitor.post_visit_with_measurement_clause(self) + } +} + +impl Visitable for ShowRetentionPoliciesStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_retention_policies_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = if let Some(on_clause) = &self.database { + on_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + visitor.post_visit_show_retention_policies_statement(self) + } +} + +impl Visitable for ShowFromClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_from_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.contents.iter().try_fold(visitor, |v, f| f.accept(v))?; + + visitor.post_visit_show_from_clause(self) + } +} + +impl Visitable for QualifiedMeasurementName { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_qualified_measurement_name(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.name.accept(visitor)?; + + visitor.post_visit_qualified_measurement_name(self) + } +} + +impl Visitable for ShowTagKeysStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_tag_keys_statement(self)? 
{ + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = if let Some(on_clause) = &self.database { + on_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(from) = &self.from { + from.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(condition) = &self.condition { + condition.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(limit) = &self.limit { + limit.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(offset) = &self.offset { + offset.accept(visitor) + } else { + Ok(visitor) + }?; + + visitor.post_visit_show_tag_keys_statement(self) + } +} + +impl Visitable for ShowTagValuesStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_tag_values_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = if let Some(on_clause) = &self.database { + on_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(from) = &self.from { + from.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = self.with_key.accept(visitor)?; + + let visitor = if let Some(condition) = &self.condition { + condition.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(limit) = &self.limit { + limit.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(offset) = &self.offset { + offset.accept(visitor) + } else { + Ok(visitor) + }?; + + visitor.post_visit_show_tag_values_statement(self) + } +} + +impl Visitable for ShowFieldKeysStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_field_keys_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = if let Some(on_clause) = &self.database { + on_clause.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(from) = &self.from { + from.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(limit) = &self.limit { + limit.accept(visitor) + } else { + Ok(visitor) + }?; + + let visitor = if let Some(offset) = &self.offset { + offset.accept(visitor) + } else { + Ok(visitor) + }?; + + visitor.post_visit_show_field_keys_statement(self) + } +} + +impl Visitable for FieldList { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_select_field_list(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.contents.iter().try_fold(visitor, |v, f| f.accept(v))?; + + visitor.post_visit_select_field_list(self) + } +} + +impl Visitable for Field { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_select_field(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.expr.accept(visitor)?; + + visitor.post_visit_select_field(self) + } +} + +impl Visitable for FromMeasurementClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_select_from_clause(self)? 
{ + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.contents.iter().try_fold(visitor, |v, f| f.accept(v))?; + + visitor.post_visit_select_from_clause(self) + } +} + +impl Visitable for MeasurementSelection { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_select_measurement_selection(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::Name(name) => name.accept(visitor), + Self::Subquery(select) => select.accept(visitor), + }?; + + visitor.post_visit_select_measurement_selection(self) + } +} + +impl Visitable for Dimension { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_select_dimension(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::Time { interval, offset } => { + let visitor = interval.accept(visitor)?; + if let Some(offset) = offset { + offset.accept(visitor) + } else { + Ok(visitor) + } + } + Self::Tag(_) | Self::Regex(_) | Self::Wildcard => Ok(visitor), + }?; + + visitor.post_visit_select_dimension(self) + } +} + +impl Visitable for WithKeyClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_with_key_clause(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_with_key_clause(self) + } +} + +impl Visitable for ShowDatabasesStatement { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_show_databases_statement(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + visitor.post_visit_show_databases_statement(self) + } +} + +impl Visitable for ConditionalExpression { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_conditional_expression(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::Expr(expr) => expr.accept(visitor), + Self::Binary { lhs, rhs, .. } => { + let visitor = lhs.accept(visitor)?; + rhs.accept(visitor) + } + Self::Grouped(expr) => expr.accept(visitor), + }?; + + visitor.post_visit_conditional_expression(self) + } +} + +impl Visitable for Expr { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_expr(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = match self { + Self::UnaryOp(_, expr) => expr.accept(visitor), + Self::Call { args, .. } => args.iter().try_fold(visitor, |v, e| e.accept(v)), + Self::Binary { lhs, op: _, rhs } => { + let visitor = lhs.accept(visitor)?; + rhs.accept(visitor) + } + Self::Nested(expr) => expr.accept(visitor), + + // We explicitly list out each enumeration, to ensure + // we revisit if new items are added to the Expr enumeration. + Self::VarRef { .. } + | Self::BindParameter(_) + | Self::Literal(_) + | Self::Wildcard(_) + | Self::Distinct(_) => Ok(visitor), + }?; + + visitor.post_visit_expr(self) + } +} + +impl Visitable for OnClause { + fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + let visitor = match visitor.pre_visit_on_clause(self)? 
{ + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + visitor.post_visit_on_clause(self) + } +} + +#[cfg(test)] +mod test { + use crate::common::{ + LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, + WhereClause, + }; + use crate::delete::DeleteStatement; + use crate::drop::DropMeasurementStatement; + use crate::explain::ExplainStatement; + use crate::expression::arithmetic::Expr; + use crate::expression::conditional::ConditionalExpression; + use crate::select::{ + Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, + }; + use crate::show::{OnClause, ShowDatabasesStatement}; + use crate::show_field_keys::ShowFieldKeysStatement; + use crate::show_measurements::{ + ExtendedOnClause, ShowMeasurementsStatement, WithMeasurementClause, + }; + use crate::show_retention_policies::ShowRetentionPoliciesStatement; + use crate::show_tag_keys::ShowTagKeysStatement; + use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; + use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; + use crate::visit::{Recursion, Visitor, VisitorResult}; + use crate::Recursion::Continue; + use crate::{statement, Statement, Visitable}; + use std::fmt::Debug; + + struct TestVisitor(Vec<String>); + + impl TestVisitor { + fn new() -> Self { + Self(Vec::new()) + } + + fn push_pre(self, name: &str, n: impl Debug) -> Self { + let mut s = self.0; + s.push(format!("pre_visit_{}: {:?}", name, n)); + Self(s) + } + + fn push_post(self, name: &str, n: impl Debug) -> Self { + let mut s = self.0; + s.push(format!("post_visit_{}: {:?}", name, n)); + Self(s) + } + } + + impl Visitor for TestVisitor { + fn pre_visit_statement(self, n: &Statement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("statement", n))) + } + + fn post_visit_statement(self, n: &Statement) -> VisitorResult<Self> { + Ok(self.push_post("statement", n)) + } + + fn pre_visit_delete_statement(self, n: &DeleteStatement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("delete_statement", n))) + } + + fn post_visit_delete_statement(self, n: &DeleteStatement) -> VisitorResult<Self> { + Ok(self.push_post("delete_statement", n)) + } + + fn pre_visit_delete_from_clause( + self, + n: &DeleteFromClause, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("delete_from", n))) + } + + fn post_visit_delete_from_clause(self, n: &DeleteFromClause) -> VisitorResult<Self> { + Ok(self.push_post("delete_from", n)) + } + + fn pre_visit_measurement_name(self, n: &MeasurementName) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("measurement_name", n))) + } + + fn post_visit_measurement_name(self, n: &MeasurementName) -> VisitorResult<Self> { + Ok(self.push_post("measurement_name", n)) + } + + fn pre_visit_drop_measurement_statement( + self, + n: &DropMeasurementStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("drop_measurement_statement", n))) + } + + fn post_visit_drop_measurement_statement( + self, + n: &DropMeasurementStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("drop_measurement_statement", n)) + } + + fn pre_visit_explain_statement( + self, + n: &ExplainStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("explain_statement", n))) + } + + fn post_visit_explain_statement(self, n: &ExplainStatement) -> VisitorResult<Self> { + 
Ok(self.push_post("explain_statement", n)) + } + + fn pre_visit_select_statement(self, n: &SelectStatement) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("select_statement", n))) + } + + fn post_visit_select_statement(self, n: &SelectStatement) -> VisitorResult<Self> { + Ok(self.push_post("select_statement", n)) + } + + fn pre_visit_show_databases_statement( + self, + n: &ShowDatabasesStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("show_databases_statement", n))) + } + + fn post_visit_show_databases_statement( + self, + n: &ShowDatabasesStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("show_databases_statement", n)) + } + + fn pre_visit_show_measurements_statement( + self, + n: &ShowMeasurementsStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("show_measurements_statement", n))) + } + + fn post_visit_show_measurements_statement( + self, + n: &ShowMeasurementsStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("show_measurements_statement", n)) + } + + fn pre_visit_show_retention_policies_statement( + self, + n: &ShowRetentionPoliciesStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue( + self.push_pre("show_retention_policies_statement", n), + )) + } + + fn post_visit_show_retention_policies_statement( + self, + n: &ShowRetentionPoliciesStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("show_retention_policies_statement", n)) + } + + fn pre_visit_show_tag_keys_statement( + self, + n: &ShowTagKeysStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("show_tag_keys_statement", n))) + } + + fn post_visit_show_tag_keys_statement( + self, + n: &ShowTagKeysStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("show_tag_keys_statement", n)) + } + + fn pre_visit_show_tag_values_statement( + self, + n: &ShowTagValuesStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("show_tag_values_statement", n))) + } + + fn post_visit_show_tag_values_statement( + self, + n: &ShowTagValuesStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("show_tag_values_statement", n)) + } + + fn pre_visit_show_field_keys_statement( + self, + n: &ShowFieldKeysStatement, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("show_field_keys_statement", n))) + } + + fn post_visit_show_field_keys_statement( + self, + n: &ShowFieldKeysStatement, + ) -> VisitorResult<Self> { + Ok(self.push_post("show_field_keys_statement", n)) + } + + fn pre_visit_conditional_expression( + self, + n: &ConditionalExpression, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("conditional_expression", n))) + } + + fn post_visit_conditional_expression( + self, + n: &ConditionalExpression, + ) -> VisitorResult<Self> { + Ok(self.push_post("conditional_expression", n)) + } + + fn pre_visit_expr(self, n: &Expr) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("expr", n))) + } + + fn post_visit_expr(self, n: &Expr) -> VisitorResult<Self> { + Ok(self.push_post("expr", n)) + } + + fn pre_visit_select_field_list(self, n: &FieldList) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("select_field_list", n))) + } + + fn post_visit_select_field_list(self, n: &FieldList) -> VisitorResult<Self> { + Ok(self.push_post("select_field_list", n)) + } + + fn pre_visit_select_field(self, n: &Field) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("select_field", n))) + } + + fn post_visit_select_field(self, n: 
&Field) -> VisitorResult<Self> { + Ok(self.push_post("select_field", n)) + } + + fn pre_visit_select_from_clause( + self, + n: &FromMeasurementClause, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("select_from_clause", n))) + } + + fn post_visit_select_from_clause(self, n: &FromMeasurementClause) -> VisitorResult<Self> { + Ok(self.push_post("select_from_clause", n)) + } + + fn pre_visit_select_measurement_selection( + self, + n: &MeasurementSelection, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("select_measurement_selection", n))) + } + + fn post_visit_select_measurement_selection( + self, + n: &MeasurementSelection, + ) -> VisitorResult<Self> { + Ok(self.push_post("select_measurement_selection", n)) + } + + fn pre_visit_group_by_clause(self, n: &GroupByClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("group_by_clause", n))) + } + + fn post_visit_group_by_clause(self, n: &GroupByClause) -> VisitorResult<Self> { + Ok(self.push_post("group_by_clause", n)) + } + + fn pre_visit_select_dimension(self, n: &Dimension) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("select_dimension", n))) + } + + fn post_visit_select_dimension(self, n: &Dimension) -> VisitorResult<Self> { + Ok(self.push_post("select_dimension", n)) + } + + fn pre_visit_where_clause(self, n: &WhereClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("where_clause", n))) + } + + fn post_visit_where_clause(self, n: &WhereClause) -> VisitorResult<Self> { + Ok(self.push_post("where_clause", n)) + } + + fn pre_visit_show_from_clause(self, n: &ShowFromClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("show_from_clause", n))) + } + + fn post_visit_show_from_clause(self, n: &ShowFromClause) -> VisitorResult<Self> { + Ok(self.push_post("show_from_clause", n)) + } + + fn pre_visit_qualified_measurement_name( + self, + n: &QualifiedMeasurementName, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("qualified_measurement_name", n))) + } + + fn post_visit_qualified_measurement_name( + self, + n: &QualifiedMeasurementName, + ) -> VisitorResult<Self> { + Ok(self.push_post("qualified_measurement_name", n)) + } + + fn pre_visit_fill_clause(self, n: &FillClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("fill_clause", n))) + } + + fn post_visit_fill_clause(self, n: &FillClause) -> VisitorResult<Self> { + Ok(self.push_post("fill_clause", n)) + } + + fn pre_visit_order_by_clause(self, n: &OrderByClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("order_by_clause", n))) + } + + fn post_visit_order_by_clause(self, n: &OrderByClause) -> VisitorResult<Self> { + Ok(self.push_post("order_by_clause", n)) + } + + fn pre_visit_limit_clause(self, n: &LimitClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("limit_clause", n))) + } + + fn post_visit_limit_clause(self, n: &LimitClause) -> VisitorResult<Self> { + Ok(self.push_post("limit_clause", n)) + } + + fn pre_visit_offset_clause(self, n: &OffsetClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("offset_clause", n))) + } + + fn post_visit_offset_clause(self, n: &OffsetClause) -> VisitorResult<Self> { + Ok(self.push_post("offset_clause", n)) + } + + fn pre_visit_slimit_clause(self, n: &SLimitClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("slimit_clause", n))) + } + + fn post_visit_slimit_clause(self, n: &SLimitClause) -> VisitorResult<Self> { + 
Ok(self.push_post("slimit_clause", n)) + } + + fn pre_visit_soffset_clause(self, n: &SOffsetClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("soffset_clause", n))) + } + + fn post_visit_soffset_clause(self, n: &SOffsetClause) -> VisitorResult<Self> { + Ok(self.push_post("soffset_clause", n)) + } + + fn pre_visit_timezone_clause(self, n: &TimeZoneClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("timezone_clause", n))) + } + + fn post_visit_timezone_clause(self, n: &TimeZoneClause) -> VisitorResult<Self> { + Ok(self.push_post("timezone_clause", n)) + } + + fn pre_visit_extended_on_clause( + self, + n: &ExtendedOnClause, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("extended_on_clause", n))) + } + + fn post_visit_extended_on_clause(self, n: &ExtendedOnClause) -> VisitorResult<Self> { + Ok(self.push_post("extended_on_clause", n)) + } + + fn pre_visit_on_clause(self, n: &OnClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("on_clause", n))) + } + + fn post_visit_on_clause(self, n: &OnClause) -> VisitorResult<Self> { + Ok(self.push_pre("on_clause", n)) + } + + fn pre_visit_with_measurement_clause( + self, + n: &WithMeasurementClause, + ) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("with_measurement_clause", n))) + } + + fn post_visit_with_measurement_clause( + self, + n: &WithMeasurementClause, + ) -> VisitorResult<Self> { + Ok(self.push_post("with_measurement_clause", n)) + } + + fn pre_visit_with_key_clause(self, n: &WithKeyClause) -> VisitorResult<Recursion<Self>> { + Ok(Continue(self.push_pre("with_key_clause", n))) + } + + fn post_visit_with_key_clause(self, n: &WithKeyClause) -> VisitorResult<Self> { + Ok(self.push_post("with_key_clause", n)) + } + } + + macro_rules! 
visit_statement { + ($SQL:literal) => {{ + let (_, s) = statement($SQL).unwrap(); + s.accept(TestVisitor::new()).unwrap().0 + }}; + } + + #[test] + fn test_delete_statement() { + insta::assert_yaml_snapshot!(visit_statement!("DELETE FROM a WHERE b = \"c\"")); + insta::assert_yaml_snapshot!(visit_statement!("DELETE WHERE 'foo bar' =~ /foo/")); + insta::assert_yaml_snapshot!(visit_statement!("DELETE FROM cpu")); + insta::assert_yaml_snapshot!(visit_statement!("DELETE FROM /^cpu/")); + } + + #[test] + fn test_drop_measurement_statement() { + insta::assert_yaml_snapshot!(visit_statement!("DROP MEASUREMENT cpu")) + } + + #[test] + fn test_explain_statement() { + insta::assert_yaml_snapshot!(visit_statement!("EXPLAIN SELECT * FROM cpu")); + } + + #[test] + fn test_select_statement() { + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT value FROM temp"#)); + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT DISTINCT value FROM temp"#)); + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT COUNT(value) FROM temp"#)); + insta::assert_yaml_snapshot!(visit_statement!( + r#"SELECT COUNT(DISTINCT value) FROM temp"# + )); + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT * FROM /cpu/, memory"#)); + insta::assert_yaml_snapshot!(visit_statement!( + r#"SELECT value FROM (SELECT usage FROM cpu WHERE host = "node1") + WHERE region =~ /west/ AND value > 5 + GROUP BY TIME(5m), host + FILL(previous) + ORDER BY TIME DESC + LIMIT 1 OFFSET 2 + SLIMIT 3 SOFFSET 4 + TZ('Australia/Hobart') + "# + )); + } + + #[test] + fn test_show_databases_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW DATABASES")); + } + + #[test] + fn test_show_measurements_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS ON db.rp")); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW MEASUREMENTS WITH MEASUREMENT = \"cpu\"" + )); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS WHERE host = 'west'")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS LIMIT 5")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS OFFSET 10")); + + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW MEASUREMENTS ON * WITH MEASUREMENT =~ /foo/ WHERE host = 'west' LIMIT 10 OFFSET 20" + )); + } + + #[test] + fn test_show_retention_policies_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW RETENTION POLICIES")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW RETENTION POLICIES ON telegraf")); + } + + #[test] + fn test_show_tag_keys_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW TAG KEYS")); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW TAG KEYS ON telegraf FROM cpu WHERE host = \"west\" LIMIT 5 OFFSET 10" + )); + } + + #[test] + fn test_show_tag_values_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW TAG VALUES WITH KEY = host")); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW TAG VALUES WITH KEY =~ /host|region/" + )); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW TAG VALUES WITH KEY IN (host, region)" + )); + insta::assert_yaml_snapshot!(visit_statement!("SHOW TAG VALUES ON telegraf FROM cpu WITH KEY = host WHERE host = \"west\" LIMIT 5 OFFSET 10")); + } + + #[test] + fn test_show_field_keys_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS ON telegraf")); + 
insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS FROM cpu")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS ON telegraf FROM /cpu/")); + } +} diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index f2bf0d21f2..a74ac4779e 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -58,6 +58,7 @@ ring = { version = "0.16", features = ["alloc", "dev_urandom_fallback", "once_ce serde = { version = "1", features = ["derive", "rc", "serde_derive", "std"] } serde_json = { version = "1", features = ["raw_value", "std"] } sha2 = { version = "0.10", features = ["std"] } +similar = { version = "2", features = ["inline", "text"] } smallvec = { version = "1", default-features = false, features = ["union"] } sqlx = { version = "0.6", features = ["_rt-tokio", "json", "macros", "migrate", "postgres", "runtime-tokio-rustls", "sqlx-macros", "tls", "uuid"] } sqlx-core = { version = "0.6", default-features = false, features = ["_rt-tokio", "_tls-rustls", "any", "base64", "crc", "dirs", "hkdf", "hmac", "json", "md-5", "migrate", "postgres", "rand", "runtime-tokio-rustls", "rustls", "rustls-pemfile", "serde", "serde_json", "sha1", "sha2", "tokio-stream", "uuid", "webpki-roots", "whoami"] }
57f08dbccde9d7ae8eb331cbf0173f2d16fa33bd
Andrew Lamb
2023-01-18 13:19:32
Update datafusion to Jan 9, 2023 (1 / 2) (#6603)
* refactor: Update DataFusion pin to early Jan 2023 * fix: Update tests now that planning is async * fix: Updates for API changes * chore: Run cargo hakari tasks * fix: Update comment * refactor: nicer config setup * fix: gapfill async
Co-authored-by: CircleCI[bot] <[email protected]>
chore: Update datafusion to Jan 9, 2023 (1 / 2) (#6603) * refactor: Update DataFusion pin to early Jan 2023 * fix: Update tests now that planning is async * fix: Updates for API changes * chore: Run cargo hakari tasks * fix: Update comment * refactor: nicer config setup * fix: gapfill async Co-authored-by: CircleCI[bot] <[email protected]>
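The commit body above boils down to two API shifts in this DataFusion pin: session options move from string keys to a typed ConfigOptions struct, and logical/physical planning become async. A minimal sketch of the new call pattern, distilled from the hunks in the diff below; the helper name run_query is illustrative and not part of the IOx codebase, and the sketch assumes a DataFusion 16-style SessionContext plus a tokio runtime:

use datafusion::{
    config::ConfigOptions,
    error::Result,
    prelude::{SessionConfig, SessionContext},
};

// Illustrative helper (not from the commit): build a context with parquet
// filter pushdown enabled, then plan a query with the now-async entry points.
async fn run_query(sql: &str) -> Result<()> {
    // Options are now set through typed ConfigOptions fields rather than
    // set_bool/set_u64 with string option keys.
    let mut options = ConfigOptions::new();
    options.execution.parquet.pushdown_filters = true;
    options.execution.parquet.reorder_filters = true;

    let ctx = SessionContext::with_config(SessionConfig::from(options));

    // Both planning steps are async in this DataFusion version, which is why
    // the tests in the diff below gain #[tokio::test] and .await.
    let state = ctx.state();
    let logical_plan = state.create_logical_plan(sql).await?;
    let physical_plan = state.create_physical_plan(&logical_plan).await?;
    let _ = physical_plan; // execution elided in this sketch

    Ok(())
}

The same async shift is what forces the smaller changes in the diff, e.g. ctx.table("t") becoming ctx.table("t").await in the gapfill and selector tests.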
diff --git a/Cargo.lock b/Cargo.lock index 0ca074ed3f..3a1da02d92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1337,8 +1337,8 @@ dependencies = [ [[package]] name = "datafusion" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "ahash 0.8.2", "arrow", @@ -1358,6 +1358,7 @@ dependencies = [ "futures", "glob", "hashbrown 0.13.2", + "indexmap", "itertools", "lazy_static", "log", @@ -1370,7 +1371,7 @@ dependencies = [ "pin-project-lite", "rand", "smallvec", - "sqlparser 0.28.0", + "sqlparser", "tempfile", "tokio", "tokio-stream", @@ -1382,32 +1383,33 @@ dependencies = [ [[package]] name = "datafusion-common" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "arrow", "chrono", + "num_cpus", "object_store", "parquet", - "sqlparser 0.28.0", + "sqlparser", ] [[package]] name = "datafusion-expr" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "ahash 0.8.2", "arrow", "datafusion-common", "log", - "sqlparser 0.28.0", + "sqlparser", ] [[package]] name = "datafusion-optimizer" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "arrow", "async-trait", @@ -1417,12 +1419,13 @@ dependencies = [ "datafusion-physical-expr", "hashbrown 0.13.2", "log", + "regex-syntax", ] [[package]] name = "datafusion-physical-expr" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "ahash 0.8.2", "arrow", @@ -1436,6 +1439,7 @@ dependencies = [ "datafusion-row", "half 2.1.0", "hashbrown 0.13.2", + "indexmap", "itertools", "lazy_static", "md-5", @@ -1450,8 +1454,8 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "arrow", "chrono", @@ -1467,8 +1471,8 @@ dependencies = [ [[package]] name = "datafusion-row" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" 
+version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "arrow", "datafusion-common", @@ -1478,14 +1482,14 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=07f49803a3d7a9e9b3c2c9a7714c1bb08db71385#07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" +version = "16.0.0" +source = "git+https://github.com/alamb/arrow-datafusion.git?branch=alamb/patched_for_iox#baeb5a042a0169027df5345ace5f5ccc146f4603" dependencies = [ "arrow-schema", "datafusion-common", "datafusion-expr", "log", - "sqlparser 0.28.0", + "sqlparser", ] [[package]] @@ -2508,7 +2512,7 @@ version = "0.1.0" dependencies = [ "generated_types", "snafu", - "sqlparser 0.30.0", + "sqlparser", "workspace-hack", ] @@ -4178,7 +4182,7 @@ dependencies = [ "query_functions", "schema", "snafu", - "sqlparser 0.30.0", + "sqlparser", "test_helpers", "workspace-hack", ] @@ -5321,20 +5325,23 @@ dependencies = [ [[package]] name = "sqlparser" -version = "0.28.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "249ae674b9f636b8ff64d8bfe218774cf05a26de40fd9f358669dccc4c0a9d7d" +checksum = "db67dc6ef36edb658196c3fef0464a80b53dbbc194a904e81f9bd4190f9ecc5b" dependencies = [ "log", + "sqlparser_derive", ] [[package]] -name = "sqlparser" -version = "0.30.0" +name = "sqlparser_derive" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db67dc6ef36edb658196c3fef0464a80b53dbbc194a904e81f9bd4190f9ecc5b" +checksum = "55fe75cb4a364c7f7ae06c7dbbc8d84bddd85d6cdf9975963c3935bc1991761e" dependencies = [ - "log", + "proc-macro2", + "quote", + "syn", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 79bd154f74..55692f0fbf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,8 +115,12 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] arrow = { version = "29.0.0" } arrow-flight = { version = "29.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="07f49803a3d7a9e9b3c2c9a7714c1bb08db71385", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" } +#datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="07f49803a3d7a9e9b3c2c9a7714c1bb08db71385", default-features = false } +#datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="07f49803a3d7a9e9b3c2c9a7714c1bb08db71385" } +# Temporary patch to https://github.com/alamb/arrow-datafusion/tree/alamb/patched_for_iox +# See https://github.com/alamb/arrow-datafusion/pull/7 for details +datafusion = { git = "https://github.com/alamb/arrow-datafusion.git", branch="alamb/patched_for_iox", default-features = false } +datafusion-proto = { git = "https://github.com/alamb/arrow-datafusion.git", branch="alamb/patched_for_iox" } hashbrown = { version = "0.13.2" } parquet = { version = "29.0.0" } diff --git a/datafusion_util/src/config.rs b/datafusion_util/src/config.rs index b3ac8bae01..d4eff0c7ab 100644 --- a/datafusion_util/src/config.rs +++ b/datafusion_util/src/config.rs @@ -1,9 +1,4 @@ -use datafusion::{ - config::{ - OPT_COALESCE_TARGET_BATCH_SIZE, OPT_PARQUET_PUSHDOWN_FILTERS, OPT_PARQUET_REORDER_FILTERS, - }, - prelude::SessionConfig, -}; +use datafusion::{config::ConfigOptions, prelude::SessionConfig}; // The default catalog name - this impacts what SQL queries 
use if not specified pub const DEFAULT_CATALOG: &str = "public"; @@ -13,19 +8,15 @@ pub const DEFAULT_SCHEMA: &str = "iox"; /// The maximum number of rows that DataFusion should create in each RecordBatch pub const BATCH_SIZE: usize = 8 * 1024; -const COALESCE_BATCH_SIZE: usize = BATCH_SIZE / 2; - /// Return a SessionConfig object configured for IOx pub fn iox_session_config() -> SessionConfig { - SessionConfig::new() + // Enable parquet predicate pushdown optimization + let mut options = ConfigOptions::new(); + options.execution.parquet.pushdown_filters = true; + options.execution.parquet.reorder_filters = true; + + SessionConfig::from(options) .with_batch_size(BATCH_SIZE) - .set_u64( - OPT_COALESCE_TARGET_BATCH_SIZE, - COALESCE_BATCH_SIZE.try_into().unwrap(), - ) - // Enable parquet predicate pushdown optimization - .set_bool(OPT_PARQUET_PUSHDOWN_FILTERS, true) - .set_bool(OPT_PARQUET_REORDER_FILTERS, true) .with_create_default_catalog_and_schema(true) .with_information_schema(true) .with_default_catalog_and_schema(DEFAULT_CATALOG, DEFAULT_SCHEMA) diff --git a/iox_query/src/exec/context.rs b/iox_query/src/exec/context.rs index 1dc298b6f8..bc7b0e8656 100644 --- a/iox_query/src/exec/context.rs +++ b/iox_query/src/exec/context.rs @@ -20,7 +20,7 @@ use crate::{ split::StreamSplitExec, stringset::{IntoStringSet, StringSetRef}, }, - logical_optimizer::iox_optimizer, + logical_optimizer::register_iox_optimizers, plan::{ fieldlist::FieldListPlan, seriesset::{SeriesSetPlan, SeriesSetPlans}, @@ -222,13 +222,11 @@ impl IOxSessionConfig { let state = SessionState::with_config_rt(session_config, self.runtime) .with_query_planner(Arc::new(IOxQueryPlanner {})); - - let state = register_selector_aggregates(state); - let mut state = register_scalar_functions(state); - state.optimizer = iox_optimizer(); + let state = register_iox_optimizers(state); let inner = SessionContext::with_state(state); - + register_selector_aggregates(&inner); + register_scalar_functions(&inner); if let Some(default_catalog) = self.default_catalog { inner.register_catalog(DEFAULT_CATALOG, default_catalog); } @@ -311,9 +309,9 @@ impl IOxSessionContext { let ctx = self.child_ctx("prepare_sql"); debug!(text=%sql, "planning SQL query"); - // NOTE can not use ctx.inner.sql here as it also interprets DDL + // NOTE can not use ctx.inner.sql() here as it also interprets DDL #[allow(deprecated)] - let logical_plan = ctx.inner.create_logical_plan(sql)?; + let logical_plan = ctx.inner.state().create_logical_plan(sql).await?; debug!(plan=%logical_plan.display_graphviz(), "logical plan"); // Make nicer erorrs for unsupported SQL @@ -347,7 +345,7 @@ impl IOxSessionContext { pub async fn create_physical_plan(&self, plan: &LogicalPlan) -> Result<Arc<dyn ExecutionPlan>> { let mut ctx = self.child_ctx("create_physical_plan"); debug!(text=%plan.display_indent_schema(), "create_physical_plan: initial plan"); - let physical_plan = ctx.inner.create_physical_plan(plan).await?; + let physical_plan = ctx.inner.state().create_physical_plan(plan).await?; ctx.recorder.event("physical plan"); debug!(text=%displayable(physical_plan.as_ref()).indent(), "create_physical_plan: plan to run"); @@ -670,13 +668,13 @@ pub trait SessionContextIOxExt { impl SessionContextIOxExt for SessionState { fn child_span(&self, name: &'static str) -> Option<Span> { - self.config + self.config() .get_extension::<Option<Span>>() .and_then(|span| span.as_ref().as_ref().map(|span| span.child(name))) } fn span_ctx(&self) -> Option<SpanContext> { - self.config + self.config() 
.get_extension::<Option<Span>>() .and_then(|span| span.as_ref().as_ref().map(|span| span.ctx.clone())) } diff --git a/iox_query/src/frontend/influxql.rs b/iox_query/src/frontend/influxql.rs index e1ef4c9b21..dc3598e1b1 100644 --- a/iox_query/src/frontend/influxql.rs +++ b/iox_query/src/frontend/influxql.rs @@ -40,7 +40,7 @@ impl InfluxQLQueryPlanner { } let planner = InfluxQLToLogicalPlan::new(&ctx, database); - let logical_plan = planner.statement_to_plan(statements.pop().unwrap())?; + let logical_plan = planner.statement_to_plan(statements.pop().unwrap()).await?; debug!(plan=%logical_plan.display_graphviz(), "logical plan"); // This would only work for SELECT statements at the moment, as the schema queries do diff --git a/iox_query/src/logical_optimizer/mod.rs b/iox_query/src/logical_optimizer/mod.rs index fba32fa298..76357c65a8 100644 --- a/iox_query/src/logical_optimizer/mod.rs +++ b/iox_query/src/logical_optimizer/mod.rs @@ -1,17 +1,14 @@ use std::sync::Arc; -use datafusion::optimizer::optimizer::Optimizer; +use datafusion::execution::context::SessionState; use self::influx_regex_to_datafusion_regex::InfluxRegexToDataFusionRegex; mod influx_regex_to_datafusion_regex; -/// Create IOx-specific logical [`Optimizer`]. +/// Register IOx-specific logical [`OptimizerRule`]s with the SessionContext /// -/// This is mostly the default optimizer that DataFusion provides but with some additional passes. -pub fn iox_optimizer() -> Optimizer { - let mut opt = Optimizer::new(); - opt.rules - .push(Arc::new(InfluxRegexToDataFusionRegex::new())); - opt +/// [`OptimizerRule`]: datafusion::optimizer::OptimizerRule +pub fn register_iox_optimizers(state: SessionState) -> SessionState { + state.add_optimizer_rule(Arc::new(InfluxRegexToDataFusionRegex::new())) } diff --git a/iox_query/src/plan/influxql.rs b/iox_query/src/plan/influxql.rs index 1ad512a299..d4cdaf5a68 100644 --- a/iox_query/src/plan/influxql.rs +++ b/iox_query/src/plan/influxql.rs @@ -8,14 +8,13 @@ mod var_ref; use crate::plan::influxql::rewriter::rewrite_statement; use crate::{DataFusionError, IOxSessionContext, QueryNamespace}; use datafusion::common::{DFSchema, Result, ScalarValue}; -use datafusion::execution::context::SessionState; +use datafusion::datasource::provider_as_source; use datafusion::logical_expr::expr_rewriter::normalize_col; use datafusion::logical_expr::logical_plan::builder::project; use datafusion::logical_expr::{ lit, BinaryExpr, BuiltinScalarFunction, Expr, LogicalPlan, LogicalPlanBuilder, Operator, }; use datafusion::prelude::Column; -use datafusion::sql::planner::ContextProvider; use datafusion::sql::TableReference; use influxdb_influxql_parser::expression::{ BinaryOperator, ConditionalExpression, ConditionalOperator, VarRefDataType, @@ -56,19 +55,14 @@ enum ExprScope { pub struct InfluxQLToLogicalPlan<'a> { ctx: &'a IOxSessionContext, database: Arc<dyn QueryNamespace>, - state: SessionState, } impl<'a> InfluxQLToLogicalPlan<'a> { pub fn new(ctx: &'a IOxSessionContext, database: Arc<dyn QueryNamespace>) -> Self { - Self { - ctx, - database, - state: ctx.inner().state(), - } + Self { ctx, database } } - pub fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> { + pub async fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> { match statement { Statement::CreateDatabase(_) => { Err(DataFusionError::NotImplemented("CREATE DATABASE".into())) @@ -80,7 +74,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> { Statement::Explain(_) => Err(DataFusionError::NotImplemented("EXPLAIN".into())), 
Statement::Select(select) => { let select = rewrite_statement(self.database.as_meta(), &select)?; - self.select_statement_to_plan(select) + self.select_statement_to_plan(select).await } Statement::ShowDatabases(_) => { Err(DataFusionError::NotImplemented("SHOW DATABASES".into())) @@ -104,9 +98,9 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } /// Create a [`LogicalPlan`] from the specified InfluxQL `SELECT` statement. - fn select_statement_to_plan(&self, select: SelectStatement) -> Result<LogicalPlan> { + async fn select_statement_to_plan(&self, select: SelectStatement) -> Result<LogicalPlan> { // Process FROM clause - let plans = self.plan_from_tables(select.from)?; + let plans = self.plan_from_tables(select.from).await?; // Only support a single measurement to begin with let plan = match plans.len() { @@ -398,12 +392,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// Generate a list of logical plans for each of the tables references in the `FROM` /// clause. - fn plan_from_tables(&self, from: FromMeasurementClause) -> Result<Vec<LogicalPlan>> { - from.iter() - .map(|ms| match ms { + async fn plan_from_tables(&self, from: FromMeasurementClause) -> Result<Vec<LogicalPlan>> { + let mut plans = vec![]; + for ms in from.iter() { + let plan = match ms { MeasurementSelection::Name(qn) => match qn.name { MeasurementName::Name(ref ident) => { - self.create_table_ref(normalize_identifier(ident)) + self.create_table_ref(normalize_identifier(ident)).await } // rewriter is expected to expand the regular expression MeasurementName::Regex(_) => Err(DataFusionError::Internal( @@ -413,17 +408,19 @@ impl<'a> InfluxQLToLogicalPlan<'a> { MeasurementSelection::Subquery(_) => Err(DataFusionError::NotImplemented( "subquery in FROM clause".into(), )), - }) - .collect() + }?; + plans.push(plan); + } + Ok(plans) } /// Create a [LogicalPlan] that refers to the specified `table_name` or /// an [LogicalPlan::EmptyRelation] if the table does not exist. - fn create_table_ref(&self, table_name: String) -> Result<LogicalPlan> { + async fn create_table_ref(&self, table_name: String) -> Result<LogicalPlan> { let table_ref: TableReference<'_> = table_name.as_str().into(); - if let Ok(provider) = self.state.get_table_provider(table_ref) { - LogicalPlanBuilder::scan(&table_name, provider, None)?.build() + if let Ok(provider) = self.ctx.inner().table_provider(table_ref).await { + LogicalPlanBuilder::scan(&table_name, provider_as_source(provider), None)?.build() } else { LogicalPlanBuilder::empty(false).build() } @@ -469,7 +466,7 @@ mod test { use influxdb_influxql_parser::parse_statements; use insta::assert_snapshot; - fn plan(sql: &str) -> String { + async fn plan(sql: &str) -> String { let mut statements = parse_statements(sql).unwrap(); // index of columns in the above chunk: [bar, foo, i64_field, i64_field_2, time] let executor = Arc::new(Executor::new_testing()); @@ -502,7 +499,7 @@ mod test { let ctx = test_db.new_query_context(None); let planner = InfluxQLToLogicalPlan::new(&ctx, test_db); - match planner.statement_to_plan(statements.pop().unwrap()) { + match planner.statement_to_plan(statements.pop().unwrap()).await { Ok(res) => res.display_indent_schema().to_string(), Err(err) => err.to_string(), } @@ -511,18 +508,18 @@ mod test { /// Verify the list of unsupported statements. /// /// It is expected certain statements will be unsupported, indefinitely. 
- #[test] - fn test_unsupported_statements() { - assert_snapshot!(plan("CREATE DATABASE foo")); - assert_snapshot!(plan("DELETE FROM foo")); - assert_snapshot!(plan("DROP MEASUREMENT foo")); - assert_snapshot!(plan("EXPLAIN SELECT bar FROM foo")); - assert_snapshot!(plan("SHOW DATABASES")); - assert_snapshot!(plan("SHOW MEASUREMENTS")); - assert_snapshot!(plan("SHOW RETENTION POLICIES")); - assert_snapshot!(plan("SHOW TAG KEYS")); - assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar")); - assert_snapshot!(plan("SHOW FIELD KEYS")); + #[tokio::test] + async fn test_unsupported_statements() { + assert_snapshot!(plan("CREATE DATABASE foo").await); + assert_snapshot!(plan("DELETE FROM foo").await); + assert_snapshot!(plan("DROP MEASUREMENT foo").await); + assert_snapshot!(plan("EXPLAIN SELECT bar FROM foo").await); + assert_snapshot!(plan("SHOW DATABASES").await); + assert_snapshot!(plan("SHOW MEASUREMENTS").await); + assert_snapshot!(plan("SHOW RETENTION POLICIES").await); + assert_snapshot!(plan("SHOW TAG KEYS").await); + assert_snapshot!(plan("SHOW TAG VALUES WITH KEY = bar").await); + assert_snapshot!(plan("SHOW FIELD KEYS").await); } /// Tests to validate InfluxQL `SELECT` statements that project columns without specifying @@ -531,25 +528,25 @@ mod test { use super::*; /// Select data from a single measurement - #[test] - fn test_single_measurement() { - assert_snapshot!(plan("SELECT f64_field FROM data")); - assert_snapshot!(plan("SELECT time, f64_field FROM data")); - assert_snapshot!(plan("SELECT time as timestamp, f64_field FROM data")); - assert_snapshot!(plan("SELECT foo, f64_field FROM data")); - assert_snapshot!(plan("SELECT foo, f64_field, i64_field FROM data")); - assert_snapshot!(plan("SELECT /^f/ FROM data")); - assert_snapshot!(plan("SELECT * FROM data")); - assert_snapshot!(plan("SELECT TIME FROM data")); // TIME is a field + #[tokio::test] + async fn test_single_measurement() { + assert_snapshot!(plan("SELECT f64_field FROM data").await); + assert_snapshot!(plan("SELECT time, f64_field FROM data").await); + assert_snapshot!(plan("SELECT time as timestamp, f64_field FROM data").await); + assert_snapshot!(plan("SELECT foo, f64_field FROM data").await); + assert_snapshot!(plan("SELECT foo, f64_field, i64_field FROM data").await); + assert_snapshot!(plan("SELECT /^f/ FROM data").await); + assert_snapshot!(plan("SELECT * FROM data").await); + assert_snapshot!(plan("SELECT TIME FROM data").await); // TIME is a field } /// Arithmetic expressions in the projection list - #[test] - fn test_simple_arithmetic_in_projection() { - assert_snapshot!(plan("SELECT foo, f64_field + f64_field FROM data")); - assert_snapshot!(plan("SELECT foo, sin(f64_field) FROM data")); - assert_snapshot!(plan("SELECT foo, atan2(f64_field, 2) FROM data")); - assert_snapshot!(plan("SELECT foo, f64_field + 0.5 FROM data")); + #[tokio::test] + async fn test_simple_arithmetic_in_projection() { + assert_snapshot!(plan("SELECT foo, f64_field + f64_field FROM data").await); + assert_snapshot!(plan("SELECT foo, sin(f64_field) FROM data").await); + assert_snapshot!(plan("SELECT foo, atan2(f64_field, 2) FROM data").await); + assert_snapshot!(plan("SELECT foo, f64_field + 0.5 FROM data").await); } // The following is an outline of additional scenarios to develop @@ -659,10 +656,10 @@ mod test { /// Succeeds and returns null values for the expression /// **Actual:** /// Error during planning: 'Float64 + Utf8' can't be evaluated because there isn't a common type to coerce the types to - #[test] + #[tokio::test] 
#[ignore] - fn test_select_coercion_from_str() { - assert_snapshot!(plan("SELECT f64_field + str_field::float FROM data")); + async fn test_select_coercion_from_str() { + assert_snapshot!(plan("SELECT f64_field + str_field::float FROM data").await); } /// **Issue:** @@ -673,10 +670,10 @@ mod test { /// Succeeds and plan projection of f64_field is Float64 /// **Data:** /// m0,tag0=val00 f64=99.0,i64=100i,str="lo",str_f64="5.5" 1667181600000000000 - #[test] + #[tokio::test] #[ignore] - fn test_select_explicit_cast() { - assert_snapshot!(plan("SELECT f64_field::integer FROM data")); + async fn test_select_explicit_cast() { + assert_snapshot!(plan("SELECT f64_field::integer FROM data").await); } /// **Issue:** @@ -685,14 +682,14 @@ mod test { /// Succeeds and plans the query, returning null values for unknown columns /// **Actual:** /// Schema error: No field named 'TIME'. Valid fields are 'data'.'bar', 'data'.'bool_field', 'data'.'f64_field', 'data'.'foo', 'data'.'i64_field', 'data'.'mixedCase', 'data'.'str_field', 'data'.'time', 'data'.'with space'. - #[test] + #[tokio::test] #[ignore] - fn test_select_case_sensitivity() { + async fn test_select_case_sensitivity() { // should return no results - assert_snapshot!(plan("SELECT TIME, f64_Field FROM data")); + assert_snapshot!(plan("SELECT TIME, f64_Field FROM data").await); // should bind to time and f64_field, and i64_Field should return NULL values - assert_snapshot!(plan("SELECT time, f64_field, i64_Field FROM data")); + assert_snapshot!(plan("SELECT time, f64_field, i64_Field FROM data").await); } } } diff --git a/iox_query/src/provider/deduplicate.rs b/iox_query/src/provider/deduplicate.rs index f75bb7c7ba..d1fdda6f91 100644 --- a/iox_query/src/provider/deduplicate.rs +++ b/iox_query/src/provider/deduplicate.rs @@ -156,8 +156,9 @@ impl ExecutionPlan for DeduplicateExec { Some(&self.sort_keys) } - fn relies_on_input_order(&self) -> bool { - true + fn required_input_ordering(&self) -> Vec<Option<&[PhysicalSortExpr]>> { + // requires the input to be sorted on the primary key + vec![self.output_ordering()] } fn maintains_input_order(&self) -> bool { diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs index e59af96b55..049e194589 100644 --- a/iox_query/src/provider/physical.rs +++ b/iox_query/src/provider/physical.rs @@ -198,6 +198,7 @@ pub fn chunks_to_physical_nodes( limit: None, table_partition_cols: vec![], output_ordering, + infinite_source: false, }; let meta_size_hint = None; let parquet_exec = ParquetExec::new(base_config, predicate.filter_expr(), meta_size_hint); diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index 8c00df8edf..59229af1bf 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -232,6 +232,7 @@ struct TestDatabaseSchemaProvider { partitions: BTreeMap<String, BTreeMap<ChunkId, Arc<TestChunk>>>, } +#[async_trait] impl SchemaProvider for TestDatabaseSchemaProvider { fn as_any(&self) -> &dyn Any { self @@ -246,7 +247,7 @@ impl SchemaProvider for TestDatabaseSchemaProvider { .collect() } - fn table(&self, name: &str) -> Option<Arc<dyn TableProvider>> { + async fn table(&self, name: &str) -> Option<Arc<dyn TableProvider>> { Some(Arc::new(TestDatabaseTableProvider { partitions: self .partitions diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs index 39f1fe050c..ceb27b207f 100644 --- a/parquet_file/src/storage.rs +++ b/parquet_file/src/storage.rs @@ -123,6 +123,7 @@ impl ParquetExecInput { table_partition_cols: vec![], // Parquet files ARE actually 
sorted but we don't care here since we just construct a `collect` plan. output_ordering: None, + infinite_source: false, }; let exec = ParquetExec::new(base_config, None, None); let exec_schema = exec.schema(); diff --git a/parquet_to_line_protocol/src/lib.rs b/parquet_to_line_protocol/src/lib.rs index 78afaa1aac..88385fbabb 100644 --- a/parquet_to_line_protocol/src/lib.rs +++ b/parquet_to_line_protocol/src/lib.rs @@ -222,6 +222,7 @@ impl ParquetFileReader { limit: None, table_partition_cols: vec![], output_ordering: None, + infinite_source: false, }; // set up enough datafusion context to do the real read session diff --git a/predicate/src/delete_expr.rs b/predicate/src/delete_expr.rs index 084ecd6186..4c9cdc3861 100644 --- a/predicate/src/delete_expr.rs +++ b/predicate/src/delete_expr.rs @@ -216,13 +216,13 @@ mod tests { #[test] fn test_unsupported_operator() { - let res = df_to_op(datafusion::logical_expr::Operator::Like); + let res = df_to_op(datafusion::logical_expr::Operator::Lt); assert_contains!(res.unwrap_err().to_string(), "unsupported operator:"); } #[test] fn test_unsupported_operator_in_expr() { - let expr = col("foo").like(lit("x")); + let expr = col("foo").lt(lit("x")); let res = df_to_expr(expr); assert_contains!(res.unwrap_err().to_string(), "unsupported operator:"); } diff --git a/predicate/src/rpc_predicate/rewrite.rs b/predicate/src/rpc_predicate/rewrite.rs index da4814f718..4488e86a5a 100644 --- a/predicate/src/rpc_predicate/rewrite.rs +++ b/predicate/src/rpc_predicate/rewrite.rs @@ -99,10 +99,6 @@ fn is_comparison(op: Operator) -> bool { Operator::Modulo => false, Operator::And => true, Operator::Or => true, - Operator::Like => true, - Operator::ILike => true, - Operator::NotLike => true, - Operator::NotILike => true, Operator::IsDistinctFrom => true, Operator::IsNotDistinctFrom => true, Operator::RegexMatch => true, @@ -381,8 +377,6 @@ mod tests { run_case(Operator::Modulo, false, lit(1), lit(2)); run_case(Operator::And, true, lit("foo"), lit("bar")); run_case(Operator::Or, true, lit("foo"), lit("bar")); - run_case(Operator::Like, true, lit("foo"), lit("bar")); - run_case(Operator::NotLike, true, lit("foo"), lit("bar")); run_case(Operator::IsDistinctFrom, true, lit("foo"), lit("bar")); run_case(Operator::IsNotDistinctFrom, true, lit("foo"), lit("bar")); run_case(Operator::RegexMatch, true, lit("foo"), lit("bar")); diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs index da8b76aed1..fb839488b7 100644 --- a/querier/src/namespace/query_access.rs +++ b/querier/src/namespace/query_access.rs @@ -163,6 +163,7 @@ struct UserSchemaProvider { tables: Arc<HashMap<Arc<str>, Arc<QuerierTable>>>, } +#[async_trait] impl SchemaProvider for UserSchemaProvider { fn as_any(&self) -> &dyn Any { self as &dyn Any @@ -174,7 +175,7 @@ impl SchemaProvider for UserSchemaProvider { names } - fn table(&self, name: &str) -> Option<Arc<dyn TableProvider>> { + async fn table(&self, name: &str) -> Option<Arc<dyn TableProvider>> { self.tables.get(name).map(|t| Arc::clone(t) as _) } @@ -517,10 +518,10 @@ mod tests { "| | CoalescePartitionsExec |", "| | ProjectionExec: expr=[host@0 as host, perc@1 as perc, time@2 as time] |", "| | UnionExec |", - "| | CoalesceBatchesExec: target_batch_size=4096 |", + "| | CoalesceBatchesExec: target_batch_size=8192 |", "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |", - 
"| | CoalesceBatchesExec: target_batch_size=4096 |", + "| | CoalesceBatchesExec: target_batch_size=8192 |", "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |", "| | |", diff --git a/querier/src/system_tables/mod.rs b/querier/src/system_tables/mod.rs index 0429b55a1f..1ab101074a 100644 --- a/querier/src/system_tables/mod.rs +++ b/querier/src/system_tables/mod.rs @@ -43,6 +43,7 @@ impl SystemSchemaProvider { } } +#[async_trait] impl SchemaProvider for SystemSchemaProvider { fn as_any(&self) -> &dyn Any { self as &dyn Any @@ -55,7 +56,7 @@ impl SchemaProvider for SystemSchemaProvider { .collect() } - fn table(&self, name: &str) -> Option<Arc<dyn TableProvider>> { + async fn table(&self, name: &str) -> Option<Arc<dyn TableProvider>> { match name { QUERIES_TABLE => Some(Arc::clone(&self.queries)), _ => None, diff --git a/query_functions/src/gapfill.rs b/query_functions/src/gapfill.rs index 8158397e95..a6a5320e47 100644 --- a/query_functions/src/gapfill.rs +++ b/query_functions/src/gapfill.rs @@ -66,7 +66,7 @@ mod test { let times = Arc::new(TimestampNanosecondArray::from(vec![Some(1000)])); let rb = RecordBatch::try_from_iter(vec![("time", times as ArrayRef)])?; let ctx = context_with_table(rb); - let df = ctx.table("t")?.select(vec![date_bin_gapfill( + let df = ctx.table("t").await?.select(vec![date_bin_gapfill( lit_interval_milliseconds(360_000), col("time"), lit_timestamp_nano(0), diff --git a/query_functions/src/lib.rs b/query_functions/src/lib.rs index afb35e36c9..87ba94756c 100644 --- a/query_functions/src/lib.rs +++ b/query_functions/src/lib.rs @@ -12,8 +12,8 @@ )] use datafusion::{ - execution::{context::SessionState, FunctionRegistry}, - prelude::{lit, Expr}, + execution::FunctionRegistry, + prelude::{lit, Expr, SessionContext}, }; use group_by::WindowDuration; use window::EncodedWindowDuration; @@ -96,14 +96,12 @@ pub fn registry() -> &'static dyn FunctionRegistry { } /// registers scalar functions so they can be invoked via SQL -pub fn register_scalar_functions(mut state: SessionState) -> SessionState { +pub fn register_scalar_functions(ctx: &SessionContext) { let registry = registry(); for f in registry.udfs() { let udf = registry.udf(&f).unwrap(); - state.scalar_functions.insert(f, udf); + ctx.register_udf(udf.as_ref().clone()) } - - state } #[cfg(test)] @@ -131,6 +129,7 @@ mod test { let ctx = context_with_table(batch); let result = ctx .table("t") + .await .unwrap() .filter(regex_match_expr(col("data"), "Foo".into())) .unwrap() @@ -163,6 +162,7 @@ mod test { let ctx = context_with_table(batch); let result = ctx .table("t") + .await .unwrap() .filter(regex_not_match_expr(col("data"), "Foo".into())) .unwrap() @@ -191,6 +191,7 @@ mod test { let ctx = context_with_table(batch); let result = ctx .table("t") + .await .unwrap() .select(vec![ col("time"), diff --git a/query_functions/src/regex.rs b/query_functions/src/regex.rs index e6ea4fab36..ea5c6f0a93 100644 --- a/query_functions/src/regex.rs +++ b/query_functions/src/regex.rs @@ -343,7 +343,7 @@ mod test { .unwrap(); let ctx = context_with_table(rb); - let df = ctx.table("t").unwrap(); + let df = ctx.table("t").await.unwrap(); let df = df.filter(op).unwrap(); // execute the query diff --git a/query_functions/src/selectors.rs b/query_functions/src/selectors.rs index 5afd930c29..d901432742 100644 --- a/query_functions/src/selectors.rs +++ b/query_functions/src/selectors.rs @@ 
-22,9 +22,9 @@ use arrow::{ }; use datafusion::{ error::{DataFusionError, Result as DataFusionResult}, - execution::context::SessionState, logical_expr::{AccumulatorFunctionImplementation, Signature, TypeSignature, Volatility}, physical_plan::{udaf::AggregateUDF, Accumulator}, + prelude::SessionContext, scalar::ScalarValue, }; @@ -40,26 +40,11 @@ use internal::{ use schema::TIME_DATA_TYPE; /// registers selector functions so they can be invoked via SQL -pub fn register_selector_aggregates(mut state: SessionState) -> SessionState { - let first = struct_selector_first(); - let last = struct_selector_last(); - let min = struct_selector_min(); - let max = struct_selector_max(); - - //TODO make a nicer api for this in DataFusion - state - .aggregate_functions - .insert(first.name.to_string(), first); - - state - .aggregate_functions - .insert(last.name.to_string(), last); - - state.aggregate_functions.insert(min.name.to_string(), min); - - state.aggregate_functions.insert(max.name.to_string(), max); - - state +pub fn register_selector_aggregates(ctx: &SessionContext) { + ctx.register_udaf(struct_selector_first()); + ctx.register_udaf(struct_selector_last()); + ctx.register_udaf(struct_selector_min()); + ctx.register_udaf(struct_selector_max()); } /// Returns a DataFusion user defined aggregate function for computing @@ -76,11 +61,11 @@ pub fn register_selector_aggregates(mut state: SessionState) -> SessionState { /// /// If there are multiple rows with the minimum timestamp value, the /// value is arbitrary -pub fn struct_selector_first() -> Arc<AggregateUDF> { - Arc::new(make_uda( +pub fn struct_selector_first() -> AggregateUDF { + make_uda( "selector_first", FactoryBuilder::new(SelectorType::First, SelectorOutput::Struct), - )) + ) } /// Returns a DataFusion user defined aggregate function for computing @@ -97,11 +82,11 @@ pub fn struct_selector_first() -> Arc<AggregateUDF> { /// /// If there are multiple rows with the maximum timestamp value, the /// value is arbitrary -pub fn struct_selector_last() -> Arc<AggregateUDF> { - Arc::new(make_uda( +pub fn struct_selector_last() -> AggregateUDF { + make_uda( "selector_last", FactoryBuilder::new(SelectorType::Last, SelectorOutput::Struct), - )) + ) } /// Returns a DataFusion user defined aggregate function for computing @@ -118,11 +103,11 @@ pub fn struct_selector_last() -> Arc<AggregateUDF> { /// /// If there are multiple rows with the same minimum value, the value /// with the first (earliest/smallest) timestamp is chosen -pub fn struct_selector_min() -> Arc<AggregateUDF> { - Arc::new(make_uda( +pub fn struct_selector_min() -> AggregateUDF { + make_uda( "selector_min", FactoryBuilder::new(SelectorType::Min, SelectorOutput::Struct), - )) + ) } /// Returns a DataFusion user defined aggregate function for computing @@ -139,11 +124,11 @@ pub fn struct_selector_min() -> Arc<AggregateUDF> { /// /// If there are multiple rows with the same maximum value, the value /// with the first (earliest/smallest) timestamp is chosen -pub fn struct_selector_max() -> Arc<AggregateUDF> { - Arc::new(make_uda( +pub fn struct_selector_max() -> AggregateUDF { + make_uda( "selector_max", FactoryBuilder::new(SelectorType::Max, SelectorOutput::Struct), - )) + ) } /// Returns a DataFusion user defined aggregate function for computing @@ -1346,7 +1331,7 @@ mod test { let ctx = SessionContext::new(); ctx.register_table("t", Arc::new(provider)).unwrap(); - let df = ctx.table("t").unwrap(); + let df = ctx.table("t").await.unwrap(); let df = df.aggregate(vec![], aggs).unwrap(); 
// execute the query diff --git a/query_tests/cases/in/dedup_and_predicates_parquet.expected b/query_tests/cases/in/dedup_and_predicates_parquet.expected index f4067049ad..d56213f83e 100644 --- a/query_tests/cases/in/dedup_and_predicates_parquet.expected +++ b/query_tests/cases/in/dedup_and_predicates_parquet.expected @@ -33,23 +33,22 @@ +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A'; -- Results After Normalizing UUIDs -+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: tag@2 = A | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | | -+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | +| | Filter: table.tag = Dictionary(Int32, Utf8("A")) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | +| physical_plan | ProjectionExec: expr=[bar@0 
as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2; +-----+-----+-----+----------------------+ | bar | foo | tag | time | @@ -58,23 +57,22 @@ +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: foo@1 = 1 AND bar@0 = 2 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| 
logical_plan | Projection: table.bar, table.foo, table.tag, table.time | +| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | +| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: foo@1 = 1 AND bar@0 = 2 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag; +-----+-----+-----+----------------------+ | bar | foo | tag | time | @@ -94,7 +92,7 @@ | physical_plan | SortExec: [tag@2 ASC NULLS LAST] | | | CoalescePartitionsExec | | | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: time@3 = 0 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | DeduplicateExec: [tag@2 ASC,time@3 ASC] | @@ -112,20 +110,19 @@ +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00'); -- Results After Normalizing UUIDs -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | 
UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | | -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | +| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | +| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | | 
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected index c7a05cd987..c7fa1ecd41 100644 --- a/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected +++ b/query_tests/cases/in/dedup_and_predicates_parquet_ingester.expected @@ -34,24 +34,23 @@ +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A'; -- Results After Normalizing UUIDs -+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: tag@2 = A | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: [tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | | -+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | +| | 
Filter: table.tag = Dictionary(Int32, Utf8("A")) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | +| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: [tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2; +-----+-----+-----+----------------------+ | bar | foo | tag | time | @@ -60,24 +59,23 @@ +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: foo@1 = 1 AND bar@0 = 2 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: [tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | +| 
| Filter: table.foo = Float64(1) AND table.bar = Float64(2) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | +| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: foo@1 = 1 AND bar@0 = 2 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: [tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag; +-----+-----+-----+----------------------+ | bar | foo | tag | time | @@ -97,7 +95,7 @@ | physical_plan | SortExec: [tag@2 ASC NULLS LAST] | | | CoalescePartitionsExec | | | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: time@3 = 0 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | DeduplicateExec: [tag@2 ASC,time@3 ASC] | @@ -116,21 +114,20 @@ +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00'); -- Results After Normalizing UUIDs -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) 
AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: [tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | | -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | +| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | +| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: [tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/duplicates_ingester.expected b/query_tests/cases/in/duplicates_ingester.expected index 3c5fa34bbe..73cad6fe3c 100644 --- a/query_tests/cases/in/duplicates_ingester.expected +++ b/query_tests/cases/in/duplicates_ingester.expected @@ -25,24 +25,25 @@ 
+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select time, state, city, min_temp, max_temp, area from h2o; -- Results After Normalizing UUIDs -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | -| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | -| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | -| | UnionExec | -| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | -| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] | -| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | +| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | +| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | UnionExec | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: 
[[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] | +| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select state as name from h2o UNION ALL select city as name from h2o; -- Results After Normalizing UUIDs +---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/duplicates_parquet.expected b/query_tests/cases/in/duplicates_parquet.expected index 57f3aee984..14b4a28b8d 100644 --- a/query_tests/cases/in/duplicates_parquet.expected +++ b/query_tests/cases/in/duplicates_parquet.expected @@ -22,21 +22,22 @@ +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select time, state, city, min_temp, max_temp, area from h2o; -- Results After Normalizing UUIDs -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | -| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | -| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | -| | UnionExec | -| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], 
[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | +| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | +| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | UnionExec | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[area, city, max_temp, min_temp, state, time] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select state as name from h2o UNION ALL select city as name from h2o; -- Results After Normalizing UUIDs +---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -84,7 +85,7 @@ ---------- | Plan with Metrics | CoalescePartitionsExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | | | ProjectionExec: expr=[area@0 as area, city@1 as city, max_temp@2 as max_temp, min_temp@3 as min_temp, state@4 as state, time@5 as time], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | -| | CoalesceBatchesExec: target_batch_size=4096, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | +| | CoalesceBatchesExec: target_batch_size=8192, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | | | FilterExec: state@4 = MA, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | | 
| RepartitionExec: partitioning=RoundRobinBatch(4), metrics=[fetch_time=1.234ms, repart_time=1.234ms, send_time=1.234ms] | | | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | diff --git a/query_tests/cases/in/pushdown.expected b/query_tests/cases/in/pushdown.expected index 8a472e36f1..336aa522df 100644 --- a/query_tests/cases/in/pushdown.expected +++ b/query_tests/cases/in/pushdown.expected @@ -14,15 +14,16 @@ +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant; -- Results After Normalizing UUIDs -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | TableScan: restaurant projection=[count, system, time, town] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, projection=[count, system, time, town] | -| | | -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | TableScan: restaurant projection=[count, system, time, town] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, projection=[count, system, time, town] | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where count > 200; -- Results After Sorting +-------+--------+--------------------------------+-----------+ @@ -44,7 +45,7 @@ | | Filter: restaurant.count > UInt64(200) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] | @@ -59,7 +60,7 @@ | | Filter: CAST(restaurant.count AS Float64) > Float64(200) | | | TableScan: restaurant 
projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: CAST(count@0 AS Float64) > 200 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] | @@ -74,7 +75,7 @@ | | Filter: restaurant.system > Float64(4) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 4 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] | @@ -100,7 +101,7 @@ | | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND town@3 != tewsbury | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] | @@ -125,7 +126,7 @@ | | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, 
projection=[count, system, time, town] | @@ -149,7 +150,7 @@ | | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] | @@ -175,7 +176,7 @@ | | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND count@0 < 40000 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] | @@ -202,7 +203,7 @@ | | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 4 AND system@1 < 7 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] | @@ -226,7 +227,7 @@ | | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | 
CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 5 AND system@1 < 7 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] | @@ -249,7 +250,7 @@ | | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] | @@ -271,7 +272,7 @@ | | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] | @@ -290,9 +291,9 @@ | | Filter: Float64(5) < restaurant.system AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) AND restaurant.time > TimestampNanosecond(130, None) | | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[Float64(5) < restaurant.system, restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = 
Dictionary(Int32, Utf8("reading")), restaurant.time > TimestampNanosecond(130, None)] | | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: 5 < system@1 AND town@3 != tewsbury AND system@1 < 7 AND count@0 = 632 OR town@3 = reading AND time@2 > 130 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: 5 < system@1 AND town@3 != tewsbury AND system@1 < 7 AND count@0 = 632 OR town@3 = reading AND time@2 > 130 | | | EmptyExec: produce_one_row=false | | | | +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -320,18 +321,18 @@ +-------+--------+--------------------------------+---------+ -- SQL: EXPLAIN SELECT * from restaurant where influx_regex_match(town, 'foo|bar|baz') and influx_regex_not_match(town, 'one|two'); -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: (CAST(restaurant.town AS Utf8)restaurant.town ~ Utf8("foo|bar|baz")) AND (CAST(restaurant.town AS Utf8)restaurant.town !~ Utf8("one|two")) | -| | Projection: CAST(restaurant.town AS Utf8) AS CAST(restaurant.town AS Utf8)restaurant.town, restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.town AS Utf8) AS restaurant.town ~ Utf8("foo|bar|baz") AS influx_regex_match(restaurant.town,Utf8("foo|bar|baz")), CAST(restaurant.town AS Utf8) AS restaurant.town !~ Utf8("one|two") AS influx_regex_not_match(restaurant.town,Utf8("one|two")), CAST(restaurant.town AS Utf8) ~ Utf8("foo|bar|baz"), CAST(restaurant.town AS Utf8) !~ Utf8("one|two")] | -| physical_plan | ProjectionExec: expr=[count@1 as count, system@2 as system, time@3 as time, town@4 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: CAST(restaurant.town AS Utf8)restaurant.town@0 ~ foo|bar|baz AND CAST(restaurant.town AS Utf8)restaurant.town@0 !~ one|two | -| | ProjectionExec: expr=[CAST(town@3 AS Utf8) as CAST(restaurant.town AS 
Utf8)restaurant.town, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=(CAST(town AS Utf8) AS restaurant.town ~ Utf8("foo|bar|baz")) AND (CAST(town AS Utf8) AS restaurant.town !~ Utf8("one|two")) AND (CAST(town AS Utf8) ~ Utf8("foo|bar|baz")) AND (CAST(town AS Utf8) !~ Utf8("one|two")), projection=[count, system, time, town] | -| | | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: (CAST(restaurant.town AS Utf8)restaurant.town LIKE Utf8("%foo%") OR CAST(restaurant.town AS Utf8)restaurant.town LIKE Utf8("%bar%") OR CAST(restaurant.town AS Utf8)restaurant.town LIKE Utf8("%baz%")) AND CAST(restaurant.town AS Utf8)restaurant.town NOT LIKE Utf8("%one%") AND CAST(restaurant.town AS Utf8)restaurant.town NOT LIKE Utf8("%two%") | +| | Projection: CAST(restaurant.town AS Utf8) AS CAST(restaurant.town AS Utf8)restaurant.town, restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.town AS Utf8) AS restaurant.town LIKE Utf8("%foo%") OR CAST(restaurant.town AS Utf8) AS restaurant.town LIKE 
Utf8("%bar%") OR CAST(restaurant.town AS Utf8) AS restaurant.town LIKE Utf8("%baz%") AS influx_regex_match(restaurant.town,Utf8("foo|bar|baz")), CAST(restaurant.town AS Utf8) AS restaurant.town NOT LIKE Utf8("%one%") AND CAST(restaurant.town AS Utf8) AS restaurant.town NOT LIKE Utf8("%two%") AS influx_regex_not_match(restaurant.town,Utf8("one|two")), CAST(restaurant.town AS Utf8) LIKE Utf8("%foo%") OR CAST(restaurant.town AS Utf8) LIKE Utf8("%bar%") OR CAST(restaurant.town AS Utf8) LIKE Utf8("%baz%"), CAST(restaurant.town AS Utf8) NOT LIKE Utf8("%one%"), CAST(restaurant.town AS Utf8) NOT LIKE Utf8("%two%")] | +| physical_plan | ProjectionExec: expr=[count@1 as count, system@2 as system, time@3 as time, town@4 as town] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %foo% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %bar% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %baz% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %one% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %two% | +| | ProjectionExec: expr=[CAST(town@3 AS Utf8) as CAST(restaurant.town AS Utf8)restaurant.town, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=(CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%foo%") OR CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%bar%") OR CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%baz%")) AND CAST(town AS Utf8) AS restaurant.town NOT LIKE Utf8("%one%") AND CAST(town AS Utf8) AS restaurant.town NOT LIKE Utf8("%two%") AND (CAST(town AS Utf8) LIKE Utf8("%foo%") OR CAST(town AS Utf8) LIKE Utf8("%bar%") OR CAST(town AS Utf8) LIKE Utf8("%baz%")) AND CAST(town AS Utf8) NOT LIKE Utf8("%one%") AND CAST(town AS Utf8) NOT LIKE Utf8("%two%"), projection=[count, system, time, town] | +| | | ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/retention.expected b/query_tests/cases/in/retention.expected index ec8856fb1f..4f1229c635 100644 --- a/query_tests/cases/in/retention.expected +++ b/query_tests/cases/in/retention.expected @@ -9,30 +9,27 @@ +------+------+----------------------+ -- SQL: EXPLAIN SELECT * FROM cpu order by host, load, time; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | 
-+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.load ASC NULLS LAST, cpu.time ASC NULLS LAST | -| | Projection: cpu.host, cpu.load, cpu.time | -| | TableScan: cpu projection=[host, load, time] | -| physical_plan | SortExec: [host@0 ASC NULLS LAST,load@1 ASC NULLS LAST,time@2 ASC NULLS LAST] | -| | CoalescePartitionsExec | -| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [host@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | -| | SortExec: [host@0 ASC,time@2 ASC] | -| | UnionExec | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.load ASC NULLS LAST, cpu.time ASC NULLS LAST | +| | Projection: cpu.host, cpu.load, cpu.time | +| | TableScan: cpu projection=[host, load, time] | +| physical_plan | SortExec: [host@0 ASC NULLS LAST,load@1 ASC NULLS LAST,time@2 ASC NULLS LAST] | +| | CoalescePartitionsExec | +| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [host@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | +| | UnionExec | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | | 
++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time; +------+------+----------------------+ | host | load | time | @@ -42,30 +39,27 @@ +------+------+----------------------+ -- SQL: EXPLAIN SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time; -- Results After Normalizing UUIDs -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST | -| | Projection: cpu.host, cpu.load, cpu.time | -| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) | -| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] | -| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] | -| | CoalescePartitionsExec | -| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: host@0 != b | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [host@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | -| | SortExec: [host@0 ASC,time@2 ASC] | -| | UnionExec | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| 
plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST | +| | Projection: cpu.host, cpu.load, cpu.time | +| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) | +| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] | +| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] | +| | CoalescePartitionsExec | +| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: host@0 != b | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [host@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | +| | UnionExec | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@2 < -9223372036854775808 OR time@2 > -3600000000000 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/several_chunks.expected b/query_tests/cases/in/several_chunks.expected index 62848cefb2..e028700b8c 100644 --- a/query_tests/cases/in/several_chunks.expected +++ b/query_tests/cases/in/several_chunks.expected @@ -14,25 +14,26 @@ +---------+------------+-------+------+--------------------------------+ -- SQL: EXPLAIN SELECT * from h2o; -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | TableScan: h2o projection=[city, other_temp, state, temp, time] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | UnionExec | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 
ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[city, other_temp, state, temp, time] | -| | | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | +| | TableScan: h2o projection=[city, other_temp, state, temp, time] | +| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | UnionExec | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[city, other_temp, state, temp, time] | +| | | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: select temp, other_temp, time 
from h2o; -- Results After Sorting +------+------------+--------------------------------+ @@ -80,7 +81,7 @@ | | Filter: h2o.time >= TimestampNanosecond(250, None) | | | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] | | physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | +| | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: time@4 >= 250 | | | RepartitionExec: partitioning=RoundRobinBatch(4) | | | UnionExec | diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index c8096ec041..f3f8aa6442 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -28,7 +28,7 @@ bytes = { version = "1", features = ["std"] } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] } crossbeam-utils = { version = "0.8", features = ["std"] } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "07f49803a3d7a9e9b3c2c9a7714c1bb08db71385", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } +datafusion = { git = "https://github.com/alamb/arrow-datafusion.git", branch = "alamb/patched_for_iox", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] } either = { version = "1", features = ["use_std"] } fixedbitset = { version = "0.4", features = ["std"] }
5639db7c84d5d5156df0fd49f7feb438e6a1917b
Dom Dwyer
2023-01-11 14:21:19
ref-link lack of 1.66.1 issue
Link to the upstream issue.
null
docs: ref-link lack of 1.66.1 issue Link to the upstream issue.
diff --git a/.circleci/config.yml b/.circleci/config.yml index a88021d286..77703f3ef6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -392,6 +392,8 @@ jobs: command: | COMMIT_SHA="$(git rev-parse HEAD)" + # FIXME: revert this override - https://github.com/rust-lang/docker-rust/issues/128 + # RUST_VERSION="$(sed -E -ne 's/channel = "(.*)"/\1/p' rust-toolchain.toml)" RUST_VERSION="1.66" docker buildx build \
f8b1d37d5a02643651571a02eb8f4a43e56f780f
Stuart Carnie
2023-05-05 10:11:37
Consistent alias names with InfluxQL OG
Fixes #7750
null
fix: Consistent alias names with InfluxQL OG Fixes #7750
diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index 5430dbdd58..b8310946e3 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -1003,11 +1003,11 @@ tags: non_existent= +---------------------+-------+--------------------+--------------------+ -- InfluxQL: SELECT COUNT(f64), COUNT(f64) + COUNT(f64), COUNT(f64) * 3 FROM m0; name: m0 -+---------------------+-------+---------------------+-----------+ -| time | count | count_f64_count_f64 | count_f64 | -+---------------------+-------+---------------------+-----------+ -| 1970-01-01T00:00:00 | 7 | 14 | 21 | -+---------------------+-------+---------------------+-----------+ ++---------------------+-------+-------------+---------+ +| time | count | count_count | count_1 | ++---------------------+-------+-------------+---------+ +| 1970-01-01T00:00:00 | 7 | 14 | 21 | ++---------------------+-------+-------------+---------+ -- InfluxQL: SELECT COUNT(f64) as the_count, SUM(non_existent) as foo FROM m0; name: m0 +---------------------+-----------+-----+ @@ -1052,31 +1052,31 @@ name: disk +---------------------+------+--------+ -- InfluxQL: SELECT MEAN(usage_idle) + MEAN(bytes_free) FROM cpu, disk; name: cpu -+---------------------+---------------------------------+ -| time | mean_usage_idle_mean_bytes_free | -+---------------------+---------------------------------+ -| 1970-01-01T00:00:00 | | -+---------------------+---------------------------------+ ++---------------------+-----------+ +| time | mean_mean | ++---------------------+-----------+ +| 1970-01-01T00:00:00 | | ++---------------------+-----------+ name: disk -+---------------------+---------------------------------+ -| time | mean_usage_idle_mean_bytes_free | -+---------------------+---------------------------------+ -| 1970-01-01T00:00:00 | | -+---------------------+---------------------------------+ ++---------------------+-----------+ +| time | mean_mean | ++---------------------+-----------+ +| 1970-01-01T00:00:00 | | ++---------------------+-----------+ -- InfluxQL: SELECT MEAN(usage_idle) + MEAN(foo) FROM cpu; name: cpu -+---------------------+--------------------------+ -| time | mean_usage_idle_mean_foo | -+---------------------+--------------------------+ -| 1970-01-01T00:00:00 | | -+---------------------+--------------------------+ ++---------------------+-----------+ +| time | mean_mean | ++---------------------+-----------+ +| 1970-01-01T00:00:00 | | ++---------------------+-----------+ -- InfluxQL: SELECT MEAN(usage_idle), MEAN(usage_idle) + MEAN(foo) FROM cpu; name: cpu -+---------------------+--------------------+--------------------------+ -| time | mean | mean_usage_idle_mean_foo | -+---------------------+--------------------+--------------------------+ -| 1970-01-01T00:00:00 | 1.9850000000000003 | | -+---------------------+--------------------+--------------------------+ ++---------------------+--------------------+-----------+ +| time | mean | mean_mean | ++---------------------+--------------------+-----------+ +| 1970-01-01T00:00:00 | 1.9850000000000003 | | ++---------------------+--------------------+-----------+ -- InfluxQL: SELECT MEAN(foo) FROM cpu; ++ ++ @@ -1084,47 +1084,47 @@ name: cpu -- InfluxQL: SELECT MEAN(usage_idle) + MEAN(foo) FROM cpu GROUP BY cpu; name: cpu tags: cpu=cpu-total -+---------------------+--------------------------+ -| time | 
mean_usage_idle_mean_foo | -+---------------------+--------------------------+ -| 1970-01-01T00:00:00 | | -+---------------------+--------------------------+ ++---------------------+-----------+ +| time | mean_mean | ++---------------------+-----------+ +| 1970-01-01T00:00:00 | | ++---------------------+-----------+ name: cpu tags: cpu=cpu0 -+---------------------+--------------------------+ -| time | mean_usage_idle_mean_foo | -+---------------------+--------------------------+ -| 1970-01-01T00:00:00 | | -+---------------------+--------------------------+ ++---------------------+-----------+ +| time | mean_mean | ++---------------------+-----------+ +| 1970-01-01T00:00:00 | | ++---------------------+-----------+ name: cpu tags: cpu=cpu1 -+---------------------+--------------------------+ -| time | mean_usage_idle_mean_foo | -+---------------------+--------------------------+ -| 1970-01-01T00:00:00 | | -+---------------------+--------------------------+ ++---------------------+-----------+ +| time | mean_mean | ++---------------------+-----------+ +| 1970-01-01T00:00:00 | | ++---------------------+-----------+ -- InfluxQL: SELECT MEAN(usage_idle), MEAN(usage_idle) + MEAN(foo) FROM cpu GROUP BY cpu; name: cpu tags: cpu=cpu-total -+---------------------+--------------------+--------------------------+ -| time | mean | mean_usage_idle_mean_foo | -+---------------------+--------------------+--------------------------+ -| 1970-01-01T00:00:00 | 2.9850000000000003 | | -+---------------------+--------------------+--------------------------+ ++---------------------+--------------------+-----------+ +| time | mean | mean_mean | ++---------------------+--------------------+-----------+ +| 1970-01-01T00:00:00 | 2.9850000000000003 | | ++---------------------+--------------------+-----------+ name: cpu tags: cpu=cpu0 -+---------------------+-------+--------------------------+ -| time | mean | mean_usage_idle_mean_foo | -+---------------------+-------+--------------------------+ -| 1970-01-01T00:00:00 | 0.985 | | -+---------------------+-------+--------------------------+ ++---------------------+-------+-----------+ +| time | mean | mean_mean | ++---------------------+-------+-----------+ +| 1970-01-01T00:00:00 | 0.985 | | ++---------------------+-------+-----------+ name: cpu tags: cpu=cpu1 -+---------------------+--------------------+--------------------------+ -| time | mean | mean_usage_idle_mean_foo | -+---------------------+--------------------+--------------------------+ -| 1970-01-01T00:00:00 | 1.9849999999999999 | | -+---------------------+--------------------+--------------------------+ ++---------------------+--------------------+-----------+ +| time | mean | mean_mean | ++---------------------+--------------------+-----------+ +| 1970-01-01T00:00:00 | 1.9849999999999999 | | ++---------------------+--------------------+-----------+ -- InfluxQL: SELECT MEAN(foo) FROM cpu GROUP BY cpu; ++ ++ diff --git a/iox_query_influxql/src/plan/field.rs b/iox_query_influxql/src/plan/field.rs index c7cad73ccc..158636a550 100644 --- a/iox_query_influxql/src/plan/field.rs +++ b/iox_query_influxql/src/plan/field.rs @@ -66,7 +66,7 @@ impl<'a> Visitor for BinaryExprNameVisitor<'a> { fn pre_visit_call(self, n: &Call) -> Result<Recursion<Self>, Self::Error> { self.0.push(n.name.clone()); - Ok(Recursion::Continue(self)) + Ok(Recursion::Stop(self)) } } @@ -102,7 +102,7 @@ mod test { assert_eq!(field_name(&f), "count"); let f = get_first_field("SELECT COUNT(usage) + SUM(usage_idle) FROM cpu"); - 
assert_eq!(field_name(&f), "count_usage_sum_usage_idle"); + assert_eq!(field_name(&f), "count_sum"); let f = get_first_field("SELECT 1+2 FROM cpu"); assert_eq!(field_name(&f), ""); diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs index 443e135cfd..11ec8fae99 100644 --- a/iox_query_influxql/src/plan/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -3239,11 +3239,11 @@ mod test { // The `COUNT(f64_field)` aggregate is only projected ones in the Aggregate and reused in the projection assert_snapshot!(plan("SELECT COUNT(f64_field), COUNT(f64_field) + COUNT(f64_field), COUNT(f64_field) * 3 FROM data"), @r###" - Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_f64_field_count_f64_field:Int64;N, count_f64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count, COUNT(data.f64_field) + COUNT(data.f64_field) AS count_f64_field_count_f64_field, COUNT(data.f64_field) * Int64(3) AS count_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_f64_field_count_f64_field:Int64;N, count_f64_field:Int64;N] - Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] - TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] - "###); + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_count:Int64;N, count_1:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count, COUNT(data.f64_field) + COUNT(data.f64_field) AS count_count, COUNT(data.f64_field) * Int64(3) AS count_1 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_count:Int64;N, count_1:Int64;N] + Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); // non-existent tags are excluded from the Aggregate groupBy and Sort operators assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY foo, non_existent"), @r###" @@ -3255,31 +3255,31 @@ mod test { // Aggregate expression is projected once and reused in final projection assert_snapshot!(plan("SELECT COUNT(f64_field), COUNT(f64_field) * 2 FROM data"), @r###" - Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_f64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count, COUNT(data.f64_field) * Int64(2) AS count_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_f64_field:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_1:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, 
TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count, COUNT(data.f64_field) * Int64(2) AS count_1 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_1:Int64;N] Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // Aggregate expression selecting non-existent field assert_snapshot!(plan("SELECT MEAN(f64_field) + MEAN(non_existent) FROM data"), @r###" - Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean_f64_field_mean_non_existent:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, NULL AS mean_f64_field_mean_non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean_f64_field_mean_non_existent:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean_mean:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, NULL AS mean_mean [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean_mean:Null;N] EmptyRelation [] "###); // Aggregate expression with GROUP BY and non-existent field assert_snapshot!(plan("SELECT MEAN(f64_field) + MEAN(non_existent) FROM data GROUP BY foo"), @r###" - Sort: foo ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, mean_f64_field_mean_non_existent:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, data.foo AS foo, NULL AS mean_f64_field_mean_non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, mean_f64_field_mean_non_existent:Null;N] + Sort: foo ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, mean_mean:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, data.foo AS foo, NULL AS mean_mean [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, mean_mean:Null;N] Aggregate: groupBy=[[data.foo]], aggr=[[]] [foo:Dictionary(Int32, Utf8);N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // Aggregate expression selecting tag, should treat as non-existent assert_snapshot!(plan("SELECT MEAN(f64_field), MEAN(f64_field) + MEAN(non_existent) FROM data"), @r###" - Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean:Float64;N, mean_f64_field_mean_non_existent:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, AVG(data.f64_field) AS mean, NULL AS mean_f64_field_mean_non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean:Float64;N, 
mean_f64_field_mean_non_existent:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean:Float64;N, mean_mean:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, AVG(data.f64_field) AS mean, NULL AS mean_mean [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), mean:Float64;N, mean_mean:Null;N] Aggregate: groupBy=[[]], aggr=[[AVG(data.f64_field)]] [AVG(data.f64_field):Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); @@ -3396,8 +3396,8 @@ mod test { // Aggregates as part of a binary expression assert_snapshot!(plan("SELECT COUNT(f64_field) + MEAN(f64_field) FROM data GROUP BY TIME(10s) FILL(3.2)"), @r###" - Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count_f64_field_mean_f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, time, coalesce_struct(COUNT(data.f64_field), Int64(3)) + coalesce_struct(AVG(data.f64_field), Float64(3.2)) AS count_f64_field_mean_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count_f64_field_mean_f64_field:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count_mean:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, time, coalesce_struct(COUNT(data.f64_field), Int64(3)) + coalesce_struct(AVG(data.f64_field), Float64(3.2)) AS count_mean [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count_mean:Float64;N] GapFill: groupBy=[[time]], aggr=[[COUNT(data.f64_field), AVG(data.f64_field)]], time_column=time, stride=IntervalMonthDayNano("10000000000"), range=Unbounded..Excluded(now()) [time:Timestamp(Nanosecond, None);N, COUNT(data.f64_field):Int64;N, AVG(data.f64_field):Float64;N] Aggregate: groupBy=[[datebin(IntervalMonthDayNano("10000000000"), data.time, TimestampNanosecond(0, None)) AS time]], aggr=[[COUNT(data.f64_field), AVG(data.f64_field)]] [time:Timestamp(Nanosecond, None);N, COUNT(data.f64_field):Int64;N, AVG(data.f64_field):Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N]
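The commit above aligns IOx with InfluxQL OG when deriving an automatic alias for a binary expression over aggregate calls: the name-building visitor stops recursing once it reaches a call, so only the call names are joined (`COUNT(f64_field) + MEAN(f64_field)` becomes `count_mean`, not `count_f64_field_mean_f64_field`). Below is a minimal, self-contained sketch of that behavior using a simplified expression type, not the real InfluxQL AST or visitor traits:

```rust
#[allow(dead_code)] // `args` exists only to show that call arguments are ignored
enum Expr {
    Call { name: String, args: Vec<Expr> },
    Binary(Box<Expr>, Box<Expr>),
    Ident(String),
}

fn collect_names(e: &Expr, out: &mut Vec<String>) {
    match e {
        // stop at the call: its arguments no longer contribute to the alias
        Expr::Call { name, .. } => out.push(name.clone()),
        Expr::Binary(lhs, rhs) => {
            collect_names(lhs, out);
            collect_names(rhs, out);
        }
        Expr::Ident(_) => {}
    }
}

fn field_name(e: &Expr) -> String {
    let mut names = Vec::new();
    collect_names(e, &mut names);
    names.join("_")
}

fn main() {
    // COUNT(usage) + SUM(usage_idle)  ->  "count_sum" (previously "count_usage_sum_usage_idle")
    let expr = Expr::Binary(
        Box::new(Expr::Call {
            name: "count".into(),
            args: vec![Expr::Ident("usage".into())],
        }),
        Box::new(Expr::Call {
            name: "sum".into(),
            args: vec![Expr::Ident("usage_idle".into())],
        }),
    );
    assert_eq!(field_name(&expr), "count_sum");
    println!("{}", field_name(&expr));
}
```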
0c347e8e640e331d6e677369bf37de8e385a321d
Marco Neumann
2023-07-20 10:31:49
batch partition catalog requests in querier (#8269)
This is mostly wiring that builds on top of the other PRs linked to #8089. I think we could eventually make the batching code nicer by adding better wrappers / helpers, but let's do that if we have other batched caches and this pattern proves to be useful. Closes #8089.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: batch partition catalog requests in querier (#8269) This is mostly wiring that builds on top of the other PRs linked to #8089. I think we could eventually make the batching code nicer by adding better wrappers / helpers, but let's do that if we have other batched caches and this pattern proves to be useful. Closes #8089. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/querier/src/cache/partition.rs b/querier/src/cache/partition.rs index fd5fa52cd3..0e79fd26a3 100644 --- a/querier/src/cache/partition.rs +++ b/querier/src/cache/partition.rs @@ -8,7 +8,11 @@ use cache_system::{ PolicyBackend, }, cache::{driver::CacheDriver, metrics::CacheWithMetrics, Cache}, - loader::{metrics::MetricsLoader, FunctionLoader}, + loader::{ + batch::{BatchLoader, BatchLoaderFlusher, BatchLoaderFlusherExt}, + metrics::MetricsLoader, + FunctionLoader, + }, resource_consumption::FunctionEstimator, }; use data_types::{ @@ -16,17 +20,17 @@ use data_types::{ ColumnId, Partition, PartitionId, TransitionPartitionId, }; use datafusion::scalar::ScalarValue; -use iox_catalog::{interface::Catalog, partition_lookup}; +use iox_catalog::{interface::Catalog, partition_lookup_batch}; use iox_query::chunk_statistics::{ColumnRange, ColumnRanges}; use iox_time::TimeProvider; use observability_deps::tracing::debug; use schema::sort::SortKey; use std::{ - collections::{HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet}, mem::{size_of, size_of_val}, sync::Arc, }; -use trace::span::Span; +use trace::span::{Span, SpanRecorder}; use super::{namespace::CachedTable, ram::RamSize}; @@ -46,6 +50,7 @@ type CacheT = Box< pub struct PartitionCache { cache: CacheT, remove_if_handle: RemoveIfHandle<PartitionId, Option<CachedPartition>>, + flusher: Arc<dyn BatchLoaderFlusher>, } impl PartitionCache { @@ -58,24 +63,61 @@ impl PartitionCache { ram_pool: Arc<ResourcePool<RamSize>>, testing: bool, ) -> Self { - let loader = - FunctionLoader::new(move |partition_id: PartitionId, extra: Arc<CachedTable>| { + let loader = FunctionLoader::new( + move |partition_ids: Vec<PartitionId>, cached_tables: Vec<Arc<CachedTable>>| { + // sanity checks + assert_eq!(partition_ids.len(), cached_tables.len()); + assert!(!partition_ids.is_empty()); + let cached_table = Arc::clone(&cached_tables[0]); + assert!(cached_tables.iter().all(|t| Arc::ptr_eq(t, &cached_table))); + let catalog = Arc::clone(&catalog); let backoff_config = backoff_config.clone(); async move { - let partition = Backoff::new(&backoff_config) + // prepare output buffer + let mut out = (0..partition_ids.len()).map(|_| None).collect::<Vec<_>>(); + let mut out_map = + HashMap::<PartitionId, usize>::with_capacity(partition_ids.len()); + for (idx, id) in partition_ids.iter().enumerate() { + match out_map.entry(*id) { + Entry::Occupied(_) => unreachable!("cache system requested same partition from loader concurrently, this should have been prevented by the CacheDriver"), + Entry::Vacant(v) => { + v.insert(idx); + } + } + } + + // build `&[&TransitionPartitionId]` for batch catalog request + let ids = partition_ids + .iter() + .copied() + .map(TransitionPartitionId::Deprecated) + .collect::<Vec<_>>(); + let ids = ids.iter().collect::<Vec<_>>(); + + // fetch catalog data + let partitions = Backoff::new(&backoff_config) .retry_all_errors("get partition_key", || async { let mut repos = catalog.repositories().await; - let id = TransitionPartitionId::Deprecated(partition_id); - partition_lookup(repos.as_mut(), &id).await + partition_lookup_batch(repos.as_mut(), &ids).await }) .await - .expect("retry forever")?; + .expect("retry forever"); - Some(CachedPartition::new(partition, &extra)) + // build output + for p in partitions { + let p = CachedPartition::new(p, &cached_table); + let idx = out_map[&p.id]; + out[idx] = Some(p); + } + + out } - }); + }, + ); + let loader = Arc::new(BatchLoader::new(loader)); + let flusher = Arc::clone(&loader); let 
loader = Arc::new(MetricsLoader::new( loader, CACHE_ID, @@ -111,51 +153,79 @@ impl PartitionCache { Self { cache, remove_if_handle, + flusher, } } /// Get cached partition. /// + /// The result only contains existing partitions. The order is undefined. + /// /// Expire partition if the cached sort key does NOT cover the given set of columns. pub async fn get( &self, cached_table: Arc<CachedTable>, - partition_id: PartitionId, - sort_key_should_cover: &[ColumnId], + partitions: Vec<PartitionRequest>, span: Option<Span>, - ) -> Option<CachedPartition> { - self.remove_if_handle - .remove_if_and_get( - &self.cache, - partition_id, - |cached_partition| { - let invalidates = - if let Some(sort_key) = &cached_partition.and_then(|p| p.sort_key) { - sort_key_should_cover - .iter() - .any(|col| !sort_key.column_set.contains(col)) - } else { - // no sort key at all => need to update if there is anything to cover - !sort_key_should_cover.is_empty() - }; - - if invalidates { - debug!( - partition_id = partition_id.get(), - "invalidate partition cache", - ); - } + ) -> Vec<CachedPartition> { + let span_recorder = SpanRecorder::new(span); + + let futures = partitions + .into_iter() + .map( + |PartitionRequest { + partition_id, + sort_key_should_cover, + }| { + let cached_table = Arc::clone(&cached_table); + let span = span_recorder.child_span("single partition cache lookup"); + + self.remove_if_handle.remove_if_and_get( + &self.cache, + partition_id, + move |cached_partition| { + let invalidates = if let Some(sort_key) = + &cached_partition.and_then(|p| p.sort_key) + { + sort_key_should_cover + .iter() + .any(|col| !sort_key.column_set.contains(col)) + } else { + // no sort key at all => need to update if there is anything to cover + !sort_key_should_cover.is_empty() + }; + + if invalidates { + debug!( + partition_id = partition_id.get(), + "invalidate partition cache", + ); + } - invalidates + invalidates + }, + (cached_table, span), + ) }, - (cached_table, span), ) - .await + .collect(); + + let res = self.flusher.auto_flush(futures).await; + + res.into_iter().flatten().collect() } } +/// Request for [`PartitionCache::get`]. 
+#[derive(Debug)] +pub struct PartitionRequest { + pub partition_id: PartitionId, + pub sort_key_should_cover: Vec<ColumnId>, +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct CachedPartition { + pub id: PartitionId, pub sort_key: Option<Arc<PartitionSortKey>>, pub column_ranges: ColumnRanges, } @@ -231,6 +301,7 @@ impl CachedPartition { column_ranges.shrink_to_fit(); Self { + id: partition.id, sort_key, column_ranges: Arc::new(column_ranges), } @@ -298,6 +369,7 @@ mod tests { use crate::cache::{ ram::test_util::test_ram_pool, test_util::assert_catalog_access_metric_count, }; + use async_trait::async_trait; use data_types::{partition_template::TablePartitionTemplateOverride, ColumnType}; use generated_types::influxdata::iox::partition_template::v1::{ template_part::Part, PartitionTemplate, TemplatePart, @@ -348,7 +420,7 @@ mod tests { ); let sort_key1a = cache - .get(Arc::clone(&cached_table), p1.id, &Vec::new(), None) + .get_one(Arc::clone(&cached_table), p1.id, &Vec::new(), None) .await .unwrap() .sort_key; @@ -360,18 +432,26 @@ mod tests { column_order: [c1.column.id, c2.column.id].into(), } ); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 1); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); let sort_key2 = cache - .get(Arc::clone(&cached_table), p2.id, &Vec::new(), None) + .get_one(Arc::clone(&cached_table), p2.id, &Vec::new(), None) .await .unwrap() .sort_key; assert_eq!(sort_key2, None); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 2); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 2, + ); let sort_key1b = cache - .get(Arc::clone(&cached_table), p1.id, &Vec::new(), None) + .get_one(Arc::clone(&cached_table), p1.id, &Vec::new(), None) .await .unwrap() .sort_key; @@ -379,12 +459,16 @@ mod tests { sort_key1a.as_ref().unwrap(), sort_key1b.as_ref().unwrap() )); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 2); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 2, + ); // non-existing partition for _ in 0..2 { let res = cache - .get( + .get_one( Arc::clone(&cached_table), PartitionId::new(i64::MAX), &Vec::new(), @@ -392,7 +476,11 @@ mod tests { ) .await; assert_eq!(res, None); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 3); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 3, + ); } } @@ -461,7 +549,7 @@ mod tests { ); let ranges1a = cache - .get(Arc::clone(&cached_table), p1.id, &[], None) + .get_one(Arc::clone(&cached_table), p1.id, &[], None) .await .unwrap() .column_ranges; @@ -488,10 +576,14 @@ mod tests { &ranges1a.get("tag1").unwrap().min_value, &ranges1a.get("tag1").unwrap().max_value, )); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 1); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); let ranges2 = cache - .get(Arc::clone(&cached_table), p2.id, &[], None) + .get_one(Arc::clone(&cached_table), p2.id, &[], None) .await .unwrap() .column_ranges; @@ -505,10 +597,14 @@ mod tests { } ),]), ); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 2); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 2, + ); let ranges3 = cache - 
.get(Arc::clone(&cached_table), p3.id, &[], None) + .get_one(Arc::clone(&cached_table), p3.id, &[], None) .await .unwrap() .column_ranges; @@ -531,10 +627,14 @@ mod tests { ), ]), ); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 3); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 3, + ); let ranges4 = cache - .get(Arc::clone(&cached_table), p4.id, &[], None) + .get_one(Arc::clone(&cached_table), p4.id, &[], None) .await .unwrap() .column_ranges; @@ -557,10 +657,14 @@ mod tests { ), ]), ); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 4); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 4, + ); let ranges5 = cache - .get(Arc::clone(&cached_table), p5.id, &[], None) + .get_one(Arc::clone(&cached_table), p5.id, &[], None) .await .unwrap() .column_ranges; @@ -574,20 +678,28 @@ mod tests { } ),]), ); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 5, + ); let ranges1b = cache - .get(Arc::clone(&cached_table), p1.id, &[], None) + .get_one(Arc::clone(&cached_table), p1.id, &[], None) .await .unwrap() .column_ranges; assert!(Arc::ptr_eq(&ranges1a, &ranges1b)); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 5, + ); // non-existing partition for _ in 0..2 { let res = cache - .get( + .get_one( Arc::clone(&cached_table), PartitionId::new(i64::MAX), &[], @@ -595,7 +707,11 @@ mod tests { ) .await; assert_eq!(res, None); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 6); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 6, + ); } } @@ -635,31 +751,43 @@ mod tests { ); let sort_key = cache - .get(Arc::clone(&cached_table), p_id, &[], None) + .get_one(Arc::clone(&cached_table), p_id, &[], None) .await .unwrap() .sort_key; assert_eq!(sort_key, None,); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 1); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); // requesting nother will not expire assert!(p_sort_key.is_none()); let sort_key = cache - .get(Arc::clone(&cached_table), p_id, &[], None) + .get_one(Arc::clone(&cached_table), p_id, &[], None) .await .unwrap() .sort_key; assert_eq!(sort_key, None,); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 1); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); // but requesting something will expire let sort_key = cache - .get(Arc::clone(&cached_table), p_id, &[c1.column.id], None) + .get_one(Arc::clone(&cached_table), p_id, &[c1.column.id], None) .await .unwrap() .sort_key; assert_eq!(sort_key, None,); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 2); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 2, + ); // set sort key let p = p @@ -668,11 +796,12 @@ mod tests { c2.column.name.as_str(), ])) .await; + assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 1); // expire & fetch let p_sort_key = p.partition.sort_key(); let 
sort_key = cache - .get(Arc::clone(&cached_table), p_id, &[c1.column.id], None) + .get_one(Arc::clone(&cached_table), p_id, &[c1.column.id], None) .await .unwrap() .sort_key; @@ -684,7 +813,11 @@ mod tests { column_order: [c1.column.id, c2.column.id].into(), } ); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 4); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 3, + ); // subsets and the full key don't expire for should_cover in [ @@ -694,7 +827,7 @@ mod tests { vec![c1.column.id, c2.column.id], ] { let sort_key_2 = cache - .get(Arc::clone(&cached_table), p_id, &should_cover, None) + .get_one(Arc::clone(&cached_table), p_id, &should_cover, None) .await .unwrap() .sort_key; @@ -702,13 +835,17 @@ mod tests { sort_key.as_ref().unwrap(), sort_key_2.as_ref().unwrap() )); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 4); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 3, + ); } // unknown columns expire let c3 = t.create_column("x", ColumnType::Tag).await; let sort_key_2 = cache - .get( + .get_one( Arc::clone(&cached_table), p_id, &[c1.column.id, c3.column.id], @@ -722,10 +859,109 @@ mod tests { sort_key_2.as_ref().unwrap() )); assert_eq!(sort_key, sort_key_2); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 4, + ); + } + + #[tokio::test] + async fn test_multi_get() { + let catalog = TestCatalog::new(); + + let ns = catalog.create_namespace_1hr_retention("ns").await; + let t = ns.create_table("table").await; + let p1 = t.create_partition("k1").await.partition.clone(); + let p2 = t.create_partition("k2").await.partition.clone(); + let cached_table = Arc::new(CachedTable { + id: t.table.id, + schema: schema(), + column_id_map: HashMap::default(), + column_id_map_rev: HashMap::default(), + primary_key_column_ids: [].into(), + partition_template: TablePartitionTemplateOverride::default(), + }); + + let cache = PartitionCache::new( + catalog.catalog(), + BackoffConfig::default(), + catalog.time_provider(), + &catalog.metric_registry(), + test_ram_pool(), + true, + ); + + let mut res = cache + .get( + Arc::clone(&cached_table), + vec![ + PartitionRequest { + partition_id: p1.id, + sort_key_should_cover: vec![], + }, + PartitionRequest { + partition_id: p2.id, + sort_key_should_cover: vec![], + }, + PartitionRequest { + partition_id: p1.id, + sort_key_should_cover: vec![], + }, + PartitionRequest { + partition_id: PartitionId::new(i64::MAX), + sort_key_should_cover: vec![], + }, + ], + None, + ) + .await; + res.sort_by_key(|p| p.id); + let ids = res.iter().map(|p| p.id).collect::<Vec<_>>(); + assert_eq!(ids, vec![p1.id, p1.id, p2.id]); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); } fn schema() -> Schema { SchemaBuilder::new().build().unwrap() } + + /// Extension methods for simpler testing. 
+ #[async_trait] + trait PartitionCacheExt { + async fn get_one( + &self, + cached_table: Arc<CachedTable>, + partition_id: PartitionId, + sort_key_should_cover: &[ColumnId], + span: Option<Span>, + ) -> Option<CachedPartition>; + } + + #[async_trait] + impl PartitionCacheExt for PartitionCache { + async fn get_one( + &self, + cached_table: Arc<CachedTable>, + partition_id: PartitionId, + sort_key_should_cover: &[ColumnId], + span: Option<Span>, + ) -> Option<CachedPartition> { + self.get( + cached_table, + vec![PartitionRequest { + partition_id, + sort_key_should_cover: sort_key_should_cover.to_vec(), + }], + span, + ) + .await + .into_iter() + .next() + } + } } diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs index fe75fc3064..c3794a82dc 100644 --- a/querier/src/parquet/mod.rs +++ b/querier/src/parquet/mod.rs @@ -106,6 +106,7 @@ pub mod tests { use crate::cache::{ namespace::{CachedNamespace, CachedTable}, + partition::PartitionRequest, CatalogCache, }; @@ -249,11 +250,15 @@ pub mod tests { .partition() .get( Arc::clone(&self.cached_table), - self.parquet_file.partition_id, - &[], + vec![PartitionRequest { + partition_id: self.parquet_file.partition_id, + sort_key_should_cover: vec![], + }], None, ) .await + .into_iter() + .next() .unwrap(); let cached_partitions = HashMap::from([(self.parquet_file.partition_id, cached_partition)]); diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs index add7d855b9..52750ec47e 100644 --- a/querier/src/table/mod.rs +++ b/querier/src/table/mod.rs @@ -1,17 +1,19 @@ use self::query_access::QuerierTableChunkPruner; use crate::{ - cache::{namespace::CachedTable, partition::CachedPartition}, + cache::{ + namespace::CachedTable, + partition::{CachedPartition, PartitionRequest}, + }, ingester::{self, IngesterPartition}, parquet::ChunkAdapter, - IngesterConnection, CONCURRENT_CHUNK_CREATION_JOBS, + IngesterConnection, }; use data_types::{ColumnId, NamespaceId, ParquetFile, PartitionId, TableId}; use datafusion::error::DataFusionError; -use futures::{join, StreamExt}; +use futures::join; use iox_query::{provider, provider::ChunkPruner, QueryChunk}; use observability_deps::tracing::{debug, trace}; use predicate::Predicate; -use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng}; use schema::Schema; use snafu::{ResultExt, Snafu}; use std::{ @@ -345,33 +347,26 @@ impl QuerierTable { .extend(f.column_set.iter().copied().filter(|id| pk.contains(id))); } - // shuffle order to even catalog load, because cache hits/misses might be correlated w/ the order of the - // partitions. 
- // - // Note that we sort before shuffling to achieve a deterministic pseudo-random order - let mut partitions = should_cover.into_iter().collect::<Vec<_>>(); - let mut rng = StdRng::seed_from_u64(cached_table.id.get() as u64); - partitions.sort_by(|(a_p_id, _a_cols), (b_p_id, _b_cols)| a_p_id.cmp(b_p_id)); - partitions.shuffle(&mut rng); - - futures::stream::iter(partitions) - .map(|(p_id, cover)| { - let catalog_cache = self.chunk_adapter.catalog_cache(); - let span = span_recorder.child_span("fetch partition"); - - async move { - let cover = cover.into_iter().collect::<Vec<_>>(); - let cached_partition = catalog_cache - .partition() - .get(Arc::clone(cached_table), p_id, &cover, span) - .await; - cached_partition.map(|p| (p_id, p)) - } + // batch request all partitions + let requests = should_cover + .into_iter() + .map(|(id, cover)| PartitionRequest { + partition_id: id, + sort_key_should_cover: cover.into_iter().collect(), }) - .buffer_unordered(CONCURRENT_CHUNK_CREATION_JOBS) - .filter_map(|x| async move { x }) - .collect::<HashMap<_, _>>() - .await + .collect(); + let partitions = self + .chunk_adapter + .catalog_cache() + .partition() + .get( + Arc::clone(cached_table), + requests, + span_recorder.child_span("fetch partitions"), + ) + .await; + + partitions.into_iter().map(|p| (p.id, p)).collect() } /// Get a chunk pruner that can be used to prune chunks retrieved via [`chunks`](Self::chunks) @@ -891,12 +886,22 @@ mod tests { let chunks = querier_table.chunks().await.unwrap(); assert_eq!(chunks.len(), 5); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 6); + assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 4); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); assert_cache_access_metric_count(&catalog.metric_registry, "partition", 2); let chunks = querier_table.chunks().await.unwrap(); assert_eq!(chunks.len(), 5); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 6); + assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 4); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); assert_cache_access_metric_count(&catalog.metric_registry, "partition", 4); partition_2 @@ -904,12 +909,22 @@ mod tests { TestParquetFileBuilder::default().with_line_protocol("table,tag1=a foo=1,bar=1 11"), ) .await; - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 7); + assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); // file not visible yet let chunks = querier_table.chunks().await.unwrap(); assert_eq!(chunks.len(), 5); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 7); + assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 1, + ); assert_cache_access_metric_count(&catalog.metric_registry, "partition", 6); // change inster ID => invalidates cache @@ -918,7 +933,12 @@ mod tests { .with_ingester_partition(ingester_partition_builder.build()); let chunks = querier_table.chunks().await.unwrap(); assert_eq!(chunks.len(), 6); - assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 8); + 
assert_catalog_access_metric_count(&catalog.metric_registry, "partition_get_by_id", 5); + assert_catalog_access_metric_count( + &catalog.metric_registry, + "partition_get_by_id_batch", + 2, + ); assert_cache_access_metric_count(&catalog.metric_registry, "partition", 8); }
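The batching wiring in the commit above replaces many single-partition catalog lookups with one `partition_get_by_id_batch` request, then scatters the results back to the positions the callers asked for. The sketch below shows that gather/scatter shape in isolation; the types and function names are stand-ins, not the real querier cache or `iox_catalog` API:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Partition {
    id: i64,
    sort_key: Option<String>,
}

// Stand-in for a batched catalog call: one round trip for many ids.
fn lookup_batch(catalog: &HashMap<i64, Partition>, ids: &[i64]) -> Vec<Partition> {
    ids.iter().filter_map(|id| catalog.get(id).cloned()).collect()
}

// Gather the requested ids, issue a single batched lookup, and scatter the
// results back into the output slots; missing partitions stay `None`.
fn get_many(catalog: &HashMap<i64, Partition>, requested: &[i64]) -> Vec<Option<Partition>> {
    let mut slot_for_id: HashMap<i64, usize> = HashMap::with_capacity(requested.len());
    for (idx, id) in requested.iter().enumerate() {
        slot_for_id.entry(*id).or_insert(idx);
    }
    let mut out: Vec<Option<Partition>> = vec![None; requested.len()];
    for p in lookup_batch(catalog, requested) {
        out[slot_for_id[&p.id]] = Some(p);
    }
    out
}

fn main() {
    let catalog = HashMap::from([(
        1,
        Partition { id: 1, sort_key: Some("host,time".to_owned()) },
    )]);
    // ids 1 and 99 are requested together; only id 1 exists in the catalog
    let res = get_many(&catalog, &[1, 99]);
    assert!(res[0].is_some());
    assert!(res[1].is_none());
    println!("{res:?}");
}
```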
74b1a5e368abe756c114fe4dbfd081c5257aa38e
Marco Neumann
2023-09-19 13:53:30
allow streaming record batches into query
For #8350, we won't have all the record batches from the ingester during planning, but we'll stream them during execution. Technically, the DF plan is already based on streams; it's just `QueryChunkData` that required a materialized `Vec<RecordBatch>`. This change moves stream creation up so a chunk can either use `QueryChunkData::in_mem` (which conveniently creates the stream) or provide its own stream.
null
refactor: allow streaming record batches into query For #8350, we won't have all the record batches from the ingester during planning, but we'll stream them during execution. Technically, the DF plan is already based on streams; it's just `QueryChunkData` that required a materialized `Vec<RecordBatch>`. This change moves stream creation up so a chunk can either use `QueryChunkData::in_mem` (which conveniently creates the stream) or provide its own stream.
diff --git a/ingester/src/query_adaptor.rs b/ingester/src/query_adaptor.rs index 0bbfd55651..4d10cb7b10 100644 --- a/ingester/src/query_adaptor.rs +++ b/ingester/src/query_adaptor.rs @@ -139,11 +139,12 @@ impl QueryChunk for QueryAdaptor { fn data(&self) -> QueryChunkData { let schema = self.schema().as_arrow(); - QueryChunkData::RecordBatches( + QueryChunkData::in_mem( self.data .iter() .map(|b| ensure_schema(&schema, b).expect("schema handling broken")) .collect(), + Arc::clone(self.schema.inner()), ) } diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index 258dc77d39..4ef0eebe2b 100644 --- a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -12,18 +12,20 @@ unused_crate_dependencies )] +use datafusion_util::MemoryStream; +use futures::TryStreamExt; // Workaround for "unused crate" lint false positives. use workspace_hack as _; use arrow::{ - datatypes::{DataType, Field}, + datatypes::{DataType, Field, SchemaRef}, record_batch::RecordBatch, }; use async_trait::async_trait; use data_types::{ChunkId, ChunkOrder, TransitionPartitionId}; use datafusion::{ error::DataFusionError, - physical_plan::Statistics, + physical_plan::{SendableRecordBatchStream, Statistics}, prelude::{Expr, SessionContext}, }; use exec::IOxSessionContext; @@ -189,12 +191,9 @@ pub trait QueryNamespace: Debug + Send + Sync { } /// Raw data of a [`QueryChunk`]. -#[derive(Debug, Clone)] pub enum QueryChunkData { - /// In-memory record batches. - /// - /// **IMPORTANT: All batches MUST have the schema that the [chunk reports](QueryChunk::schema).** - RecordBatches(Vec<RecordBatch>), + /// Record batches. + RecordBatches(SendableRecordBatchStream), /// Parquet file. /// @@ -210,7 +209,7 @@ impl QueryChunkData { session_ctx: &SessionContext, ) -> Vec<RecordBatch> { match self { - Self::RecordBatches(batches) => batches, + Self::RecordBatches(batches) => batches.try_collect::<Vec<_>>().await.unwrap(), Self::Parquet(exec_input) => exec_input .read_to_batches(schema.as_arrow(), Projection::All, session_ctx) .await @@ -218,11 +217,19 @@ impl QueryChunkData { } } - /// Extract [record batches](Self::RecordBatches) variant. - pub fn into_record_batches(self) -> Option<Vec<RecordBatch>> { + /// Create data based on batches and schema. 
+ pub fn in_mem(batches: Vec<RecordBatch>, schema: SchemaRef) -> Self { + let s = MemoryStream::new_with_schema(batches, schema); + let s: SendableRecordBatchStream = Box::pin(s); + Self::RecordBatches(s) + } +} + +impl std::fmt::Debug for QueryChunkData { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::RecordBatches(batches) => Some(batches), - Self::Parquet(_) => None, + Self::RecordBatches(_) => f.debug_tuple("RecordBatches").field(&"<stream>").finish(), + Self::Parquet(input) => f.debug_tuple("Parquet").field(input).finish(), } } } diff --git a/iox_query/src/provider/adapter.rs b/iox_query/src/provider/adapter.rs index f7ec713ea9..5d928852d4 100644 --- a/iox_query/src/provider/adapter.rs +++ b/iox_query/src/provider/adapter.rs @@ -19,16 +19,6 @@ use futures::Stream; #[allow(clippy::enum_variant_names)] #[derive(Debug, Snafu)] pub enum Error { - #[snafu(display( - "Internal error creating SchemaAdapterStream: field '{}' does not appear in the output schema, known fields are: {:?}", - field_name, - known_fields, - ))] - InternalLostInputField { - field_name: String, - known_fields: Vec<String>, - }, - #[snafu(display("Internal error creating SchemaAdapterStream: input field '{}' had type '{:?}' which is different than output field '{}' which had type '{:?}'", input_field_name, input_field_type, output_field_name, output_field_type,))] InternalDataTypeMismatch { @@ -152,29 +142,6 @@ impl SchemaAdapterStream { }) .collect::<Vec<_>>(); - // sanity logic checks - for input_field in input_schema.fields() { - // that there are no fields in the input schema that are - // not present in the desired output schema (otherwise we - // are dropping fields -- theys should have been selected - // out with projection push down) - if !output_schema - .fields() - .iter() - .any(|output_field| input_field.name() == output_field.name()) - { - return InternalLostInputFieldSnafu { - field_name: input_field.name(), - known_fields: output_schema - .fields() - .iter() - .map(|f| f.name().clone()) - .collect::<Vec<String>>(), - } - .fail(); - } - } - // Verify the mappings match the output type for (output_index, mapping) in mappings.iter().enumerate() { let output_field = output_schema.field(output_index); @@ -417,17 +384,27 @@ mod tests { Field::new("a", DataType::Int32, false), ])); let input_stream = stream_from_batch(batch.schema(), batch); - let res = SchemaAdapterStream::try_new( + let adapter_stream = SchemaAdapterStream::try_new( input_stream, output_schema, &Default::default(), baseline_metrics(), - ); + ) + .unwrap(); - assert_contains!( - res.unwrap_err().to_string(), - "field 'b' does not appear in the output schema" - ); + let output = collect(Box::pin(adapter_stream)) + .await + .expect("Running plan"); + let expected = vec![ + "+-----+---+", + "| c | a |", + "+-----+---+", + "| foo | 1 |", + "| bar | 2 |", + "| baz | 3 |", + "+-----+---+", + ]; + assert_batches_eq!(&expected, &output); } #[tokio::test] diff --git a/iox_query/src/provider/record_batch_exec.rs b/iox_query/src/provider/record_batch_exec.rs index a1007fb45b..9b3c591eda 100644 --- a/iox_query/src/provider/record_batch_exec.rs +++ b/iox_query/src/provider/record_batch_exec.rs @@ -9,7 +9,6 @@ use datafusion::{ execution::context::TaskContext, physical_plan::{ expressions::{Column, PhysicalSortExpr}, - memory::MemoryStream, metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet}, ColumnStatistics, DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, SendableRecordBatchStream, 
Statistics, @@ -18,11 +17,7 @@ use datafusion::{ }; use observability_deps::tracing::trace; use schema::sort::SortKey; -use std::{ - collections::{HashMap, HashSet}, - fmt, - sync::Arc, -}; +use std::{collections::HashMap, fmt, sync::Arc}; /// Implements the DataFusion physical plan interface for [`RecordBatch`]es with automatic projection and NULL-column creation. /// @@ -178,47 +173,22 @@ impl ExecutionPlan for RecordBatchesExec { let schema = self.schema(); let chunk = &self.chunks[partition]; - let part_schema = chunk.schema().as_arrow(); - - // The output selection is all the columns in the schema. - // - // However, this chunk may not have all those columns. Thus we - // restrict the requested selection to the actual columns - // available, and use SchemaAdapterStream to pad the rest of - // the columns with NULLs if necessary - let final_output_column_names: HashSet<_> = - schema.fields().iter().map(|f| f.name()).collect(); - let projection: Vec<_> = part_schema - .fields() - .iter() - .enumerate() - .filter(|(_idx, field)| final_output_column_names.contains(field.name())) - .map(|(idx, _)| idx) - .collect(); - let projection = (!((projection.len() == part_schema.fields().len()) - && (projection.iter().enumerate().all(|(a, b)| a == *b)))) - .then_some(projection); - let incomplete_output_schema = projection - .as_ref() - .map(|projection| Arc::new(part_schema.project(projection).expect("projection broken"))) - .unwrap_or(part_schema); - - let batches = chunk.data().into_record_batches().ok_or_else(|| { - DataFusionError::Execution(String::from("chunk must contain record batches")) - })?; - - let stream = Box::pin(MemoryStream::try_new( - batches.clone(), - incomplete_output_schema, - projection, - )?); + + let stream = match chunk.data() { + crate::QueryChunkData::RecordBatches(stream) => stream, + crate::QueryChunkData::Parquet(_) => { + return Err(DataFusionError::Execution(String::from( + "chunk must contain record batches", + ))); + } + }; let virtual_columns = HashMap::from([( CHUNK_ORDER_COLUMN_NAME, ScalarValue::from(chunk.order().get()), )]); let adapter = Box::pin( SchemaAdapterStream::try_new(stream, schema, &virtual_columns, baseline_metrics) - .map_err(|e| DataFusionError::Internal(e.to_string()))?, + .map_err(|e| DataFusionError::External(Box::new(e)))?, ); trace!(partition, "End RecordBatchesExec::execute"); diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index d6e07b523b..05ffd796da 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -282,6 +282,12 @@ impl TableProvider for TestDatabaseTableProvider { } } +#[derive(Debug)] +enum TestChunkData { + RecordBatches(Vec<RecordBatch>), + Parquet(ParquetExecInput), +} + #[derive(Debug)] pub struct TestChunk { /// Table name @@ -302,7 +308,7 @@ pub struct TestChunk { may_contain_pk_duplicates: bool, /// Data in this chunk. 
- table_data: QueryChunkData, + table_data: TestChunkData, /// A saved error that is returned instead of actual results saved_error: Option<String>, @@ -372,7 +378,7 @@ impl TestChunk { num_rows: None, id: ChunkId::new_test(0), may_contain_pk_duplicates: Default::default(), - table_data: QueryChunkData::RecordBatches(vec![]), + table_data: TestChunkData::RecordBatches(vec![]), saved_error: Default::default(), order: ChunkOrder::MIN, sort_key: None, @@ -383,10 +389,10 @@ impl TestChunk { fn push_record_batch(&mut self, batch: RecordBatch) { match &mut self.table_data { - QueryChunkData::RecordBatches(batches) => { + TestChunkData::RecordBatches(batches) => { batches.push(batch); } - QueryChunkData::Parquet(_) => panic!("chunk is parquet-based"), + TestChunkData::Parquet(_) => panic!("chunk is parquet-based"), } } @@ -403,14 +409,14 @@ impl TestChunk { pub fn with_dummy_parquet_file_and_store(self, store: &str) -> Self { match self.table_data { - QueryChunkData::RecordBatches(batches) => { + TestChunkData::RecordBatches(batches) => { assert!(batches.is_empty(), "chunk already has record batches"); } - QueryChunkData::Parquet(_) => panic!("chunk already has a file"), + TestChunkData::Parquet(_) => panic!("chunk already has a file"), } Self { - table_data: QueryChunkData::Parquet(ParquetExecInput { + table_data: TestChunkData::Parquet(ParquetExecInput { object_store_url: ObjectStoreUrl::parse(store).unwrap(), object_meta: ObjectMeta { location: Self::parquet_location(self.id), @@ -436,7 +442,7 @@ impl TestChunk { pub fn with_id(mut self, id: u128) -> Self { self.id = ChunkId::new_test(id); - if let QueryChunkData::Parquet(parquet_input) = &mut self.table_data { + if let TestChunkData::Parquet(parquet_input) = &mut self.table_data { parquet_input.object_meta.location = Self::parquet_location(self.id); } @@ -1078,7 +1084,12 @@ impl QueryChunk for TestChunk { fn data(&self) -> QueryChunkData { self.check_error().unwrap(); - self.table_data.clone() + match &self.table_data { + TestChunkData::RecordBatches(batches) => { + QueryChunkData::in_mem(batches.clone(), Arc::clone(self.schema.inner())) + } + TestChunkData::Parquet(input) => QueryChunkData::Parquet(input.clone()), + } } fn chunk_type(&self) -> &str { diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index a62d598150..36db5d34f5 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -954,7 +954,7 @@ impl QueryChunk for IngesterChunk { } fn data(&self) -> QueryChunkData { - QueryChunkData::RecordBatches(self.batches.clone()) + QueryChunkData::in_mem(self.batches.clone(), Arc::clone(self.schema.inner())) } fn chunk_type(&self) -> &str {
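The refactor above turns `QueryChunkData::RecordBatches` from an eagerly materialized `Vec<RecordBatch>` into a stream, with `QueryChunkData::in_mem` as the convenience path for chunks that already hold all their batches in memory. A toy sketch of the same shape follows; it uses a stand-in batch type and `futures` directly instead of the Arrow/DataFusion stream types, so it is illustrative only:

```rust
use futures::executor::block_on;
use futures::stream::{self, BoxStream, StreamExt};

// Stand-in for arrow::record_batch::RecordBatch, just to keep the sketch small.
type RecordBatch = Vec<u64>;

enum QueryChunkData {
    // Batches are produced lazily while the plan executes.
    RecordBatches(BoxStream<'static, RecordBatch>),
}

impl QueryChunkData {
    // Convenience constructor for chunks that already hold all batches in memory,
    // mirroring the role of `QueryChunkData::in_mem` in the diff above.
    fn in_mem(batches: Vec<RecordBatch>) -> Self {
        Self::RecordBatches(stream::iter(batches).boxed())
    }
}

fn main() {
    let QueryChunkData::RecordBatches(stream) = QueryChunkData::in_mem(vec![vec![1, 2], vec![3]]);
    // A consumer drains the stream instead of receiving a pre-built Vec.
    let collected: Vec<RecordBatch> = block_on(stream.collect());
    assert_eq!(collected, vec![vec![1, 2], vec![3]]);
}
```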
1f732595d1fc5b982daa0ee8073aba64c95788c9
Marco Neumann
2023-08-31 18:18:36
re-introduce sqlx pool "used" counter (#8624)
This was removed in #8336 because the tracking was broken. However, having an "almost right" metric is still valuable because it helps with dashboarding (see code comment).
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: re-introduce sqlx pool "used" counter (#8624) This was removed in #8336 because the tracking was broken. However, having an "almost right" metric is still valuable because it helps with dashboarding (see code comment). Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index a88e8d0b3e..6420a7d117 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -392,19 +392,34 @@ impl Instrument for PoolMetrics { MetricKind::U64Gauge, ); for (id, p) in pools.iter() { + let active = p.size() as u64; + let idle = p.num_idle() as u64; + + // We get both values independently (from underlying atomic counters) so they might be out of sync (with a + // low likelyhood). Calculating this value and emitting it is useful though since it allows easier use in + // dashboards since you can `max_over_time` w/o any recording rules. + let used = active.saturating_sub(idle); + reporter.report_observation( &Attributes::from([ ("pool_id", Cow::Owned(id.as_ref().to_owned())), ("state", Cow::Borrowed("active")), ]), - metric::Observation::U64Gauge(p.size() as u64), + metric::Observation::U64Gauge(active), ); reporter.report_observation( &Attributes::from([ ("pool_id", Cow::Owned(id.as_ref().to_owned())), ("state", Cow::Borrowed("idle")), ]), - metric::Observation::U64Gauge(p.num_idle() as u64), + metric::Observation::U64Gauge(idle), + ); + reporter.report_observation( + &Attributes::from([ + ("pool_id", Cow::Owned(id.as_ref().to_owned())), + ("state", Cow::Borrowed("used")), + ]), + metric::Observation::U64Gauge(used), ); reporter.report_observation( &Attributes::from([
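The "used" gauge re-introduced above is derived from two independently sampled readings, so the commit clamps the subtraction rather than letting a momentarily inconsistent pair underflow. A tiny sketch of just that arithmetic (the function name is hypothetical, not part of the sqlx or IOx API):

```rust
// Derive the "used" connection gauge from two independently sampled readings.
// Because `active` and `idle` come from separate atomics, `idle` can briefly
// exceed `active`; saturating_sub keeps the gauge at 0 instead of wrapping.
fn used_connections(active: u64, idle: u64) -> u64 {
    active.saturating_sub(idle)
}

fn main() {
    assert_eq!(used_connections(10, 3), 7);
    // racy snapshot: idle was read after more connections were returned to the pool
    assert_eq!(used_connections(2, 5), 0);
    println!("ok");
}
```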
b961bc79c41f279ec323548aef38d012e2433ca3
wiedld
2023-07-05 13:13:13
move the background task handler onto the parent IngesterGuard
* follow the pattern of the periodic wal rotation * do NOT follow the pattern of the wal.flusher_task
null
refactor: move the background task handler onto the parent IngesterGuard * follow the pattern of the periodic wal rotation * do NOT follow the pattern of the wal.flusher_task
diff --git a/ingester/src/init.rs b/ingester/src/init.rs index d6b83195fe..c00bc80876 100644 --- a/ingester/src/init.rs +++ b/ingester/src/init.rs @@ -99,6 +99,11 @@ pub struct IngesterGuard<T> { /// Aborted on drop. rotation_task: tokio::task::JoinHandle<()>, + /// The handle of the periodic disk protection task. + /// + /// Aborted on drop. + disk_protection_task: tokio::task::JoinHandle<()>, + /// The task handle executing the graceful shutdown once triggered. graceful_shutdown_handler: tokio::task::JoinHandle<()>, shutdown_complete: Shared<oneshot::Receiver<()>>, @@ -125,6 +130,7 @@ where impl<T> Drop for IngesterGuard<T> { fn drop(&mut self) { self.rotation_task.abort(); + self.disk_protection_task.abort(); self.graceful_shutdown_handler.abort(); } } @@ -332,7 +338,7 @@ where .map_err(InitError::WalInit)?; // Initialize the disk proetction after the WAL directory is initialized let disk_protection = InstrumentedDiskProtection::new(wal_directory, &metrics); - disk_protection.start().await; + let disk_protection_task = disk_protection.start().await; // Replay the WAL log files, if any. let max_sequence_number = @@ -411,6 +417,7 @@ where persist_handle, ), rotation_task, + disk_protection_task, graceful_shutdown_handler: shutdown_task, shutdown_complete: shutdown_rx.shared(), }) diff --git a/tracker/src/disk_protection.rs b/tracker/src/disk_protection.rs index c532c27869..284e37d9b6 100644 --- a/tracker/src/disk_protection.rs +++ b/tracker/src/disk_protection.rs @@ -1,7 +1,6 @@ -use std::{borrow::Cow, path::PathBuf, sync::Arc, time::Duration}; +use std::{borrow::Cow, path::PathBuf, time::Duration}; use metric::{Attributes, U64Gauge}; -use parking_lot::Mutex; use sysinfo::{DiskExt, System, SystemExt}; use tokio::{self, task::JoinHandle}; @@ -66,8 +65,6 @@ impl DiskProtectionMetrics { pub struct InstrumentedDiskProtection { /// The metrics that are reported to the registry. metrics: DiskProtectionMetrics, - /// The handle to terminate the background task. - background_task: Mutex<Option<JoinHandle<()>>>, } impl std::fmt::Debug for InstrumentedDiskProtection { @@ -81,27 +78,12 @@ impl InstrumentedDiskProtection { pub fn new(directory_to_track: PathBuf, registry: &metric::Registry) -> Self { let metrics = DiskProtectionMetrics::new(directory_to_track, registry); - Self { - metrics, - background_task: Default::default(), - } + Self { metrics } } /// Start the [`InstrumentedDiskProtection`] background task. - pub async fn start(self) { - let rc_self = Arc::new(self); - let rc_self_clone = Arc::clone(&rc_self); - - *rc_self.background_task.lock() = Some(tokio::task::spawn(async move { - rc_self_clone.background_task().await - })); - } - - /// Stop the [`InstrumentedDiskProtection`] background task. - pub fn stop(&mut self) { - if let Some(t) = self.background_task.lock().take() { - t.abort() - } + pub async fn start(self) -> JoinHandle<()> { + tokio::task::spawn(async move { self.background_task().await }) } /// The background task that periodically performs the disk protection check. @@ -118,15 +100,10 @@ impl InstrumentedDiskProtection { } } -impl Drop for InstrumentedDiskProtection { - fn drop(&mut self) { - // future-proof, such that stop does not need to be explicitly called. 
- self.stop(); - } -} - #[cfg(test)] mod tests { + use std::sync::Arc; + use metric::Metric; use super::*; @@ -135,18 +112,20 @@ mod tests { async fn test_metrics() { let registry = Arc::new(metric::Registry::new()); - struct MockAnyStruct; + struct MockAnyStruct { + abort_handle: JoinHandle<()>, + } impl MockAnyStruct { pub(crate) async fn new(registry: &metric::Registry) -> Self { let disk_protection = InstrumentedDiskProtection::new(PathBuf::from("/"), registry); - disk_protection.start().await; + let abort_handle = disk_protection.start().await; - Self + Self { abort_handle } } } - let _mock = MockAnyStruct::new(&registry).await; + let mock = MockAnyStruct::new(&registry).await; tokio::time::sleep(2 * Duration::from_secs(2)).await; @@ -158,5 +137,6 @@ mod tests { .fetch(); assert!(recorded_metric > 0_u64); + mock.abort_handle.abort(); } }
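To make the ownership pattern in the refactor above concrete, here is a minimal, self-contained sketch (not the actual ingester code): `start()` returns the spawned task's `JoinHandle`, the parent guard stores it, and `Drop` aborts it. The `Guard` struct, the `start_background_task` function, and the one-second interval are illustrative assumptions, not names from the repository.

```rust
use std::time::Duration;
use tokio::task::JoinHandle;

/// Spawns a periodic background task and hands its JoinHandle back to the
/// caller, mirroring the change where `disk_protection.start()` returns a handle.
async fn start_background_task() -> JoinHandle<()> {
    tokio::task::spawn(async {
        let mut interval = tokio::time::interval(Duration::from_secs(1));
        loop {
            interval.tick().await;
            // periodic work (e.g. a disk usage check) would run here
        }
    })
}

/// Parent guard that owns the handle; dropping the guard stops the task,
/// analogous to the guard aborting its background tasks in `Drop`.
struct Guard {
    background_task: JoinHandle<()>,
}

impl Drop for Guard {
    fn drop(&mut self) {
        self.background_task.abort();
    }
}

#[tokio::main]
async fn main() {
    let guard = Guard {
        background_task: start_background_task().await,
    };
    tokio::time::sleep(Duration::from_secs(3)).await;
    drop(guard); // aborts the background task when the guard goes away
}
```

This keeps task lifetime tied to the owning struct instead of an internal `Mutex<Option<JoinHandle>>`, which is why the commit can delete the separate `stop()` method and the `Drop` impl on the disk-protection type.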
fbe9f27f107434623862c87626d8ad691eae495d
Andrew Lamb
2022-11-16 09:41:52
Update datafusion again (#6108)
* chore: Update datafusion pin + api code * chore: Run cargo hakari tasks * refactor: combine_sort_key is more idiomatic and add rationale comments * refactor: satisfy borrow checker and updated comments * fix: Add test case for combine_sort_key * fix: Apply suggestions from code review Co-authored-by: Marco Neumann <[email protected]> * fix: Add back test for deeply nested expression * fix: Update output ordering
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: Marco Neumann <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: Update datafusion again (#6108) * chore: Update datafusion pin + api code * chore: Run cargo hakari tasks * refactor: combine_sort_key is more idiomatic and add rationale comments * refactor: satisfy borrow checker and updated comments * fix: Add test case for combine_sort_key * fix: Apply suggestions from code review Co-authored-by: Marco Neumann <[email protected]> * fix: Add back test for deeply nested expression * fix: Update output ordering Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: Marco Neumann <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
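Before the full diff, a minimal sketch of the sort-key combining idea this commit uses to populate `output_ordering`: a group of parquet files shares a known output ordering only when one file's sort key is an in-order subsequence of the other's; otherwise the ordering is unknown and `None` is returned. This is a simplified illustration, not the IOx implementation (which goes through `SortKey::try_merge_key` from the `schema` crate); the `Vec<&str>` representation and the `combine` name are assumptions made for the example.

```rust
/// Combine two optional sort keys: keep the longer key when the shorter one
/// appears within it in order, otherwise report "unknown sort order" (None).
fn combine<'a>(
    existing: Option<Vec<&'a str>>,
    chunk: Option<Vec<&'a str>>,
) -> Option<Vec<&'a str>> {
    let (existing, chunk) = (existing?, chunk?);
    let (longer, shorter) = if existing.len() >= chunk.len() {
        (existing, chunk)
    } else {
        (chunk, existing)
    };
    // The shorter key must appear, in order, inside the longer one.
    let mut it = longer.iter();
    if shorter.iter().all(|col| it.any(|c| c == col)) {
        Some(longer)
    } else {
        None // incompatible orderings => no usable output_ordering
    }
}

fn main() {
    // Compatible: ["t1","time"] is an in-order subsequence of ["t1","t2","time"].
    assert_eq!(
        combine(Some(vec!["t1", "time"]), Some(vec!["t1", "t2", "time"])),
        Some(vec!["t1", "t2", "time"])
    );
    // Incompatible: ["t2","t1","time"] vs ["t1","t2","time"] cannot be merged.
    assert_eq!(
        combine(Some(vec!["t2", "t1", "time"]), Some(vec!["t1", "t2", "time"])),
        None
    );
    // A missing key on either side means the data was not consistently sorted.
    assert_eq!(combine(None, Some(vec!["t1", "time"])), None);
}
```

The assertions mirror the `test_combine_sort_key` cases added in the diff below; when the combined key survives, the planner can tell DataFusion's `ParquetExec` about the file ordering instead of leaving it unspecified.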
diff --git a/Cargo.lock b/Cargo.lock index b973f63a07..100136c438 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1135,6 +1135,19 @@ dependencies = [ "syn", ] +[[package]] +name = "dashmap" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +dependencies = [ + "cfg-if", + "hashbrown 0.12.3", + "lock_api", + "once_cell", + "parking_lot_core 0.9.4", +] + [[package]] name = "data_types" version = "0.1.0" @@ -1155,8 +1168,8 @@ dependencies = [ [[package]] name = "datafusion" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "ahash 0.8.2", "arrow", @@ -1165,6 +1178,7 @@ dependencies = [ "bytes", "bzip2", "chrono", + "dashmap", "datafusion-common", "datafusion-expr", "datafusion-optimizer", @@ -1180,7 +1194,6 @@ dependencies = [ "log", "num_cpus", "object_store", - "ordered-float 3.4.0", "parking_lot 0.12.1", "parquet", "paste", @@ -1199,21 +1212,20 @@ dependencies = [ [[package]] name = "datafusion-common" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "arrow", "chrono", "object_store", - "ordered-float 3.4.0", "parquet", "sqlparser 0.26.0", ] [[package]] name = "datafusion-expr" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "ahash 0.8.2", "arrow", @@ -1224,8 +1236,8 @@ dependencies = [ [[package]] name = "datafusion-optimizer" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "arrow", "async-trait", @@ -1239,8 +1251,8 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "ahash 0.8.2", "arrow", @@ -1258,7 +1270,6 @@ dependencies = [ "lazy_static", "md-5", "num-traits", - "ordered-float 3.4.0", "paste", "rand", "regex", @@ -1269,8 +1280,8 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = 
"git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "arrow", "datafusion", @@ -1283,8 +1294,8 @@ dependencies = [ [[package]] name = "datafusion-row" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "arrow", "datafusion-common", @@ -1294,8 +1305,8 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "13.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=dd081d64a2fba8574e63bdd0662c14aec5852b48#dd081d64a2fba8574e63bdd0662c14aec5852b48" +version = "14.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=d2814c960168b45c4a0f5d7bbb72d9f412cb08bd#d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" dependencies = [ "arrow", "datafusion-common", diff --git a/Cargo.toml b/Cargo.toml index 9e3121fd46..ab02e0196e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,8 +110,8 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] arrow = { version = "26.0.0" } arrow-flight = { version = "26.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="dd081d64a2fba8574e63bdd0662c14aec5852b48", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="dd081d64a2fba8574e63bdd0662c14aec5852b48" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="d2814c960168b45c4a0f5d7bbb72d9f412cb08bd", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="d2814c960168b45c4a0f5d7bbb72d9f412cb08bd" } hashbrown = { version = "0.13.1" } parquet = { version = "26.0.0" } diff --git a/iox_query/src/exec/non_null_checker.rs b/iox_query/src/exec/non_null_checker.rs index ce6ed0a0f4..08fd7bc817 100644 --- a/iox_query/src/exec/non_null_checker.rs +++ b/iox_query/src/exec/non_null_checker.rs @@ -218,8 +218,8 @@ impl ExecutionPlan for NonNullCheckerExec { None } - fn required_child_distribution(&self) -> Distribution { - Distribution::UnspecifiedDistribution + fn required_input_distribution(&self) -> Vec<Distribution> { + vec![Distribution::UnspecifiedDistribution] } fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> { diff --git a/iox_query/src/exec/schema_pivot.rs b/iox_query/src/exec/schema_pivot.rs index 5d2a2696ae..293afe1356 100644 --- a/iox_query/src/exec/schema_pivot.rs +++ b/iox_query/src/exec/schema_pivot.rs @@ -189,8 +189,8 @@ impl ExecutionPlan for SchemaPivotExec { None } - fn required_child_distribution(&self) -> Distribution { - Distribution::UnspecifiedDistribution + fn required_input_distribution(&self) -> Vec<Distribution> { + vec![Distribution::UnspecifiedDistribution] } fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> { diff --git a/iox_query/src/exec/split.rs b/iox_query/src/exec/split.rs index efe0212fa7..1a5639e603 100644 --- a/iox_query/src/exec/split.rs +++ b/iox_query/src/exec/split.rs @@ -207,8 +207,8 @@ impl ExecutionPlan for StreamSplitExec { /// Always require a single input (eventually we might imagine /// running this on multiple partitions concurrently to compute /// the splits in parallel, but not now) - fn required_child_distribution(&self) -> Distribution { - Distribution::SinglePartition + fn 
required_input_distribution(&self) -> Vec<Distribution> { + vec![Distribution::SinglePartition] } fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> { diff --git a/iox_query/src/provider/deduplicate.rs b/iox_query/src/provider/deduplicate.rs index 2f765e552d..f75bb7c7ba 100644 --- a/iox_query/src/provider/deduplicate.rs +++ b/iox_query/src/provider/deduplicate.rs @@ -219,8 +219,11 @@ impl ExecutionPlan for DeduplicateExec { Ok(AdapterStream::adapt(self.schema(), rx, handle)) } - fn required_child_distribution(&self) -> Distribution { - Distribution::SinglePartition + fn required_input_distribution(&self) -> Vec<Distribution> { + // For now use a single input -- it might be helpful + // eventually to deduplicate in parallel by hash partitioning + // the inputs (based on sort keys) + vec![Distribution::SinglePartition] } fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs index b2977e3ee2..945fcaa3e4 100644 --- a/iox_query/src/provider/physical.rs +++ b/iox_query/src/provider/physical.rs @@ -1,6 +1,9 @@ //! Implementation of a DataFusion PhysicalPlan node across partition chunks -use crate::{provider::record_batch_exec::RecordBatchesExec, QueryChunk, QueryChunkData}; +use crate::{ + provider::record_batch_exec::RecordBatchesExec, util::arrow_sort_key_exprs, QueryChunk, + QueryChunkData, +}; use arrow::{datatypes::SchemaRef, record_batch::RecordBatch}; use data_types::TableSummary; use datafusion::{ @@ -15,12 +18,65 @@ use datafusion::{ }; use object_store::ObjectMeta; use predicate::Predicate; -use schema::Schema; +use schema::{sort::SortKey, Schema}; use std::{ collections::{hash_map::Entry, HashMap}, sync::Arc, }; +/// Holds a list of chunks that all have the same "URL" +#[derive(Debug)] +struct ParquetChunkList { + object_store_url: ObjectStoreUrl, + object_metas: Vec<ObjectMeta>, + sort_key: Option<SortKey>, +} + +impl ParquetChunkList { + fn new(object_store_url: ObjectStoreUrl, chunk: &dyn QueryChunk, meta: ObjectMeta) -> Self { + Self { + object_store_url, + object_metas: vec![meta], + sort_key: chunk.sort_key().cloned(), + } + } + + /// Add the chunk to the list of files + fn add_parquet_file(&mut self, chunk: &dyn QueryChunk, meta: ObjectMeta) { + self.object_metas.push(meta); + + self.sort_key = combine_sort_key(self.sort_key.take(), chunk.sort_key()); + } +} + +/// Combines the existing sort key with the sort key of the chunk, +/// returning the new combined compatible sort key that describes both +/// chunks. +/// +/// If it is not possible to find a compatible sort key, None is +/// returned signifying "unknown sort order" +fn combine_sort_key( + existing_sort_key: Option<SortKey>, + chunk_sort_key: Option<&SortKey>, +) -> Option<SortKey> { + if let (Some(existing_sort_key), Some(chunk_sort_key)) = (existing_sort_key, chunk_sort_key) { + let combined_sort_key = SortKey::try_merge_key(&existing_sort_key, chunk_sort_key); + + // Avoid cloning the sort key when possible, as the sort key + // is likely to commonly be the same + match combined_sort_key { + Some(combined_sort_key) if combined_sort_key == &existing_sort_key => { + Some(existing_sort_key) + } + Some(combined_sort_key) => Some(combined_sort_key.clone()), + None => None, + } + } else { + // no existing sort key means the data wasn't consistently sorted so leave it alone + None + } +} + /// Place [chunk](QueryChunk)s into physical nodes. 
/// /// This will group chunks into [record batch](QueryChunkData::RecordBatches) and [parquet @@ -52,7 +108,7 @@ pub fn chunks_to_physical_nodes( } let mut record_batch_chunks: Vec<(SchemaRef, Vec<RecordBatch>, Arc<TableSummary>)> = vec![]; - let mut parquet_chunks: HashMap<String, (ObjectStoreUrl, Vec<ObjectMeta>)> = HashMap::new(); + let mut parquet_chunks: HashMap<String, ParquetChunkList> = HashMap::new(); for chunk in &chunks { match chunk.data() { @@ -63,12 +119,14 @@ pub fn chunks_to_physical_nodes( let url_str = parquet_input.object_store_url.as_str().to_owned(); match parquet_chunks.entry(url_str) { Entry::Occupied(mut o) => { - o.get_mut().1.push(parquet_input.object_meta); + o.get_mut() + .add_parquet_file(chunk.as_ref(), parquet_input.object_meta); } Entry::Vacant(v) => { - v.insert(( + v.insert(ParquetChunkList::new( parquet_input.object_store_url, - vec![parquet_input.object_meta], + chunk.as_ref(), + parquet_input.object_meta, )); } } @@ -86,9 +144,15 @@ pub fn chunks_to_physical_nodes( let mut parquet_chunks: Vec<_> = parquet_chunks.into_iter().collect(); parquet_chunks.sort_by_key(|(url_str, _)| url_str.clone()); let target_partitions = context.session_config().target_partitions; - for (_url_str, (url, chunks)) in parquet_chunks { + for (_url_str, chunk_list) in parquet_chunks { + let ParquetChunkList { + object_store_url, + object_metas, + sort_key, + } = chunk_list; + let file_groups = distribute( - chunks.into_iter().map(|object_meta| PartitionedFile { + object_metas.into_iter().map(|object_meta| PartitionedFile { object_meta, partition_values: vec![], range: None, @@ -96,21 +160,26 @@ pub fn chunks_to_physical_nodes( }), target_partitions, ); + + // Tell datafusion about the sort key, if any + let file_schema = iox_schema.as_arrow(); + let output_ordering = + sort_key.map(|sort_key| arrow_sort_key_exprs(&sort_key, &file_schema)); + let base_config = FileScanConfig { - object_store_url: url, - file_schema: iox_schema.as_arrow(), + object_store_url, + file_schema, file_groups, statistics: Statistics::default(), projection: None, limit: None, table_partition_cols: vec![], config_options: context.session_config().config_options(), + output_ordering, }; - output_nodes.push(Arc::new(ParquetExec::new( - base_config, - predicate.filter_expr(), - None, - ))); + let meta_size_hint = None; + let parquet_exec = ParquetExec::new(base_config, predicate.filter_expr(), meta_size_hint); + output_nodes.push(Arc::new(parquet_exec)); } assert!(!output_nodes.is_empty()); @@ -144,6 +213,8 @@ where #[cfg(test)] mod tests { + use schema::sort::SortKeyBuilder; + use super::*; #[test] @@ -156,4 +227,50 @@ mod tests { assert_eq!(distribute(0..3u8, 10), vec![vec![0], vec![1], vec![2]],); } + + #[test] + fn test_combine_sort_key() { + let skey_t1 = SortKeyBuilder::new() + .with_col("t1") + .with_col("time") + .build(); + + let skey_t1_t2 = SortKeyBuilder::new() + .with_col("t1") + .with_col("t2") + .with_col("time") + .build(); + + let skey_t2_t1 = SortKeyBuilder::new() + .with_col("t2") + .with_col("t1") + .with_col("time") + .build(); + + assert_eq!(combine_sort_key(None, None), None); + assert_eq!(combine_sort_key(Some(skey_t1.clone()), None), None); + assert_eq!(combine_sort_key(None, Some(&skey_t1)), None); + + assert_eq!( + combine_sort_key(Some(skey_t1.clone()), Some(&skey_t1)), + Some(skey_t1.clone()) + ); + + assert_eq!( + combine_sort_key(Some(skey_t1.clone()), Some(&skey_t1_t2)), + Some(skey_t1_t2.clone()) + ); + + assert_eq!( + combine_sort_key(Some(skey_t1_t2.clone()), 
Some(&skey_t1)), + Some(skey_t1_t2.clone()) + ); + + assert_eq!( + combine_sort_key(Some(skey_t2_t1.clone()), Some(&skey_t1)), + Some(skey_t2_t1.clone()) + ); + + assert_eq!(combine_sort_key(Some(skey_t2_t1), Some(&skey_t1_t2)), None); + } } diff --git a/parquet_file/src/storage.rs b/parquet_file/src/storage.rs index 963bab4618..a41d8fda4e 100644 --- a/parquet_file/src/storage.rs +++ b/parquet_file/src/storage.rs @@ -123,6 +123,8 @@ impl ParquetExecInput { table_partition_cols: vec![], // TODO avoid this `copied_config` when config_options are directly available on context config_options: session_ctx.copied_config().config_options(), + // Parquet files ARE actually sorted but we don't care here since we just construct a `collect` plan. + output_ordering: None, }; let exec = ParquetExec::new(base_config, None, None); let exec_schema = exec.schema(); diff --git a/parquet_to_line_protocol/src/lib.rs b/parquet_to_line_protocol/src/lib.rs index 0d8ff70210..46710d5db1 100644 --- a/parquet_to_line_protocol/src/lib.rs +++ b/parquet_to_line_protocol/src/lib.rs @@ -213,6 +213,7 @@ impl ParquetFileReader { projection: None, limit: None, table_partition_cols: vec![], + output_ordering: None, config_options: ConfigOptions::new().into_shareable(), }; diff --git a/querier/src/ingester/flight_client.rs b/querier/src/ingester/flight_client.rs index fae40e0646..c621802a7f 100644 --- a/querier/src/ingester/flight_client.rs +++ b/querier/src/ingester/flight_client.rs @@ -217,11 +217,32 @@ impl CachedConnection { #[cfg(test)] mod tests { use data_types::{NamespaceId, TableId}; - use datafusion::prelude::{col, lit}; + use datafusion::prelude::{col, lit, when, Expr}; use predicate::Predicate; use super::*; + #[test] + fn serialize_deeply_nested_and() { + // we need more stack space so this doesn't overflow in dev builds + std::thread::Builder::new() + .stack_size(10_000_000) + .spawn(|| { + let n = 100; + println!("testing: {n}"); + + // build a deeply nested (a < 5) AND (a < 5) AND .... tree + let expr_base = col("a").lt(lit(5i32)); + let expr = (0..n).fold(expr_base.clone(), |expr, _| expr.and(expr_base.clone())); + + let (request, request2) = serialize_roundtrip(expr); + assert_eq!(request, request2); + }) + .expect("spawning thread") + .join() + .expect("joining thread"); + } + #[test] fn serialize_deeply_nested_predicate() { // see https://github.com/influxdata/influxdb_iox/issues/5974 @@ -234,28 +255,54 @@ mod tests { for n in [1, 2, n_max] { println!("testing: {n}"); - let expr_base = col("a").lt(lit(5i32)); - let expr = (0..n).fold(expr_base.clone(), |expr, _| expr.and(expr_base.clone())); - - let predicate = Predicate {exprs: vec![expr], ..Default::default()}; - let request = IngesterQueryRequest { - namespace_id: NamespaceId::new(42), - table_id: TableId::new(1337), - columns: vec![String::from("col1"), String::from("col2")], - predicate: Some(predicate), - }; + // build a deeply recursive nested expression: + // + // CASE + // WHEN TRUE + // THEN (WHEN ...) + // ELSE FALSE + // + let expr = (0..n).fold(lit(false), |expr, _|{ + when(lit(true), expr) + .end() + .unwrap() + }); - let proto = serialize_ingester_query_request(request.clone()).expect("serialization"); - let request2 = IngesterQueryRequest::try_from(proto).expect("deserialization"); + let (request1, request2) = serialize_roundtrip(expr); + // expect that the self preservation mechanism has + // kicked in and the predicate has been ignored. 
if request2.predicate.is_none() { assert!(n > 2, "not really deeply nested"); return; + } else { + assert_eq!(request1, request2); } } panic!("did not find a 'too deeply nested' expression, tested up to a depth of {n_max}") }).expect("spawning thread").join().expect("joining thread"); } + + /// Creates a [`IngesterQueryRequest`] and round trips it through + /// serialization, returning both the original and the serialized + /// request + fn serialize_roundtrip(expr: Expr) -> (IngesterQueryRequest, IngesterQueryRequest) { + let predicate = Predicate { + exprs: vec![expr], + ..Default::default() + }; + + let request = IngesterQueryRequest { + namespace_id: NamespaceId::new(42), + table_id: TableId::new(1337), + columns: vec![String::from("col1"), String::from("col2")], + predicate: Some(predicate), + }; + + let proto = serialize_ingester_query_request(request.clone()).expect("serialization"); + let request2 = IngesterQueryRequest::try_from(proto).expect("deserialization"); + (request, request2) + } } diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs index 1d6df8fe5c..7dd36ad112 100644 --- a/querier/src/namespace/query_access.rs +++ b/querier/src/namespace/query_access.rs @@ -508,24 +508,24 @@ mod tests { &querier_namespace, "EXPLAIN SELECT * FROM mem ORDER BY host,time", &[ - "+---------------+---------------------------------------------------------------------------------------------------------------------------------------+", - "| plan_type | plan |", - "+---------------+---------------------------------------------------------------------------------------------------------------------------------------+", - "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |", - "| | Projection: mem.host, mem.perc, mem.time |", - "| | TableScan: mem projection=[host, perc, time] |", - "| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |", - "| | CoalescePartitionsExec |", - "| | ProjectionExec: expr=[host@0 as host, perc@1 as perc, time@2 as time] |", - "| | UnionExec |", - "| | CoalesceBatchesExec: target_batch_size=4096 |", - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", - "| | ParquetExec: limit=None, partitions=[1/2/1/4/<uuid>.parquet], projection=[host, perc, time] |", - "| | CoalesceBatchesExec: target_batch_size=4096 |", - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", - "| | ParquetExec: limit=None, partitions=[1/2/1/4/<uuid>.parquet], projection=[host, perc, time] |", - "| | |", - "+---------------+---------------------------------------------------------------------------------------------------------------------------------------+", + "+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", + "| plan_type | plan |", + "+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", + "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |", + "| | Projection: mem.host, mem.perc, mem.time |", + "| | TableScan: mem projection=[host, perc, time] |", + "| physical_plan | SortExec: [host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |", + "| | CoalescePartitionsExec |", + "| | ProjectionExec: expr=[host@0 as host, perc@1 as 
perc, time@2 as time] |", + "| | UnionExec |", + "| | CoalesceBatchesExec: target_batch_size=4096 |", + "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", + "| | ParquetExec: limit=None, partitions=[1/2/1/4/<uuid>.parquet], output_ordering=[host@0 ASC, time@2 ASC], projection=[host, perc, time] |", + "| | CoalesceBatchesExec: target_batch_size=4096 |", + "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", + "| | ParquetExec: limit=None, partitions=[1/2/1/4/<uuid>.parquet], output_ordering=[host@0 ASC, time@2 ASC], projection=[host, perc, time] |", + "| | |", + "+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", ], ) .await; @@ -569,20 +569,20 @@ mod tests { "EXPLAIN SELECT * FROM cpu", &[ "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - "| plan_type | plan |", - "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |", - "| | TableScan: cpu projection=[foo, host, load, time] |", - "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |", - "| | UnionExec |", - "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |", - "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |", - "| | UnionExec |", - "| | ParquetExec: limit=None, partitions=[1/1/2/2/<uuid>.parquet], projection=[foo, host, load, time] |", - "| | ParquetExec: limit=None, partitions=[1/1/2/2/<uuid>.parquet], projection=[foo, host, load, time] |", - "| | ParquetExec: limit=None, partitions=[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet], projection=[foo, host, load, time] |", - "| | |", - "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", + "| plan_type | plan |", + "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", + "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |", + "| | TableScan: cpu projection=[foo, host, load, time] |", + "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |", + "| | UnionExec |", + "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |", + "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |", + "| | UnionExec |", + "| | ParquetExec: limit=None, partitions=[1/1/2/2/<uuid>.parquet], 
output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |", + "| | ParquetExec: limit=None, partitions=[1/1/2/2/<uuid>.parquet], output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |", + "| | ParquetExec: limit=None, partitions=[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet], projection=[foo, host, load, time] |", + "| | |", + "+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", ], ) .await; diff --git a/query_tests/cases/in/duplicates_ingester.expected b/query_tests/cases/in/duplicates_ingester.expected index 560145efe9..5aa8b0cc41 100644 --- a/query_tests/cases/in/duplicates_ingester.expected +++ b/query_tests/cases/in/duplicates_ingester.expected @@ -1,91 +1,91 @@ -- Test Setup: OneMeasurementFourChunksWithDuplicatesWithIngester -- SQL: explain select time, state, city, min_temp, max_temp, area from h2o order by time, state, city; -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST | -| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | -| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | -| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] | -| | CoalescePartitionsExec | -| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | UnionExec | -| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] | -| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | 
++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST | +| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | +| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | +| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] | +| | CoalescePartitionsExec | +| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | UnionExec | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] | +| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select time, state, city, min_temp, max_temp, area from h2o; -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | -| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | -| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | -| | UnionExec | -| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] | -| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, 
partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | +| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | +| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | +| | UnionExec | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | DeduplicateExec: [city@1 ASC,state@4 ASC,time@5 ASC] | +| | SortExec: [city@1 ASC,state@4 ASC,time@5 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select state as name from h2o UNION ALL select city as name from h2o; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Union | -| | Projection: h2o.state AS name | -| | TableScan: h2o projection=[state] | -| | Projection: h2o.city AS name | -| | TableScan: h2o projection=[city] | -| physical_plan | UnionExec | -| | ProjectionExec: expr=[state@0 as name] | -| | UnionExec | -| | ProjectionExec: expr=[state@1 as state] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, state, time] | -| | ParquetExec: limit=None, 
partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[city, state, time] | -| | ProjectionExec: expr=[state@1 as state] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] | -| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[state] | -| | ProjectionExec: expr=[city@0 as name] | -| | UnionExec | -| | ProjectionExec: expr=[city@0 as city] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[city, state, time] | -| | ProjectionExec: expr=[city@0 as city] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] | -| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[city] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Union | +| | Projection: h2o.state AS name | +| | TableScan: h2o projection=[state] | +| | Projection: h2o.city AS name | +| | TableScan: h2o projection=[city] | +| physical_plan | UnionExec | +| | ProjectionExec: expr=[state@0 as name] | +| | UnionExec | +| | ProjectionExec: expr=[state@1 as state] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ProjectionExec: expr=[state@1 as state] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] | +| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[state@0 ASC], projection=[state] | +| | ProjectionExec: expr=[city@0 as name] | +| | UnionExec | +| | ProjectionExec: expr=[city@0 as city] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: 
[state@1 ASC,city@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ProjectionExec: expr=[city@0 as city] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [city@0 ASC,state@1 ASC,time@2 ASC] | +| | SortExec: [city@0 ASC,state@1 ASC,time@2 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[city@0 ASC], projection=[city] | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: select count(*) from h2o; +-----------------+ | COUNT(UInt8(1)) | diff --git a/query_tests/cases/in/duplicates_parquet.expected b/query_tests/cases/in/duplicates_parquet.expected index 57751bb248..bde1410942 100644 --- a/query_tests/cases/in/duplicates_parquet.expected +++ b/query_tests/cases/in/duplicates_parquet.expected @@ -1,75 +1,75 @@ -- Test Setup: OneMeasurementFourChunksWithDuplicatesParquetOnly -- SQL: explain select time, state, city, min_temp, max_temp, area from h2o order by time, state, city; -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST | -| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | -| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | -| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] | -| | CoalescePartitionsExec | -| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | UnionExec | -| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | | 
-+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Sort: h2o.time ASC NULLS LAST, h2o.state ASC NULLS LAST, h2o.city ASC NULLS LAST | +| | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | +| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | +| physical_plan | SortExec: [time@0 ASC NULLS LAST,state@1 ASC NULLS LAST,city@2 ASC NULLS LAST] | +| | CoalescePartitionsExec | +| | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | UnionExec | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select time, state, city, min_temp, max_temp, area from h2o; -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | -| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | -| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | -| | UnionExec | -| | DeduplicateExec: [state@4 
ASC,city@1 ASC,time@5 ASC] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[area, city, max_temp, min_temp, state, time] | -| | | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.time, h2o.state, h2o.city, h2o.min_temp, h2o.max_temp, h2o.area | +| | TableScan: h2o projection=[area, city, max_temp, min_temp, state, time] | +| physical_plan | ProjectionExec: expr=[time@5 as time, state@4 as state, city@1 as city, min_temp@3 as min_temp, max_temp@2 as max_temp, area@0 as area] | +| | UnionExec | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time] | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN select state as name from h2o UNION ALL select city as name from h2o; -- Results After Normalizing UUIDs -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Union | -| | Projection: h2o.state AS name | -| | TableScan: h2o 
projection=[state] | -| | Projection: h2o.city AS name | -| | TableScan: h2o projection=[city] | -| physical_plan | UnionExec | -| | ProjectionExec: expr=[state@0 as name] | -| | UnionExec | -| | ProjectionExec: expr=[state@1 as state] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[city, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[state] | -| | ProjectionExec: expr=[city@0 as name] | -| | UnionExec | -| | ProjectionExec: expr=[city@0 as city] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[city, state, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[city] | -| | | -+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Union | +| | Projection: h2o.state AS name | +| | TableScan: h2o projection=[state] | +| | Projection: h2o.city AS name | +| | TableScan: h2o projection=[city] | +| physical_plan | UnionExec | +| | ProjectionExec: expr=[state@0 as name] | +| | UnionExec | +| | ProjectionExec: expr=[state@1 as state] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[state@0 ASC], projection=[state] | +| | ProjectionExec: expr=[city@0 as name] | +| | UnionExec | +| | ProjectionExec: expr=[city@0 as city] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | 
DeduplicateExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [state@1 ASC,city@0 ASC,time@2 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[state@1 ASC, city@0 ASC, time@2 ASC], projection=[city, state, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[city@0 ASC], projection=[city] | +| | | ++---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: select count(*) from h2o; +-----------------+ | COUNT(UInt8(1)) | @@ -91,8 +91,8 @@ | | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, num_dupes=2, output_rows=5, spill_count=0, spilled_bytes=0] | | | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] | | | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=3, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], 
metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=591, bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=628, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=1.234ms, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=2, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=3, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=1.234ms, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000000.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=1.234ms, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=3, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000001.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 
ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=591, bytes_scanned{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=628, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=1.234ms, page_index_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=1.234ms, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, page_index_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, predicate_evaluation_errors{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=1.234ms, pushdown_eval_time{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=1.234ms, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=2, pushdown_rows_filtered{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=3, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000002.parquet}=0, row_groups_pruned{filename=1/1/1/1/00000000-0000-0000-0000-000000000003.parquet}=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning=1.234ms] | | | | ---------- diff --git a/query_tests/cases/in/duplicates_parquet_many.expected b/query_tests/cases/in/duplicates_parquet_many.expected index 3de03d3c29..679b685697 100644 --- a/query_tests/cases/in/duplicates_parquet_many.expected +++ b/query_tests/cases/in/duplicates_parquet_many.expected @@ -23,16 +23,16 @@ | | DeduplicateExec: [tag@1 ASC,time@2 ASC] | | | SortPreservingMergeExec: [tag@1 ASC,time@2 ASC] | | | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000004.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000005.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000006.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000007.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000008.parquet], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000009.parquet], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, 
time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000004.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000005.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000006.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000007.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000008.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000009.parquet], output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | | | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-00000000000a.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000b.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000c.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000d.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000e.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000f.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000010.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000011.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000012.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000013.parquet], projection=[f] | | | | +---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/pushdown.expected b/query_tests/cases/in/pushdown.expected index 8347c216aa..ae21077c1b 100644 --- a/query_tests/cases/in/pushdown.expected +++ b/query_tests/cases/in/pushdown.expected @@ -14,15 +14,15 @@ +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | TableScan: restaurant projection=[count, system, time, town] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] 
| -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[count, system, time, town] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | TableScan: restaurant projection=[count, system, time, town] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where count > 200; -- Results After Sorting +-------+--------+--------------------------------+-----------+ @@ -37,49 +37,49 @@ +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: CAST(restaurant.count AS Int64) > Int64(200) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Int64) > Int64(200)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: CAST(count@0 AS Int64) > 200 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[count, system, time, town] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: CAST(restaurant.count AS Int64) > Int64(200) | +| | TableScan: restaurant projection=[count, system, 
time, town], partial_filters=[CAST(restaurant.count AS Int64) > Int64(200)] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: CAST(count@0 AS Int64) > 200 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200.0; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: CAST(restaurant.count AS Float64) > Float64(200) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: CAST(count@0 AS Float64) > 200 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[count, system, time, town] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: CAST(restaurant.count AS Float64) > Float64(200) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: CAST(count@0 AS Float64) > 200 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 4.0; -- Results After Normalizing UUIDs 
-+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(4) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: system@1 > 4 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 4, projection=[count, system, time, town] | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: restaurant.system > Float64(4) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: system@1 > 4 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 4, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury'; -- Results After Sorting +-------+--------+--------------------------------+-----------+ @@ -93,19 +93,19 @@ +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury'; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| 
logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: CAST(restaurant.count AS Int64) > Int64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Int64) > Int64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: CAST(count@0 AS Int64) > 200 AND town@3 != tewsbury | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=town_min@0 != tewsbury OR tewsbury != town_max@1, projection=[count, system, time, town] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: CAST(restaurant.count AS Int64) > Int64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Int64) > Int64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: CAST(count@0 AS Int64) > 200 AND town@3 != tewsbury | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=town_min@0 != tewsbury OR tewsbury != town_max@1, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence'); -- Results After Sorting +-------+--------+--------------------------------+-----------+ @@ -128,7 +128,7 @@ | | CoalesceBatchesExec: target_batch_size=4096 | | | FilterExec: CAST(count@0 AS Int64) > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence | | | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=town_min@0 != tewsbury OR tewsbury != town_max@1, projection=[count, system, time, town] | +| | ParquetExec: limit=None, 
partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=town_min@0 != tewsbury OR tewsbury != town_max@1, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | | | | +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000; @@ -154,7 +154,7 @@ | | FilterExec: CAST(restaurant.count AS Int64)restaurant.count@0 > 200 AND town@4 != tewsbury AND system@2 = 5 OR town@4 = lawrence AND CAST(restaurant.count AS Int64)restaurant.count@0 < 40000 | | | ProjectionExec: expr=[CAST(count@0 AS Int64) as CAST(restaurant.count AS Int64)restaurant.count, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=town_min@0 != tewsbury OR tewsbury != town_max@1, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=town_min@0 != tewsbury OR tewsbury != town_max@1, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | | | | +---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where count > 200 and count < 40000; @@ -170,21 +170,21 @@ +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200 and count < 40000; -- Results After Normalizing UUIDs -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: CAST(restaurant.count AS Int64)restaurant.count > Int64(200) AND CAST(restaurant.count AS Int64)restaurant.count < Int64(40000) | -| | Projection: CAST(restaurant.count AS Int64) AS CAST(restaurant.count AS Int64)restaurant.count, restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Int64) > Int64(200), CAST(restaurant.count AS Int64) < Int64(40000)] | -| physical_plan | ProjectionExec: expr=[count@1 as count, system@2 as system, time@3 as time, town@4 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: CAST(restaurant.count AS Int64)restaurant.count@0 > 200 AND CAST(restaurant.count AS Int64)restaurant.count@0 < 40000 | -| | ProjectionExec: expr=[CAST(count@0 AS Int64) as CAST(restaurant.count AS 
Int64)restaurant.count, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[count, system, time, town] | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: CAST(restaurant.count AS Int64)restaurant.count > Int64(200) AND CAST(restaurant.count AS Int64)restaurant.count < Int64(40000) | +| | Projection: CAST(restaurant.count AS Int64) AS CAST(restaurant.count AS Int64)restaurant.count, restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Int64) > Int64(200), CAST(restaurant.count AS Int64) < Int64(40000)] | +| physical_plan | ProjectionExec: expr=[count@1 as count, system@2 as system, time@3 as time, town@4 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: CAST(restaurant.count AS Int64)restaurant.count@0 > 200 AND CAST(restaurant.count AS Int64)restaurant.count@0 < 40000 | +| | ProjectionExec: expr=[CAST(count@0 AS Int64) as CAST(restaurant.count AS Int64)restaurant.count, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where system > 4.0 and system < 7.0; -- Results After Sorting +-------+--------+--------------------------------+-----------+ @@ -199,19 +199,19 @@ +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 4.0 and system < 7.0; -- Results After Normalizing UUIDs -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), 
restaurant.system < Float64(7)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: system@1 > 4 AND system@1 < 7 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] | -| | | -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: system@1 > 4 AND system@1 < 7 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 4 AND system_min@1 < 7, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where system > 5.0 and system < 7.0; -- Results After Sorting +-------+--------+--------------------------------+----------+ @@ -223,19 +223,19 @@ +-------+--------+--------------------------------+----------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and system < 7.0; -- Results After Normalizing UUIDs -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as 
town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: system@1 > 5 AND system@1 < 7 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] | -| | | -+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: system@1 > 5 AND system@1 < 7 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND system_min@1 < 7, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system; -- Results After Sorting +-------+--------+--------------------------------+----------+ @@ -246,19 +246,19 @@ +-------+--------+--------------------------------+----------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > 
restaurant.system] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | +| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] | +| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | +| | | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading'); -- Results After Sorting +-------+--------+--------------------------------+---------+ @@ -278,7 +278,7 @@ | | CoalesceBatchesExec: target_batch_size=4096 | | | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND CAST(count@0 AS Int64) = 632 OR town@3 = reading | | | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] | +| | ParquetExec: limit=None, 
partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, output_ordering=[town@3 ASC, time@2 ASC], projection=[count, system, time, town] | | | | +---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00'); diff --git a/query_tests/cases/in/several_chunks.expected b/query_tests/cases/in/several_chunks.expected index 48a28fac67..11adaf06f4 100644 --- a/query_tests/cases/in/several_chunks.expected +++ b/query_tests/cases/in/several_chunks.expected @@ -14,25 +14,25 @@ +---------+------------+-------+------+--------------------------------+ -- SQL: EXPLAIN SELECT * from h2o; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | TableScan: h2o projection=[city, other_temp, state, temp, time] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | UnionExec | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[city, other_temp, state, temp, time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[city, other_temp, state, temp, time] | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | +| | TableScan: h2o projection=[city, 
other_temp, state, temp, time] | +| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | +| | UnionExec | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: select temp, other_temp, time from h2o; -- Results After Sorting +------+------------+--------------------------------+ @@ -48,50 +48,50 @@ +------+------------+--------------------------------+ -- SQL: EXPLAIN select temp, other_temp, time from h2o; -- Results After Normalizing UUIDs -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time | -| | TableScan: h2o projection=[other_temp, temp, time] | -| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] | -| | UnionExec | -| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], projection=[city, other_temp, state, temp, time] | -| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, 
partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], projection=[other_temp, temp, time] | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time | +| | TableScan: h2o projection=[other_temp, temp, time] | +| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] | +| | UnionExec | +| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet], output_ordering=[time@2 ASC], projection=[other_temp, temp, time] | +| | | ++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: EXPLAIN SELECT * from h2o where time >= to_timestamp('1970-01-01T00:00:00.000000250+00:00'); -- Results After Normalizing UUIDs -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | Filter: h2o.time >= TimestampNanosecond(250, None) | -| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as 
other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | CoalesceBatchesExec: target_batch_size=4096 | -| | FilterExec: time@4 >= 250 | -| | RepartitionExec: partitioning=RoundRobinBatch(4) | -| | UnionExec | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] | -| | | -+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | +| | Filter: h2o.time >= TimestampNanosecond(250, None) | +| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] | +| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | +| | CoalesceBatchesExec: target_batch_size=4096 | +| | FilterExec: time@4 >= 250 | +| | RepartitionExec: partitioning=RoundRobinBatch(4) | +| | UnionExec | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet], predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | | 
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/query_tests/cases/in/two_chunks.expected b/query_tests/cases/in/two_chunks.expected index 7ccfd44fd7..661ffcefa0 100644 --- a/query_tests/cases/in/two_chunks.expected +++ b/query_tests/cases/in/two_chunks.expected @@ -10,20 +10,20 @@ +--------+------------+-------+------+--------------------------------+ -- SQL: EXPLAIN SELECT * from h2o; -- Results After Normalizing UUIDs -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | TableScan: h2o projection=[city, other_temp, state, temp, time] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, other_temp, state, temp, time] | -| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | | -+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | +| | TableScan: h2o projection=[city, other_temp, state, temp, time] | +| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | | ++---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -- SQL: select temp, other_temp, time from h2o; +------+------------+--------------------------------+ | temp | other_temp | time | @@ -34,18 +34,18 @@ +------+------------+--------------------------------+ -- SQL: 
EXPLAIN select temp, other_temp, time from h2o; -- Results After Normalizing UUIDs -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ -| plan_type | plan | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ -| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time | -| | TableScan: h2o projection=[other_temp, temp, time] | -| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] | -| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], projection=[city, other_temp, state, temp, time] | -| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 | -| | | -+---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| plan_type | plan | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time | +| | TableScan: h2o projection=[other_temp, temp, time] | +| physical_plan | ProjectionExec: expr=[temp@1 as temp, other_temp@0 as other_temp, time@2 as time] | +| | ProjectionExec: expr=[other_temp@1 as other_temp, temp@3 as temp, time@4 as time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions=[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet], output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | SortExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 | +| | | ++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 9e30ea0f9c..884c9131b9 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -26,7 +26,7 @@ bytes = { version = "1", features = ["std"] } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] } crossbeam-utils = { version = "0.8", features = ["std"] } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "dd081d64a2fba8574e63bdd0662c14aec5852b48", features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion = { git = 
"https://github.com/apache/arrow-datafusion.git", rev = "d2814c960168b45c4a0f5d7bbb72d9f412cb08bd", features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] } either = { version = "1", features = ["use_std"] } fixedbitset = { version = "0.4", features = ["std"] }
3a31f41c2c9b49ca6cd46dd32dff0927d4c3ac71
Marco Neumann
2023-03-16 09:45:14
use arrow schema in `chunks_to_physical_nodes` (#7217)
We don't need a validated IOx schema in this method. This will simplify some work on #6098.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: use arrow schema in `chunks_to_physical_nodes` (#7217) We don't need a validated IOx schema in this method. This will simplify some work on #6098. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query/src/physical_optimizer/chunk_extraction.rs b/iox_query/src/physical_optimizer/chunk_extraction.rs index 071e3ae040..f5f615bda2 100644 --- a/iox_query/src/physical_optimizer/chunk_extraction.rs +++ b/iox_query/src/physical_optimizer/chunk_extraction.rs @@ -215,7 +215,7 @@ mod tests { #[test] fn test_stop_at_other_node_types() { let chunk1 = chunk(1); - let schema = chunk1.schema().clone(); + let schema = chunk1.schema().as_arrow(); let plan = chunks_to_physical_nodes( &schema, None, @@ -259,7 +259,13 @@ mod tests { #[track_caller] fn assert_roundtrip(schema: Schema, chunks: Vec<Arc<dyn QueryChunk>>) { - let plan = chunks_to_physical_nodes(&schema, None, chunks.clone(), Predicate::default(), 2); + let plan = chunks_to_physical_nodes( + &schema.as_arrow(), + None, + chunks.clone(), + Predicate::default(), + 2, + ); let (schema2, chunks2) = extract_chunks(plan.as_ref()).expect("data found"); assert_eq!(schema, schema2); assert_eq!(chunk_ids(&chunks), chunk_ids(&chunks2)); diff --git a/iox_query/src/physical_optimizer/combine_chunks.rs b/iox_query/src/physical_optimizer/combine_chunks.rs index ac898e4c0f..78cd14602a 100644 --- a/iox_query/src/physical_optimizer/combine_chunks.rs +++ b/iox_query/src/physical_optimizer/combine_chunks.rs @@ -36,7 +36,7 @@ impl PhysicalOptimizerRule for CombineChunks { plan.transform_up(&|plan| { if let Some((iox_schema, chunks)) = extract_chunks(plan.as_ref()) { return Ok(Some(chunks_to_physical_nodes( - &iox_schema, + &iox_schema.as_arrow(), None, chunks, Predicate::new(), @@ -72,7 +72,7 @@ mod tests { let chunk3 = TestChunk::new("table").with_id(3); let chunk4 = TestChunk::new("table").with_id(4).with_dummy_parquet_file(); let chunk5 = TestChunk::new("table").with_id(5).with_dummy_parquet_file(); - let schema = chunk1.schema().clone(); + let schema = chunk1.schema().as_arrow(); let plan = Arc::new(UnionExec::new(vec![ chunks_to_physical_nodes( &schema, diff --git a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs index 8efcf43e5b..f9790cb22c 100644 --- a/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs +++ b/iox_query/src/physical_optimizer/dedup/dedup_null_columns.rs @@ -64,15 +64,16 @@ impl PhysicalOptimizerRule for DedupNullColumns { } let sort_key = sort_key_builder.build(); + let arrow_schema = schema.as_arrow(); let child = chunks_to_physical_nodes( - &schema, + &arrow_schema, (!sort_key.is_empty()).then_some(&sort_key), chunks, Predicate::new(), config.execution.target_partitions, ); - let sort_exprs = arrow_sort_key_exprs(&sort_key, schema.as_arrow().as_ref()); + let sort_exprs = arrow_sort_key_exprs(&sort_key, &arrow_schema); return Ok(Some(Arc::new(DeduplicateExec::new(child, sort_exprs)))); } diff --git a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs index 57a5d2d840..650f9dc141 100644 --- a/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs +++ b/iox_query/src/physical_optimizer/dedup/dedup_sort_order.rs @@ -124,15 +124,16 @@ impl PhysicalOptimizerRule for DedupSortOrder { } let quorum_sort_key = quorum_sort_key_builder.build(); + let arrow_schema = schema.as_arrow(); let child = chunks_to_physical_nodes( - &schema, + &arrow_schema, (!quorum_sort_key.is_empty()).then_some(&quorum_sort_key), chunks, Predicate::new(), config.execution.target_partitions, ); - let sort_exprs = arrow_sort_key_exprs(&quorum_sort_key, schema.as_arrow().as_ref()); + let sort_exprs = 
arrow_sort_key_exprs(&quorum_sort_key, &arrow_schema); return Ok(Some(Arc::new(DeduplicateExec::new(child, sort_exprs)))); } diff --git a/iox_query/src/physical_optimizer/dedup/partition_split.rs b/iox_query/src/physical_optimizer/dedup/partition_split.rs index 0d21fc5113..066f97febb 100644 --- a/iox_query/src/physical_optimizer/dedup/partition_split.rs +++ b/iox_query/src/physical_optimizer/dedup/partition_split.rs @@ -76,13 +76,14 @@ impl PhysicalOptimizerRule for PartitionSplit { let mut chunks_by_partition = chunks_by_partition.into_iter().collect::<Vec<_>>(); chunks_by_partition.sort_by_key(|(p_id, _chunks)| *p_id); + let arrow_schema = schema.as_arrow(); let out = UnionExec::new( chunks_by_partition .into_iter() .map(|(_p_id, chunks)| { Arc::new(DeduplicateExec::new( chunks_to_physical_nodes( - &schema, + &arrow_schema, None, chunks, Predicate::new(), diff --git a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs index edda8ac6b9..1aae525dcf 100644 --- a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs +++ b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs @@ -35,8 +35,9 @@ impl PhysicalOptimizerRule for RemoveDedup { }; if (chunks.len() < 2) && chunks.iter().all(|c| !c.may_contain_pk_duplicates()) { + let arrow_schema = schema.as_arrow(); return Ok(Some(chunks_to_physical_nodes( - &schema, + &arrow_schema, None, chunks, Predicate::new(), diff --git a/iox_query/src/physical_optimizer/dedup/test_util.rs b/iox_query/src/physical_optimizer/dedup/test_util.rs index 19bb3e508a..25fff9131a 100644 --- a/iox_query/src/physical_optimizer/dedup/test_util.rs +++ b/iox_query/src/physical_optimizer/dedup/test_util.rs @@ -16,7 +16,7 @@ pub fn dedup_plan(schema: Schema, chunks: Vec<TestChunk>) -> Arc<dyn ExecutionPl .into_iter() .map(|c| Arc::new(c) as _) .collect::<Vec<Arc<dyn QueryChunk>>>(); - let plan = chunks_to_physical_nodes(&schema, None, chunks, Predicate::new(), 2); + let plan = chunks_to_physical_nodes(&schema.as_arrow(), None, chunks, Predicate::new(), 2); let sort_key = schema::sort::SortKey::from_columns(schema.primary_key()); let sort_exprs = arrow_sort_key_exprs(&sort_key, &schema.as_arrow()); diff --git a/iox_query/src/physical_optimizer/dedup/time_split.rs b/iox_query/src/physical_optimizer/dedup/time_split.rs index bed6b825cd..a54715f34e 100644 --- a/iox_query/src/physical_optimizer/dedup/time_split.rs +++ b/iox_query/src/physical_optimizer/dedup/time_split.rs @@ -63,13 +63,14 @@ impl PhysicalOptimizerRule for TimeSplit { return Ok(None); } + let arrow_schema = schema.as_arrow(); let out = UnionExec::new( groups .into_iter() .map(|chunks| { Arc::new(DeduplicateExec::new( chunks_to_physical_nodes( - &schema, + &arrow_schema, None, chunks, Predicate::new(), diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs index 0683dea5fd..e75e619dab 100644 --- a/iox_query/src/provider.rs +++ b/iox_query/src/provider.rs @@ -1089,7 +1089,7 @@ impl Deduplicater { // Create the bottom node RecordBatchesExec for this chunk let mut input = chunks_to_physical_nodes( - &input_schema, + &input_schema.as_arrow(), output_sort_key, vec![Arc::clone(&chunk)], predicate, @@ -1267,7 +1267,7 @@ impl Deduplicater { debug!("Build one scan RecordBatchesExec for all non duplicated chunks even if empty"); plans.push(chunks_to_physical_nodes( - output_schema, + &output_schema.as_arrow(), output_sort_key, chunks.into_no_duplicates(deduplication), predicate, @@ -1452,7 +1452,7 @@ mod test { // IOx scan operator let input = 
chunks_to_physical_nodes( - chunk.schema(), + &chunk.schema().as_arrow(), None, vec![Arc::clone(&chunk)], Predicate::default(), @@ -1540,7 +1540,7 @@ mod test { // IOx scan operator let input = chunks_to_physical_nodes( - chunk.schema(), + &chunk.schema().as_arrow(), None, vec![Arc::clone(&chunk)], Predicate::default(), diff --git a/iox_query/src/provider/physical.rs b/iox_query/src/provider/physical.rs index 1a7bf3a2e2..d9e73bb038 100644 --- a/iox_query/src/provider/physical.rs +++ b/iox_query/src/provider/physical.rs @@ -4,6 +4,7 @@ use crate::{ provider::record_batch_exec::RecordBatchesExec, util::arrow_sort_key_exprs, QueryChunk, QueryChunkData, }; +use arrow::datatypes::SchemaRef; use datafusion::{ datasource::{listing::PartitionedFile, object_store::ObjectStoreUrl}, physical_expr::execution_props::ExecutionProps, @@ -135,14 +136,14 @@ fn combine_sort_key( /// pushdown ([`RecordBatchesExec`] has NO builtin filter function). Delete predicates are NOT applied at all. The /// caller is responsible for wrapping the output node into appropriate filter nodes. pub fn chunks_to_physical_nodes( - iox_schema: &Schema, + schema: &SchemaRef, output_sort_key: Option<&SortKey>, chunks: Vec<Arc<dyn QueryChunk>>, predicate: Predicate, target_partitions: usize, ) -> Arc<dyn ExecutionPlan> { if chunks.is_empty() { - return Arc::new(EmptyExec::new(false, iox_schema.as_arrow())); + return Arc::new(EmptyExec::new(false, Arc::clone(schema))); } let mut record_batch_chunks: Vec<Arc<dyn QueryChunk>> = vec![]; @@ -177,7 +178,7 @@ pub fn chunks_to_physical_nodes( if !record_batch_chunks.is_empty() { output_nodes.push(Arc::new(RecordBatchesExec::new( record_batch_chunks, - iox_schema.as_arrow(), + Arc::clone(schema), ))); } let mut parquet_chunks: Vec<_> = parquet_chunks.into_iter().collect(); @@ -202,14 +203,12 @@ pub fn chunks_to_physical_nodes( ); // Tell datafusion about the sort key, if any - let file_schema = iox_schema.as_arrow(); - let output_ordering = - sort_key.map(|sort_key| arrow_sort_key_exprs(&sort_key, &file_schema)); + let output_ordering = sort_key.map(|sort_key| arrow_sort_key_exprs(&sort_key, schema)); let props = ExecutionProps::new(); let filter_expr = predicate.filter_expr() .and_then(|filter_expr| { - match create_physical_expr_from_schema(&props, &filter_expr, &file_schema) { + match create_physical_expr_from_schema(&props, &filter_expr, schema) { Ok(f) => Some(f), Err(e) => { warn!(%e, ?filter_expr, "Error creating physical filter expression, can not push down"); @@ -220,7 +219,7 @@ pub fn chunks_to_physical_nodes( let base_config = FileScanConfig { object_store_url, - file_schema, + file_schema: Arc::clone(schema), file_groups, statistics: Statistics::default(), projection: None, @@ -361,7 +360,7 @@ mod tests { #[test] fn test_chunks_to_physical_nodes_empty() { - let schema = TestChunk::new("table").schema().clone(); + let schema = TestChunk::new("table").schema().as_arrow(); let plan = chunks_to_physical_nodes(&schema, None, vec![], Predicate::new(), 2); insta::assert_yaml_snapshot!( format_execution_plan(&plan), @@ -375,7 +374,7 @@ mod tests { #[test] fn test_chunks_to_physical_nodes_recordbatch() { let chunk = TestChunk::new("table"); - let schema = chunk.schema().clone(); + let schema = chunk.schema().as_arrow(); let plan = chunks_to_physical_nodes(&schema, None, vec![Arc::new(chunk)], Predicate::new(), 2); insta::assert_yaml_snapshot!( @@ -391,7 +390,7 @@ mod tests { #[test] fn test_chunks_to_physical_nodes_parquet_one_file() { let chunk = 
TestChunk::new("table").with_dummy_parquet_file(); - let schema = chunk.schema().clone(); + let schema = chunk.schema().as_arrow(); let plan = chunks_to_physical_nodes(&schema, None, vec![Arc::new(chunk)], Predicate::new(), 2); insta::assert_yaml_snapshot!( @@ -409,7 +408,7 @@ mod tests { let chunk1 = TestChunk::new("table").with_id(0).with_dummy_parquet_file(); let chunk2 = TestChunk::new("table").with_id(1).with_dummy_parquet_file(); let chunk3 = TestChunk::new("table").with_id(2).with_dummy_parquet_file(); - let schema = chunk1.schema().clone(); + let schema = chunk1.schema().as_arrow(); let plan = chunks_to_physical_nodes( &schema, None, @@ -435,7 +434,7 @@ mod tests { let chunk2 = TestChunk::new("table") .with_id(1) .with_dummy_parquet_file_and_store("iox2://"); - let schema = chunk1.schema().clone(); + let schema = chunk1.schema().as_arrow(); let plan = chunks_to_physical_nodes( &schema, None, @@ -458,7 +457,7 @@ mod tests { fn test_chunks_to_physical_nodes_mixed() { let chunk1 = TestChunk::new("table").with_dummy_parquet_file(); let chunk2 = TestChunk::new("table"); - let schema = chunk1.schema().clone(); + let schema = chunk1.schema().as_arrow(); let plan = chunks_to_physical_nodes( &schema, None,
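Illustrative aside (not part of the commit record above): the refactor boils down to passing Arrow's reference-counted `SchemaRef` instead of a wrapper schema type, so a helper can clone the schema cheaply with `Arc::clone` rather than re-deriving it on every call. A minimal sketch under that assumption; `describe_scan` and the column names are hypothetical, not IOx APIs.

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field, Schema, SchemaRef};

/// Hypothetical helper: it only needs the Arrow schema, so it takes `&SchemaRef`.
fn describe_scan(schema: &SchemaRef, n_chunks: usize) -> String {
    // Cheap reference-count bump; no wrapper-to-Arrow conversion inside the helper.
    let schema = Arc::clone(schema);
    format!("scan of {n_chunks} chunks over {} columns", schema.fields().len())
}

fn main() {
    // Call sites convert their own schema representation to Arrow once, up front.
    let schema: SchemaRef = Arc::new(Schema::new(vec![
        Field::new("city", DataType::Utf8, true),
        Field::new("temp", DataType::Float64, true),
    ]));
    println!("{}", describe_scan(&schema, 2));
}
```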
e5a9e1534a4e52cae86a3863a0add868a0a8060b
Dom Dwyer
2023-07-03 15:50:29
assert 1 file persisted
There should be a single file persisted during graceful shutdown.
null
test: assert 1 file persisted There should be a single file persisted during graceful shutdown.
diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs index 4cf54fa3c3..d27fe361bd 100644 --- a/influxdb_iox/tests/end_to_end_cases/querier.rs +++ b/influxdb_iox/tests/end_to_end_cases/querier.rs @@ -325,6 +325,7 @@ async fn query_after_shutdown_sees_new_files() { Step::WriteLineProtocol("bananas,tag1=A,tag2=B val=42i 123456".to_string()), Step::AssertNumParquetFiles { expected: 0 }, // test invariant Step::GracefulStopIngesters, + Step::AssertNumParquetFiles { expected: 1 }, Step::Query { sql: "select * from bananas".to_string(), expected: vec![
3779bda65a6a936b98262021259f521707206b19
Marco Neumann
2023-08-10 18:03:36
remove dead code in `predicate` (#8469)
The pushdown check is a leftover from the old imperative planning code. This is now handled by `iox_query::physical_optimizer::predicate_pushdown`.
null
refactor: remove dead code in `predicate` (#8469) The pushdown check is a leftover from the old imperative planning code. This is now handled by `iox_query::physical_optimizer::predicate_pushdown`.
diff --git a/predicate/src/lib.rs b/predicate/src/lib.rs index d5784ce5bb..78779384bf 100644 --- a/predicate/src/lib.rs +++ b/predicate/src/lib.rs @@ -21,21 +21,16 @@ pub mod rpc_predicate; use data_types::TimestampRange; use datafusion::{ - common::tree_node::{TreeNode, TreeNodeVisitor, VisitRecursion}, + common::tree_node::{TreeNodeVisitor, VisitRecursion}, error::DataFusionError, - logical_expr::{binary_expr, utils::expr_to_columns, BinaryExpr}, - optimizer::utils::split_conjunction, + logical_expr::{binary_expr, BinaryExpr}, prelude::{col, lit_timestamp_nano, Expr}, }; use datafusion_util::{make_range_expr, AsExpr}; use observability_deps::tracing::debug; use rpc_predicate::VALUE_COLUMN_NAME; use schema::TIME_COLUMN_NAME; -use std::{ - collections::{BTreeSet, HashSet}, - fmt, - ops::Not, -}; +use std::{collections::BTreeSet, fmt, ops::Not}; /// This `Predicate` represents the empty predicate (aka that evaluates to true for all rows). pub const EMPTY_PREDICATE: Predicate = Predicate { @@ -362,55 +357,6 @@ impl Predicate { self.exprs.extend(filters.into_iter()); self } - - /// Remove any clauses of this predicate that can not be run before deduplication. - /// - /// See <https://github.com/influxdata/influxdb_iox/issues/6066> for more details. - /// - /// Only expressions that are row-based and refer to primary key columns (and constants) - /// can be evaluated prior to deduplication. - /// - /// If a predicate can filter out some but not all of the rows with - /// the same primary key, it may filter out the row that should have been updated - /// allowing the original through, producing incorrect results. - /// - /// Any predicate that operates solely on primary key columns will either pass or filter - /// all rows with that primary key and thus is safe to push through. 
- pub fn push_through_dedup(self, schema: &schema::Schema) -> Self { - let pk: HashSet<_> = schema.primary_key().into_iter().collect(); - - let exprs = self - .exprs - .iter() - .flat_map(split_conjunction) - .filter(|expr| { - let mut columns = HashSet::default(); - if expr_to_columns(expr, &mut columns).is_err() { - // bail out, do NOT include this weird expression - return false; - } - - // check if all columns are part of the primary key - if !columns.into_iter().all(|c| pk.contains(c.name.as_str())) { - return false; - } - - let mut visitor = RowBasedVisitor::default(); - expr.visit(&mut visitor).expect("never fails"); - - visitor.row_based - }) - .cloned() - .collect(); - - Self { - // can always push time range through de-dup because it is a primary keys set operation - range: self.range, - exprs, - field_columns: None, - value_expr: vec![], - } - } } // Wrapper around `Expr::BinaryExpr` where left input is known to be @@ -535,10 +481,8 @@ impl TreeNodeVisitor for RowBasedVisitor { #[cfg(test)] mod tests { use super::*; - use arrow::datatypes::DataType as ArrowDataType; use data_types::{MAX_NANO_TIME, MIN_NANO_TIME}; - use datafusion::prelude::{col, cube, lit}; - use schema::builder::SchemaBuilder; + use datafusion::prelude::{col, lit}; #[test] fn test_default_predicate_is_empty() { @@ -647,120 +591,4 @@ mod tests { // rewrite assert_eq!(p.with_clear_timestamp_if_max_range(), expected); } - - #[test] - fn test_push_through_dedup() { - let schema = SchemaBuilder::default() - .tag("tag1") - .tag("tag2") - .field("field1", ArrowDataType::Float64) - .unwrap() - .field("field2", ArrowDataType::Float64) - .unwrap() - .timestamp() - .build() - .unwrap(); - - // no-op predicate - assert_eq!( - Predicate { - field_columns: None, - range: None, - exprs: vec![], - value_expr: vec![], - } - .push_through_dedup(&schema), - Predicate { - field_columns: None, - range: None, - exprs: vec![], - value_expr: vec![], - }, - ); - - // simple case - assert_eq!( - Predicate { - field_columns: Some(BTreeSet::from([ - String::from("tag1"), - String::from("field1"), - String::from("time"), - ])), - range: Some(TimestampRange::new(42, 1337)), - exprs: vec![ - col("tag1").eq(lit("foo")), - col("field1").eq(lit(1.0)), // filtered out - col("time").eq(lit(1)), - ], - value_expr: vec![ValueExpr::try_from(col("_value").eq(lit(1.0))).unwrap()], - } - .push_through_dedup(&schema), - Predicate { - field_columns: None, - range: Some(TimestampRange::new(42, 1337)), - exprs: vec![col("tag1").eq(lit("foo")), col("time").eq(lit(1)),], - value_expr: vec![], - }, - ); - - // disassemble AND - assert_eq!( - Predicate { - field_columns: None, - range: None, - exprs: vec![col("tag1") - .eq(lit("foo")) - .and(col("field1").eq(lit(1.0))) - .and(col("time").eq(lit(1))),], - value_expr: vec![], - } - .push_through_dedup(&schema), - Predicate { - field_columns: None, - range: None, - exprs: vec![col("tag1").eq(lit("foo")), col("time").eq(lit(1)),], - value_expr: vec![], - }, - ); - - // filter no-row operations - assert_eq!( - Predicate { - field_columns: None, - range: None, - exprs: vec![ - col("tag1").eq(lit("foo")), - cube(vec![col("time").eq(lit(1))]), - ], - value_expr: vec![], - } - .push_through_dedup(&schema), - Predicate { - field_columns: None, - range: None, - exprs: vec![col("tag1").eq(lit("foo"))], - value_expr: vec![], - }, - ); - - // do NOT disassemble OR - assert_eq!( - Predicate { - field_columns: None, - range: None, - exprs: vec![col("tag1") - .eq(lit("foo")) - .or(col("field1").eq(lit(1.0))) - 
.or(col("time").eq(lit(1))),], - value_expr: vec![], - } - .push_through_dedup(&schema), - Predicate { - field_columns: None, - range: None, - exprs: vec![], - value_expr: vec![], - }, - ); - } }
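Dependency-free aside (not from the commit above): the helper removed here encoded the rule that a conjunct may be evaluated below de-duplication only if every column it references belongs to the primary key, so it either keeps or drops all duplicates of a row rather than just some of them. A toy sketch of that rule, with made-up types standing in for DataFusion expressions.

```rust
use std::collections::HashSet;

/// Toy stand-in for a single AND-ed predicate clause and the columns it touches.
#[derive(Debug, Clone, PartialEq)]
struct Conjunct {
    columns: Vec<&'static str>,
}

/// Keep only clauses that reference primary-key columns exclusively.
fn push_through_dedup(conjuncts: Vec<Conjunct>, primary_key: &HashSet<&str>) -> Vec<Conjunct> {
    conjuncts
        .into_iter()
        .filter(|c| c.columns.iter().all(|col| primary_key.contains(col)))
        .collect()
}

fn main() {
    let pk: HashSet<&str> = ["tag1", "tag2", "time"].into_iter().collect();
    let conjuncts = vec![
        Conjunct { columns: vec!["tag1"] },         // safe: primary key only
        Conjunct { columns: vec!["field1"] },       // unsafe: references a field
        Conjunct { columns: vec!["tag2", "time"] }, // safe
    ];
    assert_eq!(push_through_dedup(conjuncts, &pk).len(), 2);
}
```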
8f78fe7b083683cc6cc88060bd037d6852950f7b
Stuart Carnie
2023-04-18 16:47:52
rewrite `DISTINCT <ident>` to `Call` expression
This simplifies downstream processing of the AST if DISTINCT is consistent.
null
chore: rewrite `DISTINCT <ident>` to `Call` expression This simplifies downstream processing of the AST if DISTINCT is consistent.
diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs index 098ebf8668..35d29a30ad 100644 --- a/iox_query_influxql/src/plan/rewriter.rs +++ b/iox_query_influxql/src/plan/rewriter.rs @@ -194,6 +194,24 @@ fn rewrite_field_list(s: &dyn SchemaProvider, stmt: &mut SelectStatement) -> Res } } + // Rewrite all `DISTINCT <identifier>` expressions to `DISTINCT(<var ref>)` + if let ControlFlow::Break(e) = stmt.fields.iter_mut().try_for_each(|f| { + walk_expr_mut::<DataFusionError>(&mut f.expr, &mut |e| { + if let Expr::Distinct(ident) = e { + *e = Expr::Call(Call { + name: "distinct".to_owned(), + args: vec![Expr::VarRef(VarRef { + name: ident.take().into(), + data_type: None, + })], + }); + } + ControlFlow::Continue(()) + }) + }) { + return Err(e); + } + // Attempt to rewrite all variable references in the fields with their types, if one // hasn't been specified. if let ControlFlow::Break(e) = stmt.fields.iter_mut().try_for_each(|f| { @@ -1711,6 +1729,23 @@ mod test { "SELECT -1 * bytes_free::integer AS bytes_free FROM disk" ); + // DISTINCT clause + + // COUNT(DISTINCT) + let stmt = parse_select("SELECT COUNT(DISTINCT bytes_free) FROM disk"); + let stmt = rewrite_statement(&namespace, &stmt).unwrap(); + assert_eq!( + stmt.to_string(), + "SELECT count(distinct(bytes_free::integer)) AS count FROM disk" + ); + + let stmt = parse_select("SELECT DISTINCT bytes_free FROM disk"); + let stmt = rewrite_statement(&namespace, &stmt).unwrap(); + assert_eq!( + stmt.to_string(), + "SELECT distinct(bytes_free::integer) AS \"distinct\" FROM disk" + ); + // Call expressions let stmt = parse_select("SELECT COUNT(field_i64) FROM temp_01");
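Illustrative aside (a sketch only, using a toy AST rather than the influxdb_influxql_parser types): normalizing a dedicated `Distinct(ident)` node into an ordinary `Call` node means later passes only ever see one representation of DISTINCT.

```rust
// Toy AST: normalize `Distinct(ident)` into a generic `Call` node so that
// downstream passes handle a single shape for DISTINCT.
#[derive(Debug, Clone, PartialEq)]
enum Expr {
    VarRef { name: String },
    Distinct(String),
    Call { name: String, args: Vec<Expr> },
}

fn normalize_distinct(expr: &mut Expr) {
    // Recurse first so DISTINCT nested inside calls (e.g. COUNT) is handled too.
    if let Expr::Call { args, .. } = expr {
        args.iter_mut().for_each(normalize_distinct);
    }
    if let Expr::Distinct(ident) = expr {
        let name = std::mem::take(ident);
        *expr = Expr::Call {
            name: "distinct".to_owned(),
            args: vec![Expr::VarRef { name }],
        };
    }
}

fn main() {
    // COUNT(DISTINCT bytes_free) with DISTINCT still represented as its own node.
    let mut e = Expr::Call {
        name: "count".to_owned(),
        args: vec![Expr::Distinct("bytes_free".to_owned())],
    };
    normalize_distinct(&mut e);
    assert_eq!(
        e,
        Expr::Call {
            name: "count".to_owned(),
            args: vec![Expr::Call {
                name: "distinct".to_owned(),
                args: vec![Expr::VarRef { name: "bytes_free".to_owned() }],
            }],
        }
    );
}
```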
20e9c91866dcf6ac0a6bd201c9bbab58345abe05
Andrew Lamb
2023-04-12 12:07:19
Use workspace dependencies for `tonic`, `tonic-build`, etc (#7515)
* refactor: Use workspace dependencies for `tonic`, `tonic-build`, etc * chore: Run cargo hakari tasks ---------
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: Use workspace dependencies for `tonic`, `tonic-build`, etc (#7515) * refactor: Use workspace dependencies for `tonic`, `tonic-build`, etc * chore: Run cargo hakari tasks --------- Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.toml b/Cargo.toml index 4cbd657c4a..a475f047fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,6 +121,12 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="b878 datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="b87871fdd1f4ce64201eb1f7c79a0547627f37e9" } hashbrown = { version = "0.13.2" } parquet = { version = "36.0.0" } +tonic = { version = "0.8.3", features = ["tls", "tls-webpki-roots"] } +tonic-build = { version = "0.8" } +tonic-health = { version = "0.8.0" } +tonic-reflection = { version = "0.6.0" } + + # This profile optimizes for runtime performance and small binary size at the expense of longer # build times. It's most suitable for final release builds. diff --git a/authz/Cargo.toml b/authz/Cargo.toml index 5f82c57010..8bc75f4b98 100644 --- a/authz/Cargo.toml +++ b/authz/Cargo.toml @@ -16,5 +16,4 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" } # crates.io dependencies in alphabetical order. async-trait = "0.1" snafu = "0.7" -tonic = "0.8" - +tonic = { workspace = true } diff --git a/client_util/Cargo.toml b/client_util/Cargo.toml index 0da32caa9a..21a30b5c0a 100644 --- a/client_util/Cargo.toml +++ b/client_util/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true http = "0.2.9" reqwest = { version = "0.11", default-features = false, features = ["stream", "rustls-tls"] } thiserror = "1.0.40" -tonic = { version = "0.8", features = ["tls", "tls-webpki-roots"] } +tonic = { workspace = true } tower = "0.4" workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/flightsql/Cargo.toml b/flightsql/Cargo.toml index 3f70bfd17c..ff28eb09c2 100644 --- a/flightsql/Cargo.toml +++ b/flightsql/Cargo.toml @@ -20,5 +20,5 @@ snafu = "0.7" once_cell = { version = "1", default-features = false } prost = "0.11" tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/generated_types/Cargo.toml b/generated_types/Cargo.toml index ad4655d9c9..da22a1201d 100644 --- a/generated_types/Cargo.toml +++ b/generated_types/Cargo.toml @@ -19,11 +19,11 @@ prost = "0.11" query_functions = { path = "../query_functions" } serde = { version = "1.0", features = ["derive"] } snafu = "0.7" -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] # In alphabetical order -tonic-build = "0.8" +tonic-build = { workspace = true } prost-build = "0.11" pbjson-build = "0.5" diff --git a/grpc-binary-logger-proto/Cargo.toml b/grpc-binary-logger-proto/Cargo.toml index f3a7aebf06..8d82c3a591 100644 --- a/grpc-binary-logger-proto/Cargo.toml +++ b/grpc-binary-logger-proto/Cargo.toml @@ -8,9 +8,9 @@ license.workspace = true [dependencies] prost = "0.11" prost-types = { version = "0.11.7", features = ["std"] } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] prost-build = "0.11" -tonic-build = "0.8" +tonic-build = { workspace = true } diff --git a/grpc-binary-logger-test-proto/Cargo.toml b/grpc-binary-logger-test-proto/Cargo.toml index 20eae0aee4..f9e26b87d3 100644 --- a/grpc-binary-logger-test-proto/Cargo.toml +++ b/grpc-binary-logger-test-proto/Cargo.toml @@ -8,9 +8,9 @@ license.workspace = true [dependencies] prost = "0.11" prost-types = { version = "0.11.7", features = ["std"] } 
-tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] prost-build = "0.11" -tonic-build = "0.8" +tonic-build = { workspace = true } diff --git a/grpc-binary-logger/Cargo.toml b/grpc-binary-logger/Cargo.toml index 8ce5f94389..2c88c34d97 100644 --- a/grpc-binary-logger/Cargo.toml +++ b/grpc-binary-logger/Cargo.toml @@ -16,7 +16,7 @@ hyper = "0.14" pin-project = "1.0" prost = "0.11" tokio = {version = "1", features = [ "rt" ]} -tonic = "0.8" +tonic = { workspace = true } tower = "0.4" grpc-binary-logger-proto = { path = "../grpc-binary-logger-proto" } workspace-hack = { version = "0.1", path = "../workspace-hack" } @@ -28,4 +28,4 @@ assert_matches = "1" [build-dependencies] prost-build = "0.11" -tonic-build = "0.8" +tonic-build = { workspace = true } diff --git a/import/Cargo.toml b/import/Cargo.toml index 598a05d014..17750fc752 100644 --- a/import/Cargo.toml +++ b/import/Cargo.toml @@ -18,7 +18,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.95" thiserror = "1.0.40" tokio = { version = "1.27" } -tonic = { version = "0.8" } +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] diff --git a/influxdb_iox/Cargo.toml b/influxdb_iox/Cargo.toml index ebd29b00ad..6f46dc4434 100644 --- a/influxdb_iox/Cargo.toml +++ b/influxdb_iox/Cargo.toml @@ -71,7 +71,7 @@ tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time", "io-std"] } tokio-stream = { version = "0.1", features = ["net"] } tokio-util = { version = "0.7.7", features = ["compat"] } -tonic = "0.8" +tonic = { workspace = true } uuid = { version = "1", features = ["v4"] } # jemalloc-sys with unprefixed_malloc_on_supported_platforms feature and heappy are mutually exclusive tikv-jemalloc-sys = { version = "0.5.3", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] } diff --git a/influxdb_iox_client/Cargo.toml b/influxdb_iox_client/Cargo.toml index 6cc442a268..fc3b82860d 100644 --- a/influxdb_iox_client/Cargo.toml +++ b/influxdb_iox_client/Cargo.toml @@ -28,7 +28,7 @@ serde_json = "1.0.95" tokio = { version = "1.27", features = ["macros", "parking_lot", "rt-multi-thread"] } tokio-stream = "0.1.12" thiserror = "1.0.40" -tonic = { version = "0.8" } +tonic = { workspace = true } [dev-dependencies] insta = { version = "1" } diff --git a/influxdb_storage_client/Cargo.toml b/influxdb_storage_client/Cargo.toml index 2a70a5af54..28a6c958b2 100644 --- a/influxdb_storage_client/Cargo.toml +++ b/influxdb_storage_client/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true client_util = { path = "../client_util" } generated_types = { path = "../generated_types", default-features=false, features=["data_types"] } prost = "0.11" -tonic = { version = "0.8" } +tonic = { workspace = true } futures-util = { version = "0.3" } observability_deps = { path = "../observability_deps"} workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/ingester2/Cargo.toml b/ingester2/Cargo.toml index 179d51aba3..c29ba5794d 100644 --- a/ingester2/Cargo.toml +++ b/ingester2/Cargo.toml @@ -44,7 +44,7 @@ test_helpers = { path = "../test_helpers", features = ["future_timeout"], option thiserror = "1.0.40" tokio = { version = "1.27", features = ["macros", "parking_lot", "rt-multi-thread", "sync", "time"] } tokio-util = "0.7.7" -tonic = "0.8.3" +tonic = { workspace = true } 
trace = { version = "0.1.0", path = "../trace" } uuid = "1.3.1" wal = { version = "0.1.0", path = "../wal" } diff --git a/ingester2_test_ctx/Cargo.toml b/ingester2_test_ctx/Cargo.toml index eb203af816..fe678353e4 100644 --- a/ingester2_test_ctx/Cargo.toml +++ b/ingester2_test_ctx/Cargo.toml @@ -30,6 +30,6 @@ tempfile = { version = "3.5.0" } test_helpers = { path = "../test_helpers", features = ["future_timeout"] } tokio = { version = "1.27", features = ["macros", "parking_lot", "rt-multi-thread", "sync", "time"] } tokio-util = "0.7.7" -tonic = "0.8.3" +tonic = { workspace = true } wal = { version = "0.1.0", path = "../wal" } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/ioxd_common/Cargo.toml b/ioxd_common/Cargo.toml index b10c89a2d1..4a0e088a55 100644 --- a/ioxd_common/Cargo.toml +++ b/ioxd_common/Cargo.toml @@ -44,9 +44,9 @@ snafu = "0.7" tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } tokio-stream = { version = "0.1", features = ["net"] } tokio-util = { version = "0.7.7" } -tonic = "0.8" -tonic-health = "0.8.0" -tonic-reflection = "0.6.0" +tonic = { workspace = true } +tonic-health = { workspace = true } +tonic-reflection = { workspace = true } tower = "0.4" tower-http = { version = "0.4", features = ["catch-panic"] } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/ioxd_querier/Cargo.toml b/ioxd_querier/Cargo.toml index 4aafd65a03..885fae88a9 100644 --- a/ioxd_querier/Cargo.toml +++ b/ioxd_querier/Cargo.toml @@ -29,7 +29,7 @@ async-trait = "0.1" hyper = "0.14" thiserror = "1.0.40" tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } tokio-util = "0.7.7" diff --git a/router/Cargo.toml b/router/Cargo.toml index 14437c8ec5..b4dc122131 100644 --- a/router/Cargo.toml +++ b/router/Cargo.toml @@ -43,7 +43,7 @@ smallvec = "1.10.0" snafu = "0.7" thiserror = "1.0" tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] } -tonic = "0.8" +tonic = { workspace = true } trace = { path = "../trace/" } workspace-hack = { version = "0.1", path = "../workspace-hack" } write_summary = { path = "../write_summary" } diff --git a/service_common/Cargo.toml b/service_common/Cargo.toml index d67e6feff2..c77d565a82 100644 --- a/service_common/Cargo.toml +++ b/service_common/Cargo.toml @@ -16,7 +16,7 @@ flightsql = { path = "../flightsql" } metric = { path = "../metric" } parking_lot = "0.12" predicate = { path = "../predicate" } -tonic = "0.8" +tonic = { workspace = true } trace = { path = "../trace" } tracker = { path = "../tracker" } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/service_grpc_catalog/Cargo.toml b/service_grpc_catalog/Cargo.toml index c48daf2239..7af0a415b5 100644 --- a/service_grpc_catalog/Cargo.toml +++ b/service_grpc_catalog/Cargo.toml @@ -11,7 +11,7 @@ generated_types = { path = "../generated_types" } iox_catalog = { path = "../iox_catalog" } observability_deps = { path = "../observability_deps" } tokio = { version = "1", features = ["rt-multi-thread", "macros"] } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] diff --git a/service_grpc_flight/Cargo.toml b/service_grpc_flight/Cargo.toml index e9428cb84c..34c6bf6333 100644 --- a/service_grpc_flight/Cargo.toml 
+++ b/service_grpc_flight/Cargo.toml @@ -31,7 +31,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.95" snafu = "0.7" tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] diff --git a/service_grpc_influxrpc/Cargo.toml b/service_grpc_influxrpc/Cargo.toml index 94bd0efaf1..37c18b2767 100644 --- a/service_grpc_influxrpc/Cargo.toml +++ b/service_grpc_influxrpc/Cargo.toml @@ -33,7 +33,7 @@ serde_json = "1.0.95" snafu = "0.7" tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } tokio-stream = { version = "0.1", features = ["net"] } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] diff --git a/service_grpc_namespace/Cargo.toml b/service_grpc_namespace/Cargo.toml index 583fc19b8c..e7683835b9 100644 --- a/service_grpc_namespace/Cargo.toml +++ b/service_grpc_namespace/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true data_types = { path = "../data_types" } generated_types = { path = "../generated_types" } observability_deps = { path = "../observability_deps" } -tonic = "0.8" +tonic = { workspace = true } iox_catalog = { path = "../iox_catalog" } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/service_grpc_object_store/Cargo.toml b/service_grpc_object_store/Cargo.toml index 3f1118b56b..db11c7be1c 100644 --- a/service_grpc_object_store/Cargo.toml +++ b/service_grpc_object_store/Cargo.toml @@ -14,7 +14,7 @@ object_store = "0.5.6" observability_deps = { path = "../observability_deps" } parquet_file = { path = "../parquet_file" } tokio = { version = "1", features = ["rt-multi-thread", "macros"] } -tonic = "0.8" +tonic = { workspace = true } uuid = { version = "1", features = ["v4"] } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/service_grpc_schema/Cargo.toml b/service_grpc_schema/Cargo.toml index 391f713b28..621a7c4d19 100644 --- a/service_grpc_schema/Cargo.toml +++ b/service_grpc_schema/Cargo.toml @@ -9,7 +9,7 @@ license.workspace = true data_types = { path = "../data_types" } generated_types = { path = "../generated_types" } observability_deps = { path = "../observability_deps" } -tonic = "0.8" +tonic = { workspace = true } iox_catalog = { path = "../iox_catalog" } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/service_grpc_testing/Cargo.toml b/service_grpc_testing/Cargo.toml index f8a5097b22..3f3ef9279f 100644 --- a/service_grpc_testing/Cargo.toml +++ b/service_grpc_testing/Cargo.toml @@ -8,5 +8,5 @@ license.workspace = true [dependencies] generated_types = { path = "../generated_types" } observability_deps = { path = "../observability_deps" } -tonic = "0.8" +tonic = { workspace = true } workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/test_helpers_end_to_end/Cargo.toml b/test_helpers_end_to_end/Cargo.toml index 0744d9980d..27aad52232 100644 --- a/test_helpers_end_to_end/Cargo.toml +++ b/test_helpers_end_to_end/Cargo.toml @@ -33,5 +33,5 @@ tempfile = "3.5.0" test_helpers = { path = "../test_helpers", features = ["future_timeout"] } tokio = { version = "1.27", features = ["macros", "net", "parking_lot", "rt-multi-thread", "signal", "sync", "time"] } tokio-util = "0.7" -tonic = "0.8" +tonic = { workspace = true } 
workspace-hack = { version = "0.1", path = "../workspace-hack" }
c7b35659900db7cc5982ce2325b3714de613af38
Fraser Savage
2023-03-28 11:28:53
Compare data inside the partition as persisted
Use tokio::task::yield_now to allow the persist task to be enqueued instead of tokio::time::advance.
null
refactor: Compare data inside the partition as persisted Use tokio::task::yield_now to allow the persist task to be enqueued instead of tokio::time::advance.
diff --git a/ingester2/src/persist/hot_partitions.rs b/ingester2/src/persist/hot_partitions.rs index e8898d04ce..6cffb96faa 100644 --- a/ingester2/src/persist/hot_partitions.rs +++ b/ingester2/src/persist/hot_partitions.rs @@ -163,10 +163,8 @@ mod tests { // Observe the partition after the first write hot_partition_persister.observe(Arc::clone(&p), p.lock()); - // Give the persist call a chance to be enqueued - tokio::time::pause(); - tokio::time::advance(Duration::from_secs(1)).await; - tokio::time::resume(); + // Yield to allow the enqueue task to run + tokio::task::yield_now().await; // Assert no persist calls were made assert_eq!(persist_handle.calls().len(), 0); @@ -178,20 +176,22 @@ mod tests { ); // Write more data to the partition - let mb = lp_to_mutable_batch(r#"potatoes,city=Worcester people=2,crisps="fine" 5"#).1; - p.lock() - .buffer_write(mb, SequenceNumber::new(2)) - .expect("write should succeed"); + let want_query_data = { + let mb = lp_to_mutable_batch(r#"potatoes,city=Worcester people=2,crisps="fine" 5"#).1; + let mut guard = p.lock(); + guard + .buffer_write(mb, SequenceNumber::new(2)) + .expect("write should succeed"); + guard.get_query_data().expect("should have query adaptor") + }; hot_partition_persister.observe(Arc::clone(&p), p.lock()); - tokio::time::pause(); - tokio::time::advance(Duration::from_secs(1)).await; - tokio::time::resume(); - - // Assert the partition was queued for persistence + tokio::task::yield_now().await; + // Assert the partition was queued for persistence with the correct data. assert_matches!(persist_handle.calls().as_slice(), [got] => { - assert!(Arc::ptr_eq(got, &p)); + let got_query_data = got.lock().get_query_data().expect("should have query adaptor"); + assert_eq!(got_query_data.record_batches(), want_query_data.record_batches()); }); metric::assert_counter!( @@ -201,7 +201,7 @@ mod tests { value = 1, ); - // Wait for the persist call to complete, then check persist completion. + // Check persist completion. drop(hot_partition_persister); Arc::try_unwrap(persist_handle) .expect("should be no more refs")
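Generic aside on the scheduling trick above (not the ingester test itself; assumes a tokio runtime built with the `macros` and `rt` features): `tokio::task::yield_now().await` parks the current task and lets already-spawned tasks on the same runtime make progress, which is enough to let enqueued work run without manipulating the mock clock.

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let ran = Arc::new(AtomicBool::new(false));

    // Spawn a task representing the "enqueued" work (e.g. a persist call).
    let flag = Arc::clone(&ran);
    tokio::spawn(async move {
        flag.store(true, Ordering::SeqCst);
    });

    // On a current-thread runtime the spawned task cannot run until we yield.
    assert!(!ran.load(Ordering::SeqCst));

    // Yield to the scheduler so the spawned task gets a chance to execute.
    tokio::task::yield_now().await;

    assert!(ran.load(Ordering::SeqCst));
}
```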
efc817c2a87a0a76e6c8f3c1f848f4c7992ecc65
Carol (Nichols || Goulding)
2023-05-24 10:09:41
Remove From impl, leaving TablePartitionTemplateOverride::new as only creation mechanism
This makes it clearer that you do or do not have a custom table override (in the first argument to `new`).
null
fix: Remove From impl, leaving TablePartitionTemplateOverride::new as only creation mechanism This makes it clearer that you do or do not have a custom table override (in the first argument to `new`).
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs index 0128f4f5f1..02cd2b40eb 100644 --- a/data_types/src/partition_template.rs +++ b/data_types/src/partition_template.rs @@ -33,19 +33,6 @@ impl From<proto::PartitionTemplate> for NamespacePartitionTemplateOverride { } } -/// When a table is being created implicitly by a write, there is no possibility of a user-supplied -/// partition template, so the table will get the namespace's partition template. -impl From<&NamespacePartitionTemplateOverride> for TablePartitionTemplateOverride { - fn from(namespace_template: &NamespacePartitionTemplateOverride) -> Self { - Self( - namespace_template - .0 - .as_ref() - .map(|sw| SerializationWrapper(Arc::clone(&sw.0))), - ) - } -} - /// A partition template specified by a table record. #[derive(Debug, PartialEq, Clone, Default, sqlx::Type)] #[sqlx(transparent)] @@ -60,12 +47,13 @@ impl TablePartitionTemplateOverride { custom_table_template: Option<proto::PartitionTemplate>, namespace_template: &NamespacePartitionTemplateOverride, ) -> Self { - custom_table_template - .map(Arc::new) - .map(SerializationWrapper) - .map(Some) - .map(Self) - .unwrap_or_else(|| namespace_template.into()) + match (custom_table_template, namespace_template.0.as_ref()) { + (Some(table_proto), _) => Self(Some(SerializationWrapper(Arc::new(table_proto)))), + (None, Some(namespace_serialization_wrapper)) => Self(Some(SerializationWrapper( + Arc::clone(&namespace_serialization_wrapper.0), + ))), + (None, None) => Self(None), + } } /// Iterate through the protobuf parts and lend out what the `mutable_batch` crate needs to @@ -253,7 +241,7 @@ mod tests { let namespace_json_str: String = buf.iter().map(extract_sqlite_argument_text).collect(); assert_eq!(namespace_json_str, expected_json_str); - let table = TablePartitionTemplateOverride::from(&namespace); + let table = TablePartitionTemplateOverride::new(None, &namespace); let mut buf = Default::default(); let _ = <TablePartitionTemplateOverride as Encode<'_, sqlx::Sqlite>>::encode_by_ref( &table, &mut buf, diff --git a/ingester_test_ctx/src/lib.rs b/ingester_test_ctx/src/lib.rs index 6fb3f1b001..50551d3ed4 100644 --- a/ingester_test_ctx/src/lib.rs +++ b/ingester_test_ctx/src/lib.rs @@ -251,7 +251,8 @@ where .namespaces .get_mut(&namespace_id) .expect("namespace does not exist"); - let partition_template = TablePartitionTemplateOverride::from(&schema.partition_template); + let partition_template = + TablePartitionTemplateOverride::new(None, &schema.partition_template); let batches = lines_to_batches(lp, 0).unwrap(); diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs index 68f05eb3c9..91dcd8d3c8 100644 --- a/iox_catalog/src/interface.rs +++ b/iox_catalog/src/interface.rs @@ -1095,7 +1095,7 @@ pub(crate) mod test_helpers { .tables() .create( "test_table", - TablePartitionTemplateOverride::from(&namespace.partition_template), + TablePartitionTemplateOverride::new(None, &namespace.partition_template), namespace.id, ) .await; @@ -1190,7 +1190,7 @@ pub(crate) mod test_helpers { .tables() .create( "definitely_unique", - TablePartitionTemplateOverride::from(&latest.partition_template), + TablePartitionTemplateOverride::new(None, &latest.partition_template), latest.id, ) .await @@ -1261,7 +1261,7 @@ pub(crate) mod test_helpers { .unwrap(); assert_eq!( table_templated_by_namespace.partition_template, - TablePartitionTemplateOverride::from(&custom_namespace_template) + TablePartitionTemplateOverride::new(None, 
&custom_namespace_template) ); repos diff --git a/iox_catalog/src/lib.rs b/iox_catalog/src/lib.rs index 006c285625..d126ebbec4 100644 --- a/iox_catalog/src/lib.rs +++ b/iox_catalog/src/lib.rs @@ -223,7 +223,10 @@ where .tables() .create( table_name, - TablePartitionTemplateOverride::from(namespace_partition_template), + // This table is being created implicitly by this write, so there's no + // possibility of a user-supplied partition template here, which is why there's + // a hardcoded `None`. + TablePartitionTemplateOverride::new(None, namespace_partition_template), namespace_id, ) .await; @@ -301,7 +304,7 @@ pub mod test_helpers { .tables() .create( name, - TablePartitionTemplateOverride::from(&namespace.partition_template), + TablePartitionTemplateOverride::new(None, &namespace.partition_template), namespace.id, ) .await diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index 8e8852e9c9..983a795021 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -2412,7 +2412,10 @@ RETURNING *; // it should have the namespace's template assert_eq!( table_no_template_with_namespace_template.partition_template, - TablePartitionTemplateOverride::from(&namespace_custom_template.partition_template) + TablePartitionTemplateOverride::new( + None, + &namespace_custom_template.partition_template + ) ); // and store that value in the database record. @@ -2427,7 +2430,10 @@ RETURNING *; record.try_get("partition_template").unwrap(); assert_eq!( partition_template.unwrap(), - TablePartitionTemplateOverride::from(&namespace_custom_template.partition_template) + TablePartitionTemplateOverride::new( + None, + &namespace_custom_template.partition_template + ) ); // # Table template true, namespace template false diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs index df680fea4e..aeb96f1500 100644 --- a/iox_catalog/src/sqlite.rs +++ b/iox_catalog/src/sqlite.rs @@ -2053,7 +2053,10 @@ RETURNING *; // it should have the namespace's template assert_eq!( table_no_template_with_namespace_template.partition_template, - TablePartitionTemplateOverride::from(&namespace_custom_template.partition_template) + TablePartitionTemplateOverride::new( + None, + &namespace_custom_template.partition_template + ) ); // and store that value in the database record. @@ -2068,7 +2071,10 @@ RETURNING *; record.try_get("partition_template").unwrap(); assert_eq!( partition_template.unwrap(), - TablePartitionTemplateOverride::from(&namespace_custom_template.partition_template) + TablePartitionTemplateOverride::new( + None, + &namespace_custom_template.partition_template + ) ); // # Table template true, namespace template false
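Sketch of the API shape being described (toy types, not the `data_types` crate): a single constructor that takes an explicit `Option` forces every call site to state whether a table-level override exists, whereas a `From` impl hides that decision.

```rust
#[derive(Debug, Clone, PartialEq)]
struct Template(String);

#[derive(Debug, Default)]
struct NamespaceTemplate(Option<Template>);

#[derive(Debug, PartialEq)]
struct TableTemplate(Option<Template>);

impl TableTemplate {
    /// The only way to build a table template: the caller must pass either
    /// `Some(custom)` or an explicit `None` meaning "inherit from the namespace".
    fn new(custom: Option<Template>, namespace: &NamespaceTemplate) -> Self {
        match (custom, namespace.0.as_ref()) {
            (Some(t), _) => Self(Some(t)),
            (None, Some(ns)) => Self(Some(ns.clone())),
            (None, None) => Self(None),
        }
    }
}

fn main() {
    let ns = NamespaceTemplate(Some(Template("%Y-%m-%d".to_owned())));
    // The `None` makes it obvious this table carries no custom override.
    let table = TableTemplate::new(None, &ns);
    assert_eq!(table, TableTemplate(Some(Template("%Y-%m-%d".to_owned()))));
}
```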
8dc18a98384e4d7c61117863c582843e1005505c
Dom Dwyer
2023-01-13 14:05:30
remove double-ref Partition map
The ingester no longer needs to access a specific PartitionData by ID (they are addressed either via an iterator over the BufferTree, or shared by Arc reference). This allows us to remove the extra map maintaining ID -> PartitionData references, and the shared access lock protecting it.
null
perf: remove double-ref Partition map The ingester no longer needs to access a specific PartitionData by ID (they are addressed either via an iterator over the BufferTree, or shared by Arc reference). This allows us to remove the extra map maintaining ID -> PartitionData references, and the shared access lock protecting it.
diff --git a/ingester2/src/buffer_tree/namespace.rs b/ingester2/src/buffer_tree/namespace.rs index 2fe285b1ae..b886c6fb31 100644 --- a/ingester2/src/buffer_tree/namespace.rs +++ b/ingester2/src/buffer_tree/namespace.rs @@ -231,7 +231,7 @@ where mod tests { use std::{sync::Arc, time::Duration}; - use data_types::{PartitionId, PartitionKey, ShardId, ShardIndex}; + use data_types::{PartitionId, PartitionKey, ShardId}; use metric::{Attributes, Metric}; use super::*; @@ -246,7 +246,6 @@ mod tests { test_util::make_write_op, }; - const SHARD_INDEX: ShardIndex = ShardIndex::new(24); const TABLE_NAME: &str = "bananas"; const TABLE_ID: TableId = TableId::new(44); const NAMESPACE_NAME: &str = "platanos"; diff --git a/ingester2/src/buffer_tree/table.rs b/ingester2/src/buffer_tree/table.rs index 0ed2db7fae..2560dc3c27 100644 --- a/ingester2/src/buffer_tree/table.rs +++ b/ingester2/src/buffer_tree/table.rs @@ -5,10 +5,10 @@ pub(crate) mod name_resolver; use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; -use data_types::{NamespaceId, PartitionId, PartitionKey, SequenceNumber, ShardId, TableId}; +use data_types::{NamespaceId, PartitionKey, SequenceNumber, ShardId, TableId}; use datafusion_util::MemoryStream; use mutable_batch::MutableBatch; -use parking_lot::{Mutex, RwLock}; +use parking_lot::Mutex; use schema::Projection; use trace::span::{Span, SpanRecorder}; @@ -25,38 +25,6 @@ use crate::{ }, }; -/// A double-referenced map where [`PartitionData`] can be looked up by -/// [`PartitionKey`], or ID. -#[derive(Debug, Default)] -struct DoubleRef { - by_key: ArcMap<PartitionKey, Mutex<PartitionData>>, - by_id: ArcMap<PartitionId, Mutex<PartitionData>>, -} - -impl DoubleRef { - /// Try to insert the provided [`PartitionData`]. - /// - /// Note that the partition MAY have been inserted concurrently, and the - /// returned [`PartitionData`] MAY be a different instance for the same - /// underlying partition. - fn try_insert(&mut self, ns: PartitionData) -> Arc<Mutex<PartitionData>> { - let id = ns.partition_id(); - let key = ns.partition_key().clone(); - - let ns = Arc::new(Mutex::new(ns)); - self.by_key.get_or_insert_with(&key, || Arc::clone(&ns)); - self.by_id.get_or_insert_with(&id, || ns) - } - - fn by_key(&self, key: &PartitionKey) -> Option<Arc<Mutex<PartitionData>>> { - self.by_key.get(key) - } - - fn by_id(&self, id: PartitionId) -> Option<Arc<Mutex<PartitionData>>> { - self.by_id.get(&id) - } -} - /// The string name / identifier of a Table. /// /// A reference-counted, cheap clone-able string. @@ -113,7 +81,7 @@ pub(crate) struct TableData<O> { partition_provider: Arc<dyn PartitionProvider>, // Map of partition key to its data - partition_data: RwLock<DoubleRef>, + partition_data: ArcMap<PartitionKey, Mutex<PartitionData>>, post_write_observer: Arc<O>, transition_shard_id: ShardId, @@ -165,20 +133,7 @@ impl<O> TableData<O> { /// invoked, but the data within them may change as they continue to buffer /// DML operations. pub(crate) fn partitions(&self) -> Vec<Arc<Mutex<PartitionData>>> { - self.partition_data.read().by_key.values() - } - - /// Return the [`PartitionData`] for the specified ID. - pub(crate) fn partition(&self, partition_id: PartitionId) -> Option<Arc<Mutex<PartitionData>>> { - self.partition_data.read().by_id(partition_id) - } - - /// Return the [`PartitionData`] for the specified partition key. 
- pub(crate) fn get_partition_by_key( - &self, - partition_key: &PartitionKey, - ) -> Option<Arc<Mutex<PartitionData>>> { - self.partition_data.read().by_key(partition_key) + self.partition_data.values() } /// Returns the table ID for this partition. @@ -209,7 +164,7 @@ where batch: MutableBatch, partition_key: PartitionKey, ) -> Result<(), mutable_batch::Error> { - let p = self.partition_data.read().by_key(&partition_key); + let p = self.partition_data.get(&partition_key); let partition_data = match p { Some(p) => p, None => { @@ -224,11 +179,12 @@ where self.transition_shard_id, ) .await; - // Add the double-referenced partition to the map. + // Add the partition to the map. // // This MAY return a different instance than `p` if another // thread has already initialised the partition. - self.partition_data.write().try_insert(p) + self.partition_data + .get_or_insert_with(&partition_key, || Arc::new(Mutex::new(p))) } }; @@ -328,7 +284,7 @@ mod tests { const TRANSITION_SHARD_ID: ShardId = ShardId::new(84); #[tokio::test] - async fn test_partition_double_ref() { + async fn test_partition_init() { // Configure the mock partition provider to return a partition for this // table ID. let partition_provider = Arc::new(MockPartitionProvider::default().with_partition( @@ -368,12 +324,7 @@ mod tests { .unwrap(); // Assert the table does not contain the test partition - assert!(table - .partition_data - .read() - .by_key(&PARTITION_KEY.into()) - .is_none()); - assert!(table.partition_data.read().by_id(PARTITION_ID).is_none()); + assert!(table.partition_data.get(&PARTITION_KEY.into()).is_none()); // Write some test data table @@ -382,11 +333,6 @@ mod tests { .expect("buffer op should succeed"); // Referencing the partition should succeed - assert!(table - .partition_data - .read() - .by_key(&PARTITION_KEY.into()) - .is_some()); - assert!(table.partition_data.read().by_id(PARTITION_ID).is_some()); + assert!(table.partition_data.get(&PARTITION_KEY.into()).is_some()); } }
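Generic sketch of the simplification described above (toy types; a plain `Mutex<HashMap<..>>` stands in for the ingester's concurrent map): once nothing needs ID-based lookup, a single key-indexed map suffices and the second index plus its extra lock can be dropped.

```rust
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

#[derive(Debug, Default)]
struct PartitionData {
    rows: usize,
}

/// Single-index map: partitions are addressed by key, or iterated in bulk.
#[derive(Default)]
struct Partitions {
    by_key: Mutex<HashMap<String, Arc<Mutex<PartitionData>>>>,
}

impl Partitions {
    fn get_or_create(&self, key: &str) -> Arc<Mutex<PartitionData>> {
        let mut map = self.by_key.lock().unwrap();
        Arc::clone(
            map.entry(key.to_owned())
                .or_insert_with(|| Arc::new(Mutex::new(PartitionData::default()))),
        )
    }

    /// Snapshot of all partitions, e.g. for queries or persistence.
    fn all(&self) -> Vec<Arc<Mutex<PartitionData>>> {
        self.by_key.lock().unwrap().values().map(Arc::clone).collect()
    }
}

fn main() {
    let partitions = Partitions::default();
    partitions.get_or_create("2023-01-13").lock().unwrap().rows += 1;
    assert_eq!(partitions.all().len(), 1);
    assert_eq!(partitions.get_or_create("2023-01-13").lock().unwrap().rows, 1);
}
```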
94c2f94ea150d4ecd82e43d97ca15d2efa284f8a
Andrew Lamb
2022-12-19 06:35:20
Extract common ArrowFlight client into iox_arrow_flight (#6427)
* refactor: Extract common ArrowFlight client into iox_arrow_flight * chore: Run cargo hakari tasks * fix: clarify intent of iox_arrow_flight crate * refactor: Apply suggestions from code review Co-authored-by: Raphael Taylor-Davies <[email protected]> * fix: loop --> while let * fix: REmove make_tonic_error in favor of From impl
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: Raphael Taylor-Davies <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: Extract common ArrowFlight client into iox_arrow_flight (#6427) * refactor: Extract common ArrowFlight client into iox_arrow_flight * chore: Run cargo hakari tasks * fix: clarify intent of iox_arrow_flight crate * refactor: Apply suggestions from code review Co-authored-by: Raphael Taylor-Davies <[email protected]> * fix: loop --> while let * fix: REmove make_tonic_error in favor of From impl Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: Raphael Taylor-Davies <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index f1ef6627a0..028e5895fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -214,6 +214,7 @@ dependencies = [ "prost 0.11.3", "prost-build 0.11.3", "prost-derive 0.11.2", + "prost-types 0.11.2", "tokio", "tonic", "tonic-build", @@ -2344,6 +2345,7 @@ dependencies = [ "influxdb_iox_client", "influxdb_storage_client", "influxrpc_parser", + "iox_arrow_flight", "iox_catalog", "iox_query", "iox_time", @@ -2395,13 +2397,13 @@ name = "influxdb_iox_client" version = "0.1.0" dependencies = [ "arrow", - "arrow-flight", "arrow_util", "bytes", "client_util", "futures-util", "generated_types", "influxdb_line_protocol", + "iox_arrow_flight", "prost 0.11.3", "rand", "reqwest", @@ -2409,8 +2411,6 @@ dependencies = [ "tokio", "tokio-stream", "tonic", - "trace", - "trace_exporters", "trace_http", ] @@ -2615,6 +2615,20 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "iox_arrow_flight" +version = "0.1.0" +dependencies = [ + "arrow", + "arrow-flight", + "futures", + "futures-util", + "prost 0.11.3", + "prost-types 0.11.2", + "tonic", + "workspace-hack", +] + [[package]] name = "iox_catalog" version = "0.1.0" @@ -4252,6 +4266,7 @@ dependencies = [ "futures", "generated_types", "influxdb_iox_client", + "iox_arrow_flight", "iox_catalog", "iox_query", "iox_tests", @@ -4280,6 +4295,8 @@ dependencies = [ "tokio-util", "tonic", "trace", + "trace_exporters", + "trace_http", "tracker", "uuid", "workspace-hack", @@ -4321,6 +4338,7 @@ dependencies = [ "hashbrown 0.13.1", "influxdb_iox_client", "ingester", + "iox_arrow_flight", "iox_catalog", "iox_query", "iox_tests", @@ -6365,6 +6383,7 @@ version = "0.1.0" dependencies = [ "ahash 0.8.2", "arrow", + "arrow-flight", "arrow-ord", "arrow-string", "base64 0.13.1", diff --git a/Cargo.toml b/Cargo.toml index 9f9bae10c5..5f39a98196 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ members = [ "influxrpc_parser", "ingester", "ingester2", + "iox_arrow_flight", "iox_catalog", "iox_data_generator", "garbage_collector", diff --git a/influxdb_iox/Cargo.toml b/influxdb_iox/Cargo.toml index d06e9f24b5..167506cba8 100644 --- a/influxdb_iox/Cargo.toml +++ b/influxdb_iox/Cargo.toml @@ -18,6 +18,7 @@ influxdb_iox_client = { path = "../influxdb_iox_client", features = ["flight", " influxdb_storage_client = { path = "../influxdb_storage_client" } influxrpc_parser = { path = "../influxrpc_parser"} iox_catalog = { path = "../iox_catalog" } +iox_arrow_flight = { path = "../iox_arrow_flight" } ioxd_common = { path = "../ioxd_common"} ioxd_compactor = { path = "../ioxd_compactor"} ioxd_ingester = { path = "../ioxd_ingester"} diff --git a/influxdb_iox/src/commands/query.rs b/influxdb_iox/src/commands/query.rs index 6a4a437b3f..52e4066126 100644 --- a/influxdb_iox/src/commands/query.rs +++ b/influxdb_iox/src/commands/query.rs @@ -59,10 +59,7 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> { // It might be nice to do some sort of streaming write // rather than buffering the whole thing. - let mut batches = vec![]; - while let Some(data) = query_results.next().await? 
{ - batches.push(data); - } + let batches = query_results.collect().await?; let formatted_result = format.format(&batches)?; diff --git a/influxdb_iox/src/commands/query_ingester.rs b/influxdb_iox/src/commands/query_ingester.rs index 3c9edf7ebc..860fe12415 100644 --- a/influxdb_iox/src/commands/query_ingester.rs +++ b/influxdb_iox/src/commands/query_ingester.rs @@ -3,9 +3,10 @@ use generated_types::ingester::{ }; use influxdb_iox_client::{ connection::Connection, - flight::{self, low_level::LowLevelMessage}, + flight::{self}, format::QueryOutputFormat, }; +use iox_arrow_flight::prost::Message; use std::str::FromStr; use thiserror::Error; @@ -15,7 +16,7 @@ pub enum Error { Formatting(#[from] influxdb_iox_client::format::Error), #[error("Error querying: {0}")] - Query(#[from] influxdb_iox_client::flight::Error), + Query(#[from] iox_arrow_flight::FlightError), #[error("Error decoding base64-encoded predicate from argument: {0}")] PredicateFromBase64(#[from] DecodeProtoPredicateFromBase64Error), @@ -53,7 +54,7 @@ pub struct Config { } pub async fn command(connection: Connection, config: Config) -> Result<()> { - let mut client = flight::low_level::Client::new(connection, None); + let client = influxdb_iox_client::flight::Client::new(connection); let Config { namespace_id, format, @@ -76,17 +77,13 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> { predicate, namespace_id, }; - - let mut query_results = client.perform_query(request).await?; + // send the message directly encoded as bytes to the ingester. + let request = request.encode_to_vec(); + let mut query_results = client.into_inner().do_get(request).await?; // It might be nice to do some sort of streaming write // rather than buffering the whole thing. - let mut batches = vec![]; - while let Some((msg, _md)) = query_results.next().await? 
{ - if let LowLevelMessage::RecordBatch(batch) = msg { - batches.push(batch); - } - } + let batches = query_results.collect().await?; let formatted_result = format.format(&batches)?; diff --git a/influxdb_iox/tests/end_to_end_cases/cli.rs b/influxdb_iox/tests/end_to_end_cases/cli.rs index 0693b60797..ce857c13f5 100644 --- a/influxdb_iox/tests/end_to_end_cases/cli.rs +++ b/influxdb_iox/tests/end_to_end_cases/cli.rs @@ -476,9 +476,9 @@ async fn query_error_handling() { .arg("drop table this_table_doesnt_exist") .assert() .failure() - .stderr(predicate::eq( - "Error querying: Error while planning query: This feature is not \ - implemented: DropTable\n", + .stderr(predicate::str::contains( + "Error while planning query: This feature is not \ + implemented: DropTable", )); } .boxed() @@ -517,8 +517,8 @@ async fn influxql_error_handling() { .arg("CREATE DATABASE foo") .assert() .failure() - .stderr(predicate::eq( - "Error querying: Error while planning query: This feature is not implemented: CREATE DATABASE\n", + .stderr(predicate::str::contains( + "Error while planning query: This feature is not implemented: CREATE DATABASE", )); } .boxed() diff --git a/influxdb_iox/tests/end_to_end_cases/ingester.rs b/influxdb_iox/tests/end_to_end_cases/ingester.rs index fd307927a3..5e05311f52 100644 --- a/influxdb_iox/tests/end_to_end_cases/ingester.rs +++ b/influxdb_iox/tests/end_to_end_cases/ingester.rs @@ -1,10 +1,10 @@ +use arrow::{datatypes::SchemaRef, record_batch::RecordBatch}; use arrow_util::assert_batches_sorted_eq; use data_types::{NamespaceId, TableId}; -use generated_types::{ - influxdata::iox::ingester::v1::PartitionStatus, ingester::IngesterQueryRequest, -}; +use generated_types::{influxdata::iox::ingester::v1 as proto, ingester::IngesterQueryRequest}; use http::StatusCode; use influxdb_iox_client::flight::generated_types::IngesterQueryResponseMetadata; +use iox_arrow_flight::{prost::Message, DecodedFlightData, DecodedPayload, FlightDataStream}; use test_helpers_end_to_end::{ get_write_token, maybe_skip_integration, wait_for_readable, MiniCluster, }; @@ -28,9 +28,8 @@ async fn ingester_flight_api() { let write_token = get_write_token(&response); wait_for_readable(write_token, cluster.ingester().ingester_grpc_connection()).await; - let mut querier_flight = influxdb_iox_client::flight::low_level::Client::< - influxdb_iox_client::flight::generated_types::IngesterQueryRequest, - >::new(cluster.ingester().ingester_grpc_connection(), None); + let querier_flight = + influxdb_iox_client::flight::Client::new(cluster.ingester().ingester_grpc_connection()); let query = IngesterQueryRequest::new( cluster.namespace_id().await, @@ -39,19 +38,24 @@ async fn ingester_flight_api() { Some(::predicate::EMPTY_PREDICATE), ); + let query: proto::IngesterQueryRequest = query.try_into().unwrap(); + let mut performed_query = querier_flight - .perform_query(query.try_into().unwrap()) + .into_inner() + .do_get(query.encode_to_vec()) .await - .unwrap(); + .unwrap() + .into_inner(); + + let (msg, app_metadata) = next_message(&mut performed_query).await.unwrap(); + assert!(matches!(msg, DecodedPayload::None), "{:?}", msg); - let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); - msg.unwrap_none(); let partition_id = app_metadata.partition_id; assert_eq!( app_metadata, IngesterQueryResponseMetadata { partition_id, - status: Some(PartitionStatus { + status: Some(proto::PartitionStatus { parquet_max_sequence_number: None, }), ingester_uuid: String::new(), @@ -59,12 +63,12 @@ async fn ingester_flight_api() 
{ }, ); - let (msg, _) = performed_query.next().await.unwrap().unwrap(); - let schema = msg.unwrap_schema(); + let (msg, _) = next_message(&mut performed_query).await.unwrap(); + let schema = unwrap_schema(msg); let mut query_results = vec![]; - while let Some((msg, _md)) = performed_query.next().await.unwrap() { - let batch = msg.unwrap_record_batch(); + while let Some((msg, _md)) = next_message(&mut performed_query).await { + let batch = unwrap_record_batch(msg); query_results.push(batch); } @@ -104,9 +108,9 @@ async fn ingester2_flight_api() { let response = cluster.write_to_router(lp).await; assert_eq!(response.status(), StatusCode::NO_CONTENT); - let mut querier_flight = influxdb_iox_client::flight::low_level::Client::< - influxdb_iox_client::flight::generated_types::IngesterQueryRequest, - >::new(cluster.ingester().ingester_grpc_connection(), None); + let mut querier_flight = + influxdb_iox_client::flight::Client::new(cluster.ingester().ingester_grpc_connection()) + .into_inner(); let query = IngesterQueryRequest::new( cluster.namespace_id().await, @@ -115,23 +119,27 @@ async fn ingester2_flight_api() { Some(::predicate::EMPTY_PREDICATE), ); + let query: proto::IngesterQueryRequest = query.try_into().unwrap(); + let query = query.encode_to_vec(); + let mut performed_query = querier_flight - .perform_query(query.clone().try_into().unwrap()) + .do_get(query.clone()) .await - .unwrap(); + .unwrap() + .into_inner(); - let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); - msg.unwrap_none(); + let (msg, app_metadata) = next_message(&mut performed_query).await.unwrap(); + assert!(matches!(msg, DecodedPayload::None), "{:?}", msg); let ingester_uuid = app_metadata.ingester_uuid.clone(); assert!(!ingester_uuid.is_empty()); - let (msg, _) = performed_query.next().await.unwrap().unwrap(); - let schema = msg.unwrap_schema(); + let (msg, _) = next_message(&mut performed_query).await.unwrap(); + let schema = unwrap_schema(msg); let mut query_results = vec![]; - while let Some((msg, _md)) = performed_query.next().await.unwrap() { - let batch = msg.unwrap_record_batch(); + while let Some((msg, _md)) = next_message(&mut performed_query).await { + let batch = unwrap_record_batch(msg); query_results.push(batch); } @@ -157,21 +165,20 @@ async fn ingester2_flight_api() { // Ensure the ingester UUID is the same in the next query let mut performed_query = querier_flight - .perform_query(query.clone().try_into().unwrap()) + .do_get(query.clone()) .await - .unwrap(); - let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); - msg.unwrap_none(); + .unwrap() + .into_inner(); + + let (msg, app_metadata) = next_message(&mut performed_query).await.unwrap(); + assert!(matches!(msg, DecodedPayload::None), "{:?}", msg); assert_eq!(app_metadata.ingester_uuid, ingester_uuid); // Restart the ingester and ensure it gets a new UUID cluster.restart_ingester().await; - let mut performed_query = querier_flight - .perform_query(query.try_into().unwrap()) - .await - .unwrap(); - let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); - msg.unwrap_none(); + let mut performed_query = querier_flight.do_get(query).await.unwrap().into_inner(); + let (msg, app_metadata) = next_message(&mut performed_query).await.unwrap(); + assert!(matches!(msg, DecodedPayload::None), "{:?}", msg); assert_ne!(app_metadata.ingester_uuid, ingester_uuid); } @@ -183,9 +190,9 @@ async fn ingester_flight_api_namespace_not_found() { // Set up cluster let cluster = 
MiniCluster::create_shared(database_url).await; - let mut querier_flight = influxdb_iox_client::flight::low_level::Client::< - influxdb_iox_client::flight::generated_types::IngesterQueryRequest, - >::new(cluster.ingester().ingester_grpc_connection(), None); + let mut querier_flight = + influxdb_iox_client::flight::Client::new(cluster.ingester().ingester_grpc_connection()) + .into_inner(); let query = IngesterQueryRequest::new( NamespaceId::new(i64::MAX), @@ -193,12 +200,13 @@ async fn ingester_flight_api_namespace_not_found() { vec![], Some(::predicate::EMPTY_PREDICATE), ); + let query: proto::IngesterQueryRequest = query.try_into().unwrap(); let err = querier_flight - .perform_query(query.try_into().unwrap()) + .do_get(query.encode_to_vec()) .await .unwrap_err(); - if let influxdb_iox_client::flight::Error::GrpcError(status) = err { + if let iox_arrow_flight::FlightError::Tonic(status) = err { assert_eq!(status.code(), tonic::Code::NotFound); } else { panic!("Wrong error variant: {err}") @@ -222,9 +230,9 @@ async fn ingester_flight_api_table_not_found() { let write_token = get_write_token(&response); wait_for_readable(write_token, cluster.ingester().ingester_grpc_connection()).await; - let mut querier_flight = influxdb_iox_client::flight::low_level::Client::< - influxdb_iox_client::flight::generated_types::IngesterQueryRequest, - >::new(cluster.ingester().ingester_grpc_connection(), None); + let mut querier_flight = + influxdb_iox_client::flight::Client::new(cluster.ingester().ingester_grpc_connection()) + .into_inner(); let query = IngesterQueryRequest::new( cluster.namespace_id().await, @@ -232,14 +240,41 @@ async fn ingester_flight_api_table_not_found() { vec![], Some(::predicate::EMPTY_PREDICATE), ); + let query: proto::IngesterQueryRequest = query.try_into().unwrap(); let err = querier_flight - .perform_query(query.try_into().unwrap()) + .do_get(query.encode_to_vec()) .await .unwrap_err(); - if let influxdb_iox_client::flight::Error::GrpcError(status) = err { + if let iox_arrow_flight::FlightError::Tonic(status) = err { assert_eq!(status.code(), tonic::Code::NotFound); } else { panic!("Wrong error variant: {err}") } } + +async fn next_message( + performed_query: &mut FlightDataStream, +) -> Option<(DecodedPayload, proto::IngesterQueryResponseMetadata)> { + let DecodedFlightData { inner, payload } = performed_query.next().await.unwrap()?; + + // extract the metadata from the underlying FlightData structure + let app_metadata = &inner.app_metadata[..]; + let app_metadata: proto::IngesterQueryResponseMetadata = Message::decode(app_metadata).unwrap(); + + Some((payload, app_metadata)) +} + +fn unwrap_schema(msg: DecodedPayload) -> SchemaRef { + match msg { + DecodedPayload::Schema(s) => s, + _ => panic!("Unexpected message type: {:?}", msg), + } +} + +fn unwrap_record_batch(msg: DecodedPayload) -> RecordBatch { + match msg { + DecodedPayload::RecordBatch(b) => b, + _ => panic!("Unexpected message type: {:?}", msg), + } +} diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs index 5a2940187f..493ea149a2 100644 --- a/influxdb_iox/tests/end_to_end_cases/querier.rs +++ b/influxdb_iox/tests/end_to_end_cases/querier.rs @@ -663,7 +663,7 @@ async fn table_or_namespace_not_found() { .read_filter(read_filter_request) .await .unwrap_err(); - check_tonic_status(status, tonic::Code::NotFound, None); + check_tonic_status(&status, tonic::Code::NotFound, None); } .boxed() })), @@ -740,7 +740,7 @@ async fn oom_protection() { 
.read_group(read_group_request) .await .unwrap_err(); - check_tonic_status(status, tonic::Code::ResourceExhausted, None); + check_tonic_status(&status, tonic::Code::ResourceExhausted, None); } .boxed() })), diff --git a/influxdb_iox_client/Cargo.toml b/influxdb_iox_client/Cargo.toml index 9abce3c010..be1bd7b138 100644 --- a/influxdb_iox_client/Cargo.toml +++ b/influxdb_iox_client/Cargo.toml @@ -7,17 +7,17 @@ license.workspace = true [features] default = ["flight", "format"] -flight = ["arrow", "arrow-flight", "arrow_util", "futures-util"] +flight = ["arrow", "iox_arrow_flight", "arrow_util", "futures-util"] format = ["arrow", "arrow_util"] [dependencies] arrow = { workspace = true, optional = true } -arrow-flight = { workspace = true, optional = true } arrow_util = { path = "../arrow_util", optional = true } bytes = "1.3" client_util = { path = "../client_util" } futures-util = { version = "0.3", optional = true } influxdb_line_protocol = { path = "../influxdb_line_protocol"} +iox_arrow_flight = { path = "../iox_arrow_flight", optional = true } generated_types = { path = "../generated_types", default-features = false, features = ["data_types_conversions"] } prost = "0.11" rand = "0.8.3" @@ -26,6 +26,4 @@ tokio = { version = "1.22", features = ["macros", "parking_lot", "rt-multi-threa tokio-stream = "0.1.11" thiserror = "1.0.38" tonic = { version = "0.8" } -trace = { path = "../trace" } -trace_exporters = { path = "../trace_exporters" } trace_http = { path = "../trace_http" } diff --git a/influxdb_iox_client/src/client/flight/low_level.rs b/influxdb_iox_client/src/client/flight/low_level.rs deleted file mode 100644 index 5a81b2e7cd..0000000000 --- a/influxdb_iox_client/src/client/flight/low_level.rs +++ /dev/null @@ -1,306 +0,0 @@ -//! Low-level flight client. -//! -//! This client allows more inspection of the flight messages which can be helpful to implement -//! more advanced protocols. -//! -//! # Protocol Usage -//! -//! The client handles flight messages as followes: -//! -//! - **None:** App metadata is extracted. Otherwise this message has no effect. This is useful to -//! transmit metadata without any actual payload. -//! - **Schema:** The schema is (re-)set. Dictionaries are cleared. App metadata is extraced and -//! both the schema and the metadata are presented to the user. -//! - **Dictionary Batch:** A new dictionary for a given column is registered. An existing -//! dictionary for the same column will be overwritten. No app metadata is extracted. This -//! message is NOT visible to the user. -//! - **Record Batch:** Record batch is created based on the current schema and dictionaries. This -//! fails if no schema was transmitted yet. App metadata is extracted is is presented -- together -//! with the record batch -- to the user. -//! -//! All other message types (at the time of writing: tensor and sparse tensor) lead to an error. 
- -use super::Error; -use ::generated_types::influxdata::iox::{ - ingester::v1::{IngesterQueryRequest, IngesterQueryResponseMetadata}, - querier::v1::{AppMetadata, ReadInfo}, -}; -use arrow::{ - array::ArrayRef, - buffer::Buffer, - datatypes::Schema, - ipc::{self, reader}, - record_batch::RecordBatch, -}; -use arrow_flight::{ - flight_service_client::FlightServiceClient, utils::flight_data_to_arrow_batch, FlightData, - HandshakeRequest, Ticket, -}; -use client_util::connection::{Connection, GrpcConnection}; -use futures_util::stream; -use futures_util::stream::StreamExt; -use prost::Message; -use rand::Rng; -use std::{collections::HashMap, convert::TryFrom, marker::PhantomData, str::FromStr, sync::Arc}; -use tonic::{ - codegen::http::header::{HeaderName, HeaderValue}, - Streaming, -}; -use trace::ctx::SpanContext; -use trace_http::ctx::format_jaeger_trace_context; - -/// Metadata that can be send during flight requests. -pub trait ClientMetadata: Message { - /// Response metadata. - type Response: Default + Message; -} - -impl ClientMetadata for ReadInfo { - type Response = AppMetadata; -} - -impl ClientMetadata for IngesterQueryRequest { - type Response = IngesterQueryResponseMetadata; -} - -/// Low-level flight client. -/// -/// # Request and Response Metadata -/// -/// The type parameter `T` -- which must implement [`ClientMetadata`] -- describes the request and -/// response metadata that is sent and received during the flight request. The request is encoded -/// as protobuf and send as the Flight "ticket". The response is received via the so-called "app -/// metadata". -#[derive(Debug)] -pub struct Client<T> -where - T: ClientMetadata, -{ - inner: FlightServiceClient<GrpcConnection>, - _phantom: PhantomData<T>, -} - -impl<T> Client<T> -where - T: ClientMetadata, -{ - /// Creates a new client with the provided connection - #[allow(clippy::mutable_key_type)] // https://github.com/rust-lang/rust-clippy/issues/5812 - pub fn new(connection: Connection, span_context: Option<SpanContext>) -> Self { - let grpc_conn = connection.into_grpc_connection(); - - let grpc_conn = if let Some(ctx) = span_context { - let (service, headers) = grpc_conn.into_parts(); - - let mut headers: HashMap<_, _> = headers.iter().cloned().collect(); - let key = - HeaderName::from_str(trace_exporters::DEFAULT_JAEGER_TRACE_CONTEXT_HEADER_NAME) - .unwrap(); - let value = HeaderValue::from_str(&format_jaeger_trace_context(&ctx)).unwrap(); - headers.insert(key, value); - - GrpcConnection::new(service, headers.into_iter().collect()) - } else { - grpc_conn - }; - - Self { - inner: FlightServiceClient::new(grpc_conn), - _phantom: PhantomData::default(), - } - } - - /// Query the given namespace with the given SQL query, and return a [`PerformQuery`] instance - /// that streams low-level message results. - pub async fn perform_query(&mut self, request: T) -> Result<PerformQuery<T::Response>, Error> { - PerformQuery::<T::Response>::new(self, request).await - } - - /// Perform a handshake with the server, as defined by the Arrow Flight API. - pub async fn handshake(&mut self) -> Result<(), Error> { - let request = HandshakeRequest { - protocol_version: 0, - payload: rand::thread_rng().gen::<[u8; 16]>().to_vec(), - }; - let mut response = self - .inner - .handshake(stream::iter(vec![request.clone()])) - .await? - .into_inner(); - if request.payload.eq(&response - .next() - .await - .ok_or(Error::HandshakeFailed)?? 
- .payload) - { - Result::Ok(()) - } else { - Result::Err(Error::HandshakeFailed) - } - } -} - -#[derive(Debug)] -struct PerformQueryState { - schema: Arc<Schema>, - dictionaries_by_field: HashMap<i64, ArrayRef>, -} - -/// Low-level message returned by the flight server. -#[derive(Debug)] -pub enum LowLevelMessage { - /// None. - None, - - /// Schema. - Schema(Arc<Schema>), - - /// Record batch. - RecordBatch(RecordBatch), -} - -impl LowLevelMessage { - /// Unwrap none. - pub fn unwrap_none(self) { - match self { - LowLevelMessage::None => (), - LowLevelMessage::Schema(_) => panic!("Contains schema"), - LowLevelMessage::RecordBatch(_) => panic!("Contains record batch"), - } - } - - /// Unwrap schema. - pub fn unwrap_schema(self) -> Arc<Schema> { - match self { - LowLevelMessage::None => panic!("Contains none"), - LowLevelMessage::Schema(schema) => schema, - LowLevelMessage::RecordBatch(_) => panic!("Contains record batch"), - } - } - - /// Unwrap data. - pub fn unwrap_record_batch(self) -> RecordBatch { - match self { - LowLevelMessage::None => panic!("Contains none"), - LowLevelMessage::Schema(_) => panic!("Contains schema"), - LowLevelMessage::RecordBatch(batch) => batch, - } - } -} - -/// A struct that manages sending an Arrow Flight request via `DoGet`, -/// and the stream of Arrow `RecordBatch` that results . -/// -/// Created by calling the `perform_query` method on a Flight [`Client`]. -/// -#[derive(Debug)] -pub struct PerformQuery<T> -where - T: Default + Message, -{ - response: Streaming<FlightData>, - state: Option<PerformQueryState>, - _phantom: PhantomData<T>, -} - -impl<T> PerformQuery<T> -where - T: Default + Message, -{ - pub(crate) async fn new<R>(flight: &mut Client<R>, request: R) -> Result<Self, Error> - where - R: ClientMetadata<Response = T>, - { - let mut bytes = bytes::BytesMut::new(); - prost::Message::encode(&request, &mut bytes)?; - let t = Ticket { - ticket: bytes.to_vec(), - }; - let response = flight.inner.do_get(t).await?.into_inner(); - - Ok(Self { - state: None, - response, - _phantom: Default::default(), - }) - } - - /// Returns next low-level message, or `None` if there are no further results available. - pub async fn next(&mut self) -> Result<Option<(LowLevelMessage, T)>, Error> { - let Self { - state, response, .. 
- } = self; - - loop { - let data = match response.next().await { - Some(d) => d?, - None => return Ok(None), - }; - - let message = ipc::root_as_message(&data.data_header[..]) - .map_err(|e| Error::InvalidFlatbuffer(e.to_string()))?; - - match message.header_type() { - ipc::MessageHeader::NONE => { - let app_metadata = &data.app_metadata[..]; - let app_metadata = prost::Message::decode(app_metadata)?; - - return Ok(Some((LowLevelMessage::None, app_metadata))); - } - ipc::MessageHeader::Schema => { - let app_metadata = &data.app_metadata[..]; - let app_metadata = prost::Message::decode(app_metadata)?; - - let schema = Arc::new(Schema::try_from(&data)?); - - let dictionaries_by_field = HashMap::new(); - - *state = Some(PerformQueryState { - schema: Arc::clone(&schema), - dictionaries_by_field, - }); - return Ok(Some((LowLevelMessage::Schema(schema), app_metadata))); - } - ipc::MessageHeader::DictionaryBatch => { - let state = if let Some(state) = state.as_mut() { - state - } else { - return Err(Error::NoSchema); - }; - - let buffer: Buffer = data.data_body.into(); - reader::read_dictionary( - &buffer, - message - .header_as_dictionary_batch() - .ok_or(Error::CouldNotGetDictionaryBatch)?, - &state.schema, - &mut state.dictionaries_by_field, - &message.version(), - )?; - } - ipc::MessageHeader::RecordBatch => { - let state = if let Some(state) = state.as_ref() { - state - } else { - return Err(Error::NoSchema); - }; - - let app_metadata = &data.app_metadata[..]; - let app_metadata = prost::Message::decode(app_metadata)?; - - let batch = flight_data_to_arrow_batch( - &data, - Arc::clone(&state.schema), - &state.dictionaries_by_field, - )?; - - return Ok(Some((LowLevelMessage::RecordBatch(batch), app_metadata))); - } - other => { - return Err(Error::UnknownMessageType(other)); - } - } - } - } -} diff --git a/influxdb_iox_client/src/client/flight/mod.rs b/influxdb_iox_client/src/client/flight/mod.rs index 3e3f0f4ce4..4301d3f6ab 100644 --- a/influxdb_iox_client/src/client/flight/mod.rs +++ b/influxdb_iox_client/src/client/flight/mod.rs @@ -1,8 +1,7 @@ //! Client for InfluxDB IOx Flight API -use ::generated_types::influxdata::iox::querier::v1::{ - read_info::QueryType, AppMetadata, ReadInfo, -}; +use ::generated_types::influxdata::iox::querier::v1::{read_info::QueryType, ReadInfo}; +use prost::Message; use thiserror::Error; use arrow::{ @@ -10,6 +9,10 @@ use arrow::{ record_batch::RecordBatch, }; +use rand::Rng; + +use iox_arrow_flight::{FlightClient, FlightError, FlightRecordBatchStream}; + use crate::connection::Connection; /// Re-export generated_types @@ -20,11 +23,6 @@ pub mod generated_types { }; } -pub mod low_level; -pub use low_level::{Client as LowLevelClient, PerformQuery as LowLevelPerformQuery}; - -use self::low_level::LowLevelMessage; - /// Error responses when querying an IOx namespace using the IOx Flight API. #[derive(Debug, Error)] pub enum Error { @@ -37,6 +35,10 @@ pub enum Error { #[error(transparent)] ArrowError(#[from] arrow::error::ArrowError), + /// An error involving an Arrow operation occurred. + #[error(transparent)] + ArrowFlightError(#[from] iox_arrow_flight::FlightError), + /// The data contained invalid Flatbuffers. #[error("Invalid Flatbuffer: `{0}`")] InvalidFlatbuffer(String), @@ -47,14 +49,9 @@ pub enum Error { #[error("Message with header of type dictionary batch could not return a dictionary batch")] CouldNotGetDictionaryBatch, - /// An unknown server error occurred. Contains the `tonic::Status` returned - /// from the server. 
- #[error("{}", .0.message())] - GrpcError(#[from] tonic::Status), - /// Arrow Flight handshake failed. - #[error("Handshake failed")] - HandshakeFailed, + #[error("Handshake failed: {0}")] + HandshakeFailed(String), /// Serializing the protobuf structs into bytes failed. #[error(transparent)] @@ -73,6 +70,23 @@ pub enum Error { UnexpectedSchemaChange, } +impl Error { + /// Extracts the underlying tonic status, if any + pub fn tonic_status(&self) -> Option<&tonic::Status> { + if let Self::ArrowFlightError(FlightError::Tonic(status)) = self { + Some(status) + } else { + None + } + } +} + +impl From<tonic::Status> for Error { + fn from(status: tonic::Status) -> Self { + Self::ArrowFlightError(status.into()) + } +} + /// InfluxDB IOx Flight API client. /// /// This client can send SQL or InfluxQL queries to an IOx server @@ -84,10 +98,6 @@ pub enum Error { /// For SQL queries, this client yields a stream of [`RecordBatch`]es /// with the same schema. /// -/// Note that [low level interface](low_level), used internally for querier -/// <--> ingester communication, offers more control over the messages -/// that are sent and the batches received. -/// /// # Example /// /// ```rust,no_run @@ -123,53 +133,80 @@ pub enum Error { /// ``` #[derive(Debug)] pub struct Client { - inner: LowLevelClient<ReadInfo>, + inner: FlightClient, } impl Client { - /// Creates a new client with the provided connection + /// Creates a new client with the provided [`Connection`]. Panics + /// if the metadata in connection is invalid for the underlying + /// tonic library. pub fn new(connection: Connection) -> Self { - Self { - inner: LowLevelClient::new(connection, None), + // Extract headers to include with each request + let (channel, headers) = connection.into_grpc_connection().into_parts(); + + let mut inner = FlightClient::new(channel); + + // Copy any headers from IOx Connection + for (name, value) in headers.iter() { + let name = tonic::metadata::MetadataKey::<_>::from_bytes(name.as_str().as_bytes()) + .expect("Invalid metadata name"); + + let value: tonic::metadata::MetadataValue<_> = + value.as_bytes().try_into().expect("Invalid metadata value"); + inner.metadata_mut().insert(name, value); } + + Self { inner } + } + + /// Return the inner arrow flight client + pub fn into_inner(self) -> FlightClient { + self.inner } - /// Query the given namespace with the given SQL query, and return a - /// [`PerformQuery`] instance that streams Arrow [`RecordBatch`] results. + /// Query the given namespace with the given SQL query, returning + /// a struct that can stream Arrow [`RecordBatch`] results. pub async fn sql( &mut self, namespace_name: String, sql_query: String, - ) -> Result<PerformQuery, Error> { + ) -> Result<IOxRecordBatchStream, Error> { let request = ReadInfo { namespace_name, sql_query, query_type: QueryType::Sql.into(), }; - self.perform_read(request).await + self.do_get_with_read_info(request).await } - /// Query the given namespace with the given InfluxQL query, and return a - /// [`PerformQuery`] instance that streams Arrow [`RecordBatch`] results. + /// Query the given namespace with the given InfluxQL query, returning + /// a struct that can stream Arrow [`RecordBatch`] results. 
pub async fn influxql( &mut self, namespace_name: String, influxql_query: String, - ) -> Result<PerformQuery, Error> { + ) -> Result<IOxRecordBatchStream, Error> { let request = ReadInfo { namespace_name, sql_query: influxql_query, query_type: QueryType::InfluxQl.into(), }; - PerformQuery::new(self, request).await + + self.do_get_with_read_info(request).await } - /// Send the query request described in `request` to the IOx - /// server, returning a [`PerformQuery`] instance that streams - /// Arrow [`RecordBatch`] results. - pub async fn perform_read(&mut self, request: ReadInfo) -> Result<PerformQuery, Error> { - PerformQuery::new(self, request).await + /// Perform a lower level client read with something + async fn do_get_with_read_info( + &mut self, + read_info: ReadInfo, + ) -> Result<IOxRecordBatchStream, Error> { + // encode readinfo as bytes and send it + self.inner + .do_get(read_info.encode_to_vec()) + .await + .map(IOxRecordBatchStream::new) + .map_err(Error::ArrowFlightError) } /// Perform a handshake with the server, returning Ok on success @@ -177,58 +214,58 @@ impl Client { /// /// It is best practice to ensure a successful handshake with IOx /// prior to issuing queries. + + /// Perform a handshake with the server, as defined by the Arrow Flight API. pub async fn handshake(&mut self) -> Result<(), Error> { - self.inner.handshake().await + // handshake is an echo server. Send some random bytes and + // expect the same back. + let payload = rand::thread_rng().gen::<[u8; 16]>().to_vec(); + + let response = self + .inner + .handshake(payload.clone()) + .await + .map_err(|e| e.to_string()) + .map_err(Error::HandshakeFailed)?; + + if payload.eq(&response) { + Ok(()) + } else { + Err(Error::HandshakeFailed("reponse mismatch".into())) + } } } -/// A struct that manages the stream of Arrow [`RecordBatch`] -/// resulting from executing an IOx Flight query. -/// -/// Most users will not interact with this structure directly, but -/// rather should use [`Client::sql`] or [`Client::influxql`] -/// methods. #[derive(Debug)] -pub struct PerformQuery { - inner: LowLevelPerformQuery<AppMetadata>, - got_schema: bool, +/// Translates errors from FlightErrors to IOxErrors +pub struct IOxRecordBatchStream { + inner: FlightRecordBatchStream, } -impl PerformQuery { - pub(crate) async fn new(flight: &mut Client, request: ReadInfo) -> Result<Self, Error> { - let inner = flight.inner.perform_query(request).await?; +impl IOxRecordBatchStream { + /// create a new IOxRecordBatchStream + pub fn new(inner: FlightRecordBatchStream) -> Self { + Self { inner } + } - Ok(Self { - inner, - got_schema: false, - }) + /// Has a message defining the schema been seen yet + pub fn got_schema(&self) -> bool { + self.inner.got_schema() } - /// Returns the next `RecordBatch` available for this query, or `None` if + /// Consume self and return the wrapped [`FlightRecordBatchStream`] + pub fn into_inner(self) -> FlightRecordBatchStream { + self.inner + } + + /// Returns the next `RecordBatch` available in this stream, or `None` if /// there are no further results available. pub async fn next(&mut self) -> Result<Option<RecordBatch>, Error> { - loop { - match self.inner.next().await? { - None => return Ok(None), - Some((LowLevelMessage::Schema(_), _)) => { - if self.got_schema { - return Err(Error::UnexpectedSchemaChange); - } - self.got_schema = true; - } - Some((LowLevelMessage::RecordBatch(batch), _)) => return Ok(Some(batch)), - Some((LowLevelMessage::None, _)) => (), - } - } + Ok(self.inner.next().await?) 
} /// Collect and return all `RecordBatch`es as a `Vec` pub async fn collect(&mut self) -> Result<Vec<RecordBatch>, Error> { - let mut batches = Vec::new(); - while let Some(data) = self.next().await? { - batches.push(data); - } - - Ok(batches) + Ok(self.inner.collect().await?) } } diff --git a/iox_arrow_flight/Cargo.toml b/iox_arrow_flight/Cargo.toml new file mode 100644 index 0000000000..a4dcae670a --- /dev/null +++ b/iox_arrow_flight/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "iox_arrow_flight" +description = "Extended Apache Arrow Flight (intended to be upstreamed to arrow-rs)" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +arrow = { workspace = true } +arrow-flight = { workspace = true, features=["flight-sql-experimental"] } +futures = "0.3" +futures-util = { version = "0.3" } +prost = { version = "0.11", features = ["prost-derive", "std"] } +prost-types = { version = "0.11", features = ["std"] } +tonic = "0.8" +workspace-hack = { path = "../workspace-hack"} diff --git a/iox_arrow_flight/src/client.rs b/iox_arrow_flight/src/client.rs new file mode 100644 index 0000000000..2dbd66ad41 --- /dev/null +++ b/iox_arrow_flight/src/client.rs @@ -0,0 +1,423 @@ +/// Prototype "Flight Client" that handles underlying details of the flight protocol at a higher level + +/// Based on the "low level client" from IOx client: +use arrow::{array::ArrayRef, datatypes::Schema, ipc, record_batch::RecordBatch}; +use arrow_flight::{ + flight_service_client::FlightServiceClient, utils::flight_data_to_arrow_batch, FlightData, + FlightDescriptor, FlightInfo, HandshakeRequest, Ticket, +}; +use futures_util::stream; +use futures_util::stream::StreamExt; +use std::{collections::HashMap, convert::TryFrom, sync::Arc}; +use tonic::{ + metadata::MetadataMap, + transport::Channel, + //codegen::http::header::{HeaderName, HeaderValue}, + Streaming, +}; + +use crate::error::{FlightError, Result}; + +/// [Apache Arrow Flight](https://arrow.apache.org/docs/format/Flight.html) client. +/// +/// [`FlightClient`] is intended as a convenience for interactions +/// with Arrow Flight servers. If you need more direct control, such +/// as access to the response headers, you can use the +/// [`FlightServiceClient`] directly via methods such as +/// [`Self::inner`] or [`Self::into_inner`]. +/// +// TODO: +// docstring example +// - [ ] make this properly templated so it can take any service (not just a tonic Channel directly) +#[derive(Debug)] +pub struct FlightClient { + /// Optional grpc header metadata to include with each request + metadata: MetadataMap, + + /// The inner client + inner: FlightServiceClient<Channel>, +} + +impl FlightClient { + /// Creates a client client with the provided [`Channel`](tonic::transport::Channel); + pub fn new(channel: Channel) -> Self { + Self::new_from_inner(FlightServiceClient::new(channel)) + } + + /// Creates a new higher level client with the provided lower level client + pub fn new_from_inner(inner: FlightServiceClient<Channel>) -> Self { + Self { + metadata: MetadataMap::new(), + inner, + } + } + + /// Return a reference to gRPC metadata included with each request + pub fn metadata(&self) -> &MetadataMap { + &self.metadata + } + + /// Return a reference to gRPC metadata included with each request + /// + /// These headers can be used, for example, to include + /// authorization or other application specific headers. 
+ pub fn metadata_mut(&mut self) -> &mut MetadataMap { + &mut self.metadata + } + + /// Add the specified header with value to all subsequent + /// requests. See [`Self::metadata_mut`] for fine grained control. + pub fn add_header(&mut self, key: &str, value: &str) -> Result<()> { + let key = tonic::metadata::MetadataKey::<_>::from_bytes(key.as_bytes()) + .map_err(|e| FlightError::ExternalError(Box::new(e)))?; + + let value = value + .parse() + .map_err(|e| FlightError::ExternalError(Box::new(e)))?; + + // ignore previous value + self.metadata.insert(key, value); + + Ok(()) + } + + /// Return a reference to the underlying tonic + /// [`FlightServiceClient`] + pub fn inner(&self) -> &FlightServiceClient<Channel> { + &self.inner + } + + /// Return a mutable reference to the underlying tonic + /// [`FlightServiceClient`] + pub fn inner_mut(&mut self) -> &mut FlightServiceClient<Channel> { + &mut self.inner + } + + /// Consume this client and return the underlying tonic + /// [`FlightServiceClient`] + pub fn into_inner(self) -> FlightServiceClient<Channel> { + self.inner + } + + /// Perform an Arrow Flight handshake with the server, sending + /// `payload` as the [`HandshakeRequest`] payload and returning + /// the [`HandshakeResponse`](arrow_flight::HandshakeResponse) + /// bytes returned from the server + pub async fn handshake(&mut self, payload: Vec<u8>) -> Result<Vec<u8>> { + let request = HandshakeRequest { + protocol_version: 0, + payload, + }; + + let mut response_stream = self + .inner + .handshake(stream::iter(vec![request])) + .await + .map_err(FlightError::Tonic)? + .into_inner(); + + if let Some(response) = response_stream.next().await { + let response = response.map_err(FlightError::Tonic)?; + + // check if there is another response + if response_stream.next().await.is_some() { + return Err(FlightError::protocol( + "Got unexpected second response from handshake", + )); + } + + Ok(response.payload) + } else { + Err(FlightError::protocol("No response from handshake")) + } + } + + /// Make a `DoGet` call to the server with the provided ticket, + /// returning a [`FlightRecordBatchStream`] for reading + /// [`RecordBatch`]es. + pub async fn do_get(&mut self, ticket: Vec<u8>) -> Result<FlightRecordBatchStream> { + let t = Ticket { ticket }; + let request = self.make_request(t); + + let response = self + .inner + .do_get(request) + .await + .map_err(FlightError::Tonic)? + .into_inner(); + + let flight_data_stream = FlightDataStream::new(response); + Ok(FlightRecordBatchStream::new(flight_data_stream)) + } + + /// Make a `GetFlightInfo` call to the server with the provided + /// [`FlightDescriptor`] and return the [`FlightInfo`] from the + /// server + pub async fn get_flight_info(&mut self, descriptor: FlightDescriptor) -> Result<FlightInfo> { + let request = self.make_request(descriptor); + + let response = self + .inner + .get_flight_info(request) + .await + .map_err(FlightError::Tonic)? + .into_inner(); + Ok(response) + } + + /// return a Request, adding any configured metadata + fn make_request<T>(&self, t: T) -> tonic::Request<T> { + // Pass along metadata + let mut request = tonic::Request::new(t); + *request.metadata_mut() = self.metadata.clone(); + request + } +} + +/// A stream of [`RecordBatch`]es from from an Arrow Flight server. +/// +/// To access the lower level Flight messages directly, consider +/// calling [`Self::into_inner`] and using the [`FlightDataStream`] +/// directly. 
+#[derive(Debug)] +pub struct FlightRecordBatchStream { + inner: FlightDataStream, + got_schema: bool, +} + +// TODO make this a proper futures::Stream +impl FlightRecordBatchStream { + pub fn new(inner: FlightDataStream) -> Self { + Self { + inner, + got_schema: false, + } + } + + /// Has a message defining the schema been received yet? + pub fn got_schema(&self) -> bool { + self.got_schema + } + + /// Consume self and return the wrapped [`FlightDataStream`] + pub fn into_inner(self) -> FlightDataStream { + self.inner + } + + /// Returns the next [`RecordBatch`] available in this stream, or `None` if + /// there are no further results available. + pub async fn next(&mut self) -> Result<Option<RecordBatch>> { + while let Some(data) = self.inner.next().await? { + match data.payload { + DecodedPayload::Schema(_) => { + if self.got_schema { + return Err(FlightError::protocol( + "Unexpectedly saw multiple Schema messages in FlightData stream", + )); + } + self.got_schema = true; + } + DecodedPayload::RecordBatch(batch) => return Ok(Some(batch)), + DecodedPayload::None => { + // loop again + } + } + } + Ok(None) + } + + /// Collect and return all `RecordBatch`es as a `Vec` + pub async fn collect(&mut self) -> Result<Vec<RecordBatch>> { + let mut batches = Vec::new(); + while let Some(data) = self.next().await? { + batches.push(data); + } + + Ok(batches) + } +} + +/// Wrapper around a stream of [`FlightData`] that handles the details +/// of decoding low level Flight messages into [`Schema`] and +/// [`RecordBatch`]es, including details such as dictionaries. +/// +/// # Protocol Details +/// +/// The client handles flight messages as followes: +/// +/// - **None:** This message has no effect. This is useful to +/// transmit metadata without any actual payload. +/// +/// - **Schema:** The schema is (re-)set. Dictionaries are cleared and +/// the decoded schema is returned. +/// +/// - **Dictionary Batch:** A new dictionary for a given column is registered. An existing +/// dictionary for the same column will be overwritten. This +/// message is NOT visible. +/// +/// - **Record Batch:** Record batch is created based on the current +/// schema and dictionaries. This fails if no schema was transmitted +/// yet. +/// +/// All other message types (at the time of writing: e.g. tensor and +/// sparse tensor) lead to an error. +// TODO make this a real futures::Stream +#[derive(Debug)] +pub struct FlightDataStream { + /// Underlying data stream + response: Streaming<FlightData>, + /// Decoding state + state: Option<FlightStreamState>, +} + +impl FlightDataStream { + /// Create a new wrapper around the stream of FlightData + pub fn new(response: Streaming<FlightData>) -> Self { + Self { + state: None, + response, + } + } + + /// Returns the result of decoding the next [`FlightData`] message + /// from the server, or `None` if there are no further results + /// available. 
+ pub async fn next(&mut self) -> Result<Option<DecodedFlightData>> { + while let Some(data) = self.response.next().await { + let data = data.map_err(FlightError::Tonic)?; + + let message = ipc::root_as_message(&data.data_header[..]).map_err(|e| { + FlightError::DecodeError(format!("Error decoding root message: {e}")) + })?; + + match message.header_type() { + ipc::MessageHeader::NONE => { + return Ok(Some(DecodedFlightData::new_none(data))); + } + ipc::MessageHeader::Schema => { + let schema = Schema::try_from(&data).map_err(|e| { + FlightError::DecodeError(format!("Error decoding schema: {e}")) + })?; + + let schema = Arc::new(schema); + let dictionaries_by_field = HashMap::new(); + + self.state = Some(FlightStreamState { + schema: Arc::clone(&schema), + dictionaries_by_field, + }); + return Ok(Some(DecodedFlightData::new_schema(data, schema))); + } + ipc::MessageHeader::DictionaryBatch => { + let state = if let Some(state) = self.state.as_mut() { + state + } else { + return Err(FlightError::protocol( + "Received DictionaryBatch prior to Schema", + )); + }; + + let buffer: arrow::buffer::Buffer = data.data_body.into(); + let dictionary_batch = + message.header_as_dictionary_batch().ok_or_else(|| { + FlightError::protocol( + "Could not get dictionary batch from DictionaryBatch message", + ) + })?; + + ipc::reader::read_dictionary( + &buffer, + dictionary_batch, + &state.schema, + &mut state.dictionaries_by_field, + &message.version(), + ) + .map_err(|e| { + FlightError::DecodeError(format!("Error decoding ipc dictionary: {e}")) + })?; + } + ipc::MessageHeader::RecordBatch => { + let state = if let Some(state) = self.state.as_ref() { + state + } else { + return Err(FlightError::protocol( + "Received RecordBatch prior to Schema", + )); + }; + + let batch = flight_data_to_arrow_batch( + &data, + Arc::clone(&state.schema), + &state.dictionaries_by_field, + ) + .map_err(|e| { + FlightError::DecodeError(format!("Error decoding ipc RecordBatch: {e}")) + })?; + + return Ok(Some(DecodedFlightData::new_record_batch(data, batch))); + } + other => { + let name = other.variant_name().unwrap_or("UNKNOWN"); + return Err(FlightError::protocol(format!("Unexpected message: {name}"))); + } + } + } + Ok(None) + } +} + +/// tracks the state needed to reconstruct [`RecordBatch`]es from a +/// streaming flight response. +#[derive(Debug)] +struct FlightStreamState { + schema: Arc<Schema>, + dictionaries_by_field: HashMap<i64, ArrayRef>, +} + +/// FlightData and the decoded payload (Schema, RecordBatch), if any +#[derive(Debug)] +pub struct DecodedFlightData { + pub inner: FlightData, + pub payload: DecodedPayload, +} + +impl DecodedFlightData { + pub fn new_none(inner: FlightData) -> Self { + Self { + inner, + payload: DecodedPayload::None, + } + } + + pub fn new_schema(inner: FlightData, schema: Arc<Schema>) -> Self { + Self { + inner, + payload: DecodedPayload::Schema(schema), + } + } + + pub fn new_record_batch(inner: FlightData, batch: RecordBatch) -> Self { + Self { + inner, + payload: DecodedPayload::RecordBatch(batch), + } + } + + /// return the metadata field of the inner flight data + pub fn app_metadata(&self) -> &[u8] { + &self.inner.app_metadata + } +} + +/// The result of decoding [`FlightData`] +#[derive(Debug)] +pub enum DecodedPayload { + /// None (no data was sent in the corresponding FlightData) + None, + + /// A decoded Schema message + Schema(Arc<Schema>), + + /// A decoded Record batch. 
+ RecordBatch(RecordBatch), +} diff --git a/iox_arrow_flight/src/error.rs b/iox_arrow_flight/src/error.rs new file mode 100644 index 0000000000..3abfb58ce6 --- /dev/null +++ b/iox_arrow_flight/src/error.rs @@ -0,0 +1,43 @@ +use std::error::Error; + +/// Many different operations in the `arrow` crate return this error type. +#[derive(Debug)] +pub enum FlightError { + /// Returned when functionality is not yet available. + NotYetImplemented(String), + /// Error from the underlying tonic library + Tonic(tonic::Status), + /// Some unexpected message was received + ProtocolError(String), + /// An error occured during decoding + DecodeError(String), + ExternalError(Box<dyn Error + Send + Sync>), +} + +impl FlightError { + pub fn protocol(message: impl Into<String>) -> Self { + Self::ProtocolError(message.into()) + } + + /// Wraps an external error in an `ArrowError`. + pub fn from_external_error(error: Box<dyn Error + Send + Sync>) -> Self { + Self::ExternalError(error) + } +} + +impl std::fmt::Display for FlightError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // TODO better format / error + write!(f, "{:?}", self) + } +} + +impl std::error::Error for FlightError {} + +impl From<tonic::Status> for FlightError { + fn from(status: tonic::Status) -> Self { + Self::Tonic(status) + } +} + +pub type Result<T> = std::result::Result<T, FlightError>; diff --git a/iox_arrow_flight/src/lib.rs b/iox_arrow_flight/src/lib.rs new file mode 100644 index 0000000000..a35ab019e8 --- /dev/null +++ b/iox_arrow_flight/src/lib.rs @@ -0,0 +1,37 @@ +#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] +#![allow(clippy::clone_on_ref_ptr)] +#![warn( + missing_copy_implementations, + missing_debug_implementations, + clippy::explicit_iter_loop, + clippy::future_not_send, + clippy::clone_on_ref_ptr, + clippy::todo, + clippy::dbg_macro +)] + +//! Arrow Flight implementations +//! +//! The goal is to upstream much/all of this to back to arrow-rs, but +//! it is included in the IOx codebase initially for development +//! convenience (so we can develop directly on main without forks of +//! 
upstream repos) + +pub mod error; +pub use error::FlightError; + +mod client; +pub use client::{ + DecodedFlightData, DecodedPayload, FlightClient, FlightDataStream, FlightRecordBatchStream, +}; + +/// Reexport all of arrow_flight so this crate can masquarade as +/// `arrow-flight` in the IOx codebase (as the aim is to publish this +/// all upstream) +pub use arrow_flight::*; + +/// Publically reexport prost used by flight +pub use prost; + +/// Publically reexport tonic used by flight +pub use tonic; diff --git a/querier/Cargo.toml b/querier/Cargo.toml index c9528753e5..944143f067 100644 --- a/querier/Cargo.toml +++ b/querier/Cargo.toml @@ -18,6 +18,7 @@ datafusion_util = { path = "../datafusion_util" } futures = "0.3" generated_types = { path = "../generated_types" } influxdb_iox_client = { path = "../influxdb_iox_client" } +iox_arrow_flight = { path = "../iox_arrow_flight" } iox_catalog = { path = "../iox_catalog" } iox_query = { path = "../iox_query" } iox_time = { path = "../iox_time" } @@ -41,6 +42,8 @@ tokio = { version = "1.22", features = ["macros", "parking_lot", "rt-multi-threa tokio-util = { version = "0.7.4" } tonic = { version = "0.8" } trace = { path = "../trace" } +trace_exporters = { path = "../trace_exporters" } +trace_http = { path = "../trace_http" } tracker = { path = "../tracker" } uuid = { version = "1", features = ["v4"] } workspace-hack = { path = "../workspace-hack"} diff --git a/querier/src/ingester/circuit_breaker.rs b/querier/src/ingester/circuit_breaker.rs index 1dd6d3c421..c60ff43314 100644 --- a/querier/src/ingester/circuit_breaker.rs +++ b/querier/src/ingester/circuit_breaker.rs @@ -21,9 +21,7 @@ use pin_project::{pin_project, pinned_drop}; use rand::rngs::mock::StepRng; use trace::ctx::SpanContext; -use crate::ingester::flight_client::{ - Error as FlightClientError, FlightClient, FlightError, QueryData, -}; +use crate::ingester::flight_client::{Error as FlightClientError, IngesterFlightClient, QueryData}; /// Wrapper around a [`Future`] that signals if the future was cancelled or not. #[pin_project(PinnedDrop)] @@ -206,13 +204,13 @@ enum Circuit { }, } -/// Wrapper around [`FlightClient`] that implements the [Circuit Breaker Design Pattern]. +/// Wrapper around [`IngesterFlightClient`] that implements the [Circuit Breaker Design Pattern]. /// /// [Circuit Breaker Design Pattern]: https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern #[derive(Debug)] pub struct CircuitBreakerFlightClient { /// The underlying client. - inner: Arc<dyn FlightClient>, + inner: Arc<dyn IngesterFlightClient>, /// After how many consecutive errors shall we open a circuit? open_circuit_after_n_errors: u64, @@ -238,7 +236,7 @@ impl CircuitBreakerFlightClient { /// /// Use `open_circuit_after_n_errors` to determine after how many consecutive errors we shall open a circuit. 
pub fn new( - inner: Arc<dyn FlightClient>, + inner: Arc<dyn IngesterFlightClient>, time_provider: Arc<dyn TimeProvider>, metric_registry: Arc<Registry>, open_circuit_after_n_errors: u64, @@ -257,7 +255,7 @@ impl CircuitBreakerFlightClient { } #[async_trait] -impl FlightClient for CircuitBreakerFlightClient { +impl IngesterFlightClient for CircuitBreakerFlightClient { async fn query( &self, ingester_addr: Arc<str>, @@ -362,7 +360,10 @@ impl FlightClient for CircuitBreakerFlightClient { let is_error = if let Err(e) = &res { match e { FlightClientError::Flight { - source: _source @ FlightError::GrpcError(e), + source: + _source @ influxdb_iox_client::flight::Error::ArrowFlightError( + iox_arrow_flight::FlightError::Tonic(e), + ), } => !matches!( e.code(), tonic::Code::NotFound | tonic::Code::ResourceExhausted @@ -503,9 +504,8 @@ mod tests { use assert_matches::assert_matches; use data_types::{NamespaceId, TableId}; use generated_types::google::FieldViolation; - use influxdb_iox_client::flight::{ - generated_types::IngesterQueryResponseMetadata, low_level::LowLevelMessage, - }; + use influxdb_iox_client::flight::generated_types::IngesterQueryResponseMetadata; + use iox_arrow_flight::DecodedPayload; use iox_time::MockProvider; use metric::Attributes; use test_helpers::maybe_start_logging; @@ -1038,7 +1038,7 @@ mod tests { } #[async_trait] - impl FlightClient for MockClient { + impl IngesterFlightClient for MockClient { async fn query( &self, _ingester_addr: Arc<str>, @@ -1070,7 +1070,10 @@ mod tests { impl QueryData for MockQueryData { async fn next( &mut self, - ) -> Result<Option<(LowLevelMessage, IngesterQueryResponseMetadata)>, FlightError> { + ) -> Result< + Option<(DecodedPayload, IngesterQueryResponseMetadata)>, + influxdb_iox_client::flight::Error, + > { Ok(None) } } @@ -1142,7 +1145,7 @@ mod tests { #[async_trait] impl<T> FlightClientExt for T where - T: FlightClient, + T: IngesterFlightClient, { async fn assert_query_ok(&self) { self.query(ingester_address(), request(), None) @@ -1177,13 +1180,13 @@ mod tests { fn err_grpc_internal() -> FlightClientError { FlightClientError::Flight { - source: FlightError::GrpcError(tonic::Status::internal("test error")), + source: tonic::Status::internal("test error").into(), } } fn err_grpc_not_found() -> FlightClientError { FlightClientError::Flight { - source: FlightError::GrpcError(tonic::Status::not_found("test error")), + source: tonic::Status::not_found("test error").into(), } } diff --git a/querier/src/ingester/flight_client.rs b/querier/src/ingester/flight_client.rs index 6d48d1deff..b9f0b3233d 100644 --- a/querier/src/ingester/flight_client.rs +++ b/querier/src/ingester/flight_client.rs @@ -1,14 +1,13 @@ use async_trait::async_trait; use client_util::connection::{self, Connection}; use generated_types::ingester::IngesterQueryRequest; -use influxdb_iox_client::flight::{ - generated_types as proto, - low_level::{Client as LowLevelFlightClient, LowLevelMessage, PerformQuery}, -}; +use influxdb_iox_client::flight::generated_types as proto; +use iox_arrow_flight::{prost::Message, DecodedFlightData, DecodedPayload, FlightDataStream}; use observability_deps::tracing::{debug, warn}; use snafu::{ResultExt, Snafu}; use std::{collections::HashMap, fmt::Debug, ops::DerefMut, sync::Arc}; use trace::ctx::SpanContext; +use trace_http::ctx::format_jaeger_trace_context; pub use influxdb_iox_client::flight::Error as FlightError; @@ -39,11 +38,11 @@ pub enum Error { CircuitBroken { ingester_address: String }, } -/// Abstract Flight client. 
+/// Abstract Flight client interface for Ingester. /// /// May use an internal connection pool. #[async_trait] -pub trait FlightClient: Debug + Send + Sync + 'static { +pub trait IngesterFlightClient: Debug + Send + Sync + 'static { /// Send query to given ingester. async fn query( &self, @@ -53,7 +52,7 @@ pub trait FlightClient: Debug + Send + Sync + 'static { ) -> Result<Box<dyn QueryData>, Error>; } -/// Default [`FlightClient`] implementation that uses a real connection +/// Default [`IngesterFlightClient`] implementation that uses a real connection #[derive(Debug, Default)] pub struct FlightClientImpl { /// Cached connections @@ -90,7 +89,7 @@ impl FlightClientImpl { } #[async_trait] -impl FlightClient for FlightClientImpl { +impl IngesterFlightClient for FlightClientImpl { async fn query( &self, ingester_addr: Arc<str>, @@ -99,14 +98,33 @@ impl FlightClient for FlightClientImpl { ) -> Result<Box<dyn QueryData>, Error> { let connection = self.connect(Arc::clone(&ingester_addr)).await?; - let mut client = - LowLevelFlightClient::<proto::IngesterQueryRequest>::new(connection, span_context); + let mut client = influxdb_iox_client::flight::Client::new(connection) + // use lower level client to send a custom message type + .into_inner(); - debug!(%ingester_addr, ?request, "Sending request to ingester"); - let request = serialize_ingester_query_request(request)?; + // Add the span context header, if any + if let Some(ctx) = span_context { + client + .add_header( + trace_exporters::DEFAULT_JAEGER_TRACE_CONTEXT_HEADER_NAME, + &format_jaeger_trace_context(&ctx), + ) + // wrap in client error type + .map_err(FlightError::ArrowFlightError) + .context(FlightSnafu)?; + } - let perform_query = client.perform_query(request).await.context(FlightSnafu)?; - Ok(Box::new(perform_query)) + debug!(%ingester_addr, ?request, "Sending request to ingester"); + let request = serialize_ingester_query_request(request)?.encode_to_vec(); + + let data_stream = client + .do_get(request) + .await + // wrap in client error type + .map_err(FlightError::ArrowFlightError) + .context(FlightSnafu)? + .into_inner(); + Ok(Box::new(data_stream)) } } @@ -161,14 +179,14 @@ impl SerializeFailureReason { /// Data that is returned by an ingester gRPC query. /// -/// This is mostly the same as [`PerformQuery`] but allows some easier mocking. +/// This is mostly the same as [`FlightDataStream`] but allows mocking in tests #[async_trait] pub trait QueryData: Debug + Send + 'static { - /// Returns the next [`LowLevelMessage`] available for this query, or `None` if + /// Returns the next [`DecodedPayload`] available for this query, or `None` if /// there are no further results available. 
async fn next( &mut self, - ) -> Result<Option<(LowLevelMessage, proto::IngesterQueryResponseMetadata)>, FlightError>; + ) -> Result<Option<(DecodedPayload, proto::IngesterQueryResponseMetadata)>, FlightError>; } #[async_trait] @@ -178,17 +196,31 @@ where { async fn next( &mut self, - ) -> Result<Option<(LowLevelMessage, proto::IngesterQueryResponseMetadata)>, FlightError> { + ) -> Result<Option<(DecodedPayload, proto::IngesterQueryResponseMetadata)>, FlightError> { self.deref_mut().next().await } } #[async_trait] -impl QueryData for PerformQuery<proto::IngesterQueryResponseMetadata> { +// Extracts the ingester metadata from the streaming FlightData +impl QueryData for FlightDataStream { async fn next( &mut self, - ) -> Result<Option<(LowLevelMessage, proto::IngesterQueryResponseMetadata)>, FlightError> { - self.next().await + ) -> Result<Option<(DecodedPayload, proto::IngesterQueryResponseMetadata)>, FlightError> { + Ok(self + .next() + .await? + .map(|decoded_data| { + let DecodedFlightData { inner, payload } = decoded_data; + + // extract the metadata from the underlying FlightData structure + let app_metadata = &inner.app_metadata[..]; + let app_metadata: proto::IngesterQueryResponseMetadata = + Message::decode(app_metadata)?; + + Ok((payload, app_metadata)) as Result<_, FlightError> + }) + .transpose()?) } } @@ -226,8 +258,7 @@ impl CachedConnection { .context(ConnectingSnafu { ingester_address })?; // sanity check w/ a handshake - let mut client = - LowLevelFlightClient::<proto::IngesterQueryRequest>::new(connection.clone(), None); + let mut client = influxdb_iox_client::flight::Client::new(connection.clone()); // make contact with the ingester client diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index c5426ea2e3..acb32dda6a 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -1,6 +1,8 @@ use self::{ circuit_breaker::CircuitBreakerFlightClient, - flight_client::{Error as FlightClientError, FlightClient, FlightClientImpl, FlightError}, + flight_client::{ + Error as FlightClientError, FlightClientImpl, FlightError, IngesterFlightClient, + }, test_util::MockIngesterConnection, }; use crate::cache::{namespace::CachedTable, CatalogCache}; @@ -19,9 +21,8 @@ use generated_types::{ ingester::{encode_proto_predicate_as_base64, IngesterQueryRequest}, write_info::merge_responses, }; -use influxdb_iox_client::flight::{ - generated_types::IngesterQueryResponseMetadata, low_level::LowLevelMessage, -}; +use influxdb_iox_client::flight::generated_types::IngesterQueryResponseMetadata; +use iox_arrow_flight::DecodedPayload; use iox_query::{ exec::{stringset::StringSet, IOxSessionContext}, util::{compute_timenanosecond_min_max, create_basic_summary}, @@ -351,7 +352,7 @@ impl<'a> Drop for ObserveIngesterRequest<'a> { pub struct IngesterConnectionImpl { shard_to_ingesters: HashMap<ShardIndex, IngesterMapping>, unique_ingester_addresses: HashSet<Arc<str>>, - flight_client: Arc<dyn FlightClient>, + flight_client: Arc<dyn IngesterFlightClient>, catalog_cache: Arc<CatalogCache>, metrics: Arc<IngesterConnectionMetrics>, backoff_config: BackoffConfig, @@ -402,7 +403,7 @@ impl IngesterConnectionImpl { /// network communication. 
pub fn by_shard_with_flight_client( shard_to_ingesters: HashMap<ShardIndex, IngesterMapping>, - flight_client: Arc<dyn FlightClient>, + flight_client: Arc<dyn IngesterFlightClient>, catalog_cache: Arc<CatalogCache>, backoff_config: BackoffConfig, ) -> Self { @@ -462,7 +463,7 @@ impl IngesterConnectionImpl { /// Struct that names all parameters to `execute` #[derive(Debug, Clone)] struct GetPartitionForIngester<'a> { - flight_client: Arc<dyn FlightClient>, + flight_client: Arc<dyn IngesterFlightClient>, catalog_cache: Arc<CatalogCache>, ingester_address: Arc<str>, namespace_id: NamespaceId, @@ -512,7 +513,7 @@ async fn execute( return Ok(vec![]); } Err(FlightClientError::Flight { - source: FlightError::GrpcError(status), + source: FlightError::ArrowFlightError(iox_arrow_flight::FlightError::Tonic(status)), }) if status.code() == tonic::Code::NotFound => { debug!( ingester_address = ingester_address.as_ref(), @@ -669,11 +670,11 @@ impl IngesterStreamDecoder { /// Register a new message and its metadata from the Flight stream. async fn register( &mut self, - msg: LowLevelMessage, + msg: DecodedPayload, md: IngesterQueryResponseMetadata, ) -> Result<(), Error> { match msg { - LowLevelMessage::None => { + DecodedPayload::None => { // new partition announced self.flush_partition().await?; @@ -728,7 +729,7 @@ impl IngesterStreamDecoder { ); self.current_partition = Some(partition); } - LowLevelMessage::Schema(schema) => { + DecodedPayload::Schema(schema) => { self.flush_chunk()?; ensure!( self.current_partition.is_some(), @@ -749,7 +750,7 @@ impl IngesterStreamDecoder { .context(ConvertingSchemaSnafu)?; self.current_chunk = Some((schema, vec![])); } - LowLevelMessage::RecordBatch(batch) => { + DecodedPayload::RecordBatch(batch) => { let current_chunk = self.current_chunk .as_mut() @@ -1357,7 +1358,7 @@ mod tests { "addr1", Err(FlightClientError::Handshake { ingester_address: String::from("addr1"), - source: FlightError::GrpcError(tonic::Status::internal("don't know")), + source: tonic::Status::internal("don't know").into(), }), )]) .await, @@ -1373,7 +1374,7 @@ mod tests { MockFlightClient::new([( "addr1", Err(FlightClientError::Flight { - source: FlightError::GrpcError(tonic::Status::internal("cow exploded")), + source: tonic::Status::internal("cow exploded").into(), }), )]) .await, @@ -1389,7 +1390,7 @@ mod tests { MockFlightClient::new([( "addr1", Err(FlightClientError::Flight { - source: FlightError::GrpcError(tonic::Status::not_found("something")), + source: tonic::Status::not_found("something").into(), }), )]) .await, @@ -1406,9 +1407,7 @@ mod tests { MockFlightClient::new([( "addr1", Ok(MockQueryData { - results: vec![Err(FlightError::GrpcError(tonic::Status::internal( - "don't know", - )))], + results: vec![Err(tonic::Status::internal("don't know").into())], }), )]) .await, @@ -1535,7 +1534,7 @@ mod tests { "addr1", Ok(MockQueryData { results: vec![Ok(( - LowLevelMessage::Schema(record_batch.schema()), + DecodedPayload::Schema(record_batch.schema()), IngesterQueryResponseMetadata::default(), ))], }), @@ -1555,7 +1554,7 @@ mod tests { "addr1", Ok(MockQueryData { results: vec![Ok(( - LowLevelMessage::RecordBatch(record_batch), + DecodedPayload::RecordBatch(record_batch), IngesterQueryResponseMetadata::default(), ))], }), @@ -1594,23 +1593,23 @@ mod tests { }), ), Ok(( - LowLevelMessage::Schema(Arc::clone(&schema_1_1)), + DecodedPayload::Schema(Arc::clone(&schema_1_1)), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::RecordBatch(record_batch_1_1_1), + 
DecodedPayload::RecordBatch(record_batch_1_1_1), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::RecordBatch(record_batch_1_1_2), + DecodedPayload::RecordBatch(record_batch_1_1_2), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::Schema(Arc::clone(&schema_1_2)), + DecodedPayload::Schema(Arc::clone(&schema_1_2)), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::RecordBatch(record_batch_1_2), + DecodedPayload::RecordBatch(record_batch_1_2), IngesterQueryResponseMetadata::default(), )), metadata( @@ -1620,11 +1619,11 @@ mod tests { }), ), Ok(( - LowLevelMessage::Schema(Arc::clone(&schema_2_1)), + DecodedPayload::Schema(Arc::clone(&schema_2_1)), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::RecordBatch(record_batch_2_1), + DecodedPayload::RecordBatch(record_batch_2_1), IngesterQueryResponseMetadata::default(), )), ], @@ -1641,11 +1640,11 @@ mod tests { }), ), Ok(( - LowLevelMessage::Schema(Arc::clone(&schema_3_1)), + DecodedPayload::Schema(Arc::clone(&schema_3_1)), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::RecordBatch(record_batch_3_1), + DecodedPayload::RecordBatch(record_batch_3_1), IngesterQueryResponseMetadata::default(), )), ], @@ -1844,14 +1843,14 @@ mod tests { "addr3", Err(FlightClientError::Handshake { ingester_address: String::from("addr3"), - source: FlightError::GrpcError(tonic::Status::internal("don't know")), + source: tonic::Status::internal("don't know").into(), }), ), ( "addr4", Err(FlightClientError::Handshake { ingester_address: String::from("addr4"), - source: FlightError::GrpcError(tonic::Status::internal("don't know")), + source: tonic::Status::internal("don't know").into(), }), ), ("addr5", Ok(MockQueryData { results: vec![] })), @@ -1948,11 +1947,11 @@ mod tests { }), ), Ok(( - LowLevelMessage::Schema(Arc::clone(&schema_1_1)), + DecodedPayload::Schema(Arc::clone(&schema_1_1)), IngesterQueryResponseMetadata::default(), )), Ok(( - LowLevelMessage::RecordBatch(record_batch_1_1), + DecodedPayload::RecordBatch(record_batch_1_1), IngesterQueryResponseMetadata::default(), )), ], @@ -1961,9 +1960,8 @@ mod tests { ( "addr2", Err(FlightClientError::Flight { - source: FlightError::GrpcError(tonic::Status::internal( - "if this is queried, the test should fail", - )), + source: tonic::Status::internal("if this is queried, the test should fail") + .into(), }), ), ]) @@ -2029,11 +2027,11 @@ mod tests { lp_to_mutable_batch(lp).1.to_arrow(Projection::All).unwrap() } - type MockFlightResult = Result<(LowLevelMessage, IngesterQueryResponseMetadata), FlightError>; + type MockFlightResult = Result<(DecodedPayload, IngesterQueryResponseMetadata), FlightError>; fn metadata(partition_id: i64, status: Option<PartitionStatus>) -> MockFlightResult { Ok(( - LowLevelMessage::None, + DecodedPayload::None, IngesterQueryResponseMetadata { partition_id, status, @@ -2051,7 +2049,7 @@ mod tests { completed_persistence_count: u64, ) -> MockFlightResult { Ok(( - LowLevelMessage::None, + DecodedPayload::None, IngesterQueryResponseMetadata { partition_id, status, @@ -2070,7 +2068,7 @@ mod tests { impl QueryData for MockQueryData { async fn next( &mut self, - ) -> Result<Option<(LowLevelMessage, IngesterQueryResponseMetadata)>, FlightError> { + ) -> Result<Option<(DecodedPayload, IngesterQueryResponseMetadata)>, FlightError> { if self.results.is_empty() { Ok(None) } else { @@ -2149,7 +2147,7 @@ mod tests { } #[async_trait] - impl FlightClient for MockFlightClient { + impl IngesterFlightClient 
for MockFlightClient { async fn query( &self, ingester_address: Arc<str>, diff --git a/querier/src/lib.rs b/querier/src/lib.rs index a57c2a88f2..7caa8a8b6d 100644 --- a/querier/src/lib.rs +++ b/querier/src/lib.rs @@ -30,7 +30,7 @@ pub use handler::{QuerierHandler, QuerierHandlerImpl}; pub use ingester::{ create_ingester_connection_for_testing, create_ingester_connections, flight_client::{ - Error as IngesterFlightClientError, FlightClient as IngesterFlightClient, + Error as IngesterFlightClientError, IngesterFlightClient, QueryData as IngesterFlightClientQueryData, }, Error as IngesterError, IngesterConnection, IngesterConnectionImpl, IngesterPartition, diff --git a/query_tests/Cargo.toml b/query_tests/Cargo.toml index 3a46b17477..4f0f35f192 100644 --- a/query_tests/Cargo.toml +++ b/query_tests/Cargo.toml @@ -20,6 +20,7 @@ generated_types = { path = "../generated_types" } hashbrown = { workspace = true } influxdb_iox_client = { path = "../influxdb_iox_client" } ingester = { path = "../ingester" } +iox_arrow_flight = { path = "../iox_arrow_flight" } iox_catalog = { path = "../iox_catalog" } iox_query = { path = "../iox_query" } iox_tests = { path = "../iox_tests" } diff --git a/query_tests/src/scenarios/util.rs b/query_tests/src/scenarios/util.rs index e1bcc750fb..430c19e9f0 100644 --- a/query_tests/src/scenarios/util.rs +++ b/query_tests/src/scenarios/util.rs @@ -12,12 +12,13 @@ use generated_types::{ influxdata::iox::ingester::v1::{IngesterQueryResponseMetadata, PartitionStatus}, ingester::IngesterQueryRequest, }; -use influxdb_iox_client::flight::{low_level::LowLevelMessage, Error as FlightError}; +use influxdb_iox_client::flight::Error as FlightError; use ingester::{ data::{DmlApplyAction, IngesterData, Persister}, lifecycle::mock_handle::MockLifecycleHandle, querier_handler::{prepare_data_to_querier, FlatIngesterQueryResponse, IngesterQueryResponse}, }; +use iox_arrow_flight::DecodedPayload; use iox_catalog::interface::get_schema_by_name; use iox_query::exec::{DedicatedExecutors, ExecutorType}; use iox_tests::util::{TestCatalog, TestNamespace, TestShard}; @@ -997,7 +998,7 @@ impl IngesterFlightClient for MockIngester { /// [`IngesterFlightClientQueryData`] (used by the querier) without doing any real gRPC IO. 
struct QueryDataAdapter { messages: Box< - dyn Iterator<Item = Result<(LowLevelMessage, IngesterQueryResponseMetadata), FlightError>> + dyn Iterator<Item = Result<(DecodedPayload, IngesterQueryResponseMetadata), FlightError>> + Send, >, } @@ -1023,7 +1024,7 @@ impl QueryDataAdapter { partition_id, status, } => ( - LowLevelMessage::None, + DecodedPayload::None, IngesterQueryResponseMetadata { partition_id: partition_id.get(), status: Some(PartitionStatus { @@ -1037,11 +1038,11 @@ impl QueryDataAdapter { }, ), FlatIngesterQueryResponse::StartSnapshot { schema } => ( - LowLevelMessage::Schema(schema), + DecodedPayload::Schema(schema), IngesterQueryResponseMetadata::default(), ), FlatIngesterQueryResponse::RecordBatch { batch } => ( - LowLevelMessage::RecordBatch(batch), + DecodedPayload::RecordBatch(batch), IngesterQueryResponseMetadata::default(), ), }; @@ -1063,7 +1064,7 @@ impl QueryDataAdapter { impl IngesterFlightClientQueryData for QueryDataAdapter { async fn next( &mut self, - ) -> Result<Option<(LowLevelMessage, IngesterQueryResponseMetadata)>, FlightError> { + ) -> Result<Option<(DecodedPayload, IngesterQueryResponseMetadata)>, FlightError> { self.messages.next().transpose() } } diff --git a/test_helpers_end_to_end/src/error.rs b/test_helpers_end_to_end/src/error.rs index 50e0350cfe..a20e4a65f7 100644 --- a/test_helpers_end_to_end/src/error.rs +++ b/test_helpers_end_to_end/src/error.rs @@ -4,7 +4,7 @@ pub fn check_flight_error( expected_error_code: tonic::Code, expected_message: Option<&str>, ) { - if let influxdb_iox_client::flight::Error::GrpcError(status) = err { + if let Some(status) = err.tonic_status() { check_tonic_status(status, expected_error_code, expected_message); } else { panic!("Not a gRPC error: {err}"); @@ -13,7 +13,7 @@ pub fn check_flight_error( /// Check tonic status. pub fn check_tonic_status( - status: tonic::Status, + status: &tonic::Status, expected_error_code: tonic::Code, expected_message: Option<&str>, ) { diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 34b5bb976f..3a112c288f 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -18,6 +18,7 @@ license.workspace = true [dependencies] ahash = { version = "0.8", default-features = false, features = ["getrandom", "runtime-rng"] } arrow = { version = "29", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] } +arrow-flight = { version = "29", features = ["flight-sql-experimental", "prost-types"] } arrow-ord = { version = "29", default-features = false, features = ["dyn_cmp_dict"] } arrow-string = { version = "29", default-features = false, features = ["dyn_cmp_dict"] } base64 = { version = "0.13", features = ["std"] }
e809ec14d126cab0cdd224c08219a0bdb1d2e7f4
Marco Neumann
2023-04-11 15:01:53
`cargo update` (#7505)
Seems like dependabot is running behind a bit. Let me help: ```console $ cargo update Updating crates.io index Updating git repository `https://github.com/apache/arrow-datafusion.git` Updating git repository `https://github.com/influxdata/rskafka.git` Updating git repository `https://github.com/mkmik/heappy` Updating async-stream v0.3.4 -> v0.3.5 Updating async-stream-impl v0.3.4 -> v0.3.5 Updating axum v0.6.11 -> v0.6.12 Updating concurrent-queue v2.1.0 -> v2.2.0 Updating core-foundation-sys v0.8.3 -> v0.8.4 Updating cxx v1.0.92 -> v1.0.94 Updating cxx-build v1.0.92 -> v1.0.94 Updating cxxbridge-flags v1.0.92 -> v1.0.94 Updating cxxbridge-macro v1.0.92 -> v1.0.94 Removing errno v0.2.8 Removing errno v0.3.0 Adding errno v0.3.1 Updating fd-lock v3.0.10 -> v3.0.12 Updating iana-time-zone v0.1.54 -> v0.1.56 Removing linux-raw-sys v0.1.4 Removing linux-raw-sys v0.3.0 Adding linux-raw-sys v0.3.1 Updating pest v2.5.6 -> v2.5.7 Updating pest_derive v2.5.6 -> v2.5.7 Updating pest_generator v2.5.6 -> v2.5.7 Updating pest_meta v2.5.6 -> v2.5.7 Updating proc-macro2 v1.0.54 -> v1.0.56 Removing rustix v0.36.11 Updating serde v1.0.159 -> v1.0.160 Updating serde_derive v1.0.159 -> v1.0.160 Updating syn v2.0.11 -> v2.0.14 Updating windows v0.46.0 -> v0.48.0 ```
null
chore: `cargo update` (#7505) Seems like dependabot is running behind a bit. Let me help: ```console $ cargo update Updating crates.io index Updating git repository `https://github.com/apache/arrow-datafusion.git` Updating git repository `https://github.com/influxdata/rskafka.git` Updating git repository `https://github.com/mkmik/heappy` Updating async-stream v0.3.4 -> v0.3.5 Updating async-stream-impl v0.3.4 -> v0.3.5 Updating axum v0.6.11 -> v0.6.12 Updating concurrent-queue v2.1.0 -> v2.2.0 Updating core-foundation-sys v0.8.3 -> v0.8.4 Updating cxx v1.0.92 -> v1.0.94 Updating cxx-build v1.0.92 -> v1.0.94 Updating cxxbridge-flags v1.0.92 -> v1.0.94 Updating cxxbridge-macro v1.0.92 -> v1.0.94 Removing errno v0.2.8 Removing errno v0.3.0 Adding errno v0.3.1 Updating fd-lock v3.0.10 -> v3.0.12 Updating iana-time-zone v0.1.54 -> v0.1.56 Removing linux-raw-sys v0.1.4 Removing linux-raw-sys v0.3.0 Adding linux-raw-sys v0.3.1 Updating pest v2.5.6 -> v2.5.7 Updating pest_derive v2.5.6 -> v2.5.7 Updating pest_generator v2.5.6 -> v2.5.7 Updating pest_meta v2.5.6 -> v2.5.7 Updating proc-macro2 v1.0.54 -> v1.0.56 Removing rustix v0.36.11 Updating serde v1.0.159 -> v1.0.160 Updating serde_derive v1.0.159 -> v1.0.160 Updating syn v2.0.11 -> v2.0.14 Updating windows v0.46.0 -> v0.48.0 ```
diff --git a/Cargo.lock b/Cargo.lock index e40fa8ba95..6235c6b534 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -460,9 +460,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -471,13 +471,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.14", ] [[package]] @@ -488,7 +488,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.14", ] [[package]] @@ -531,9 +531,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d8068b6ccb8b34db9de397c7043f91db8b4c66414952c6db944f238c4d3db3" +checksum = "349f8ccfd9221ee7d1f3d4b33e1f8319b3a81ed8f61f2ea40b37b859794b4491" dependencies = [ "async-trait", "axum-core", @@ -934,7 +934,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.14", ] [[package]] @@ -1087,9 +1087,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] @@ -1173,9 +1173,9 @@ checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpp_demangle" @@ -1386,9 +1386,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -1398,9 +1398,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -1408,24 +1408,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 1.0.109", + "syn 2.0.14", ] [[package]] name = "cxxbridge-flags" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" +checksum = 
"7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.14", ] [[package]] @@ -1773,24 +1773,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi", -] - -[[package]] -name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1845,13 +1834,13 @@ dependencies = [ [[package]] name = "fd-lock" -version = "3.0.10" +version = "3.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ef1a30ae415c3a691a4f41afddc2dbcd6d70baf338368d85ebc1e8ed92cedb9" +checksum = "39ae6b3d9530211fb3b12a95374b8b0823be812f53d09e18c5675c0146b09642" dependencies = [ "cfg-if", - "rustix 0.36.11", - "windows-sys 0.45.0", + "rustix", + "windows-sys 0.48.0", ] [[package]] @@ -2027,7 +2016,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.14", ] [[package]] @@ -2450,9 +2439,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.54" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3265,7 +3254,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.37.11", + "rustix", "windows-sys 0.48.0", ] @@ -3412,15 +3401,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" - -[[package]] -name = "linux-raw-sys" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd550e73688e6d578f0ac2119e32b797a327631a42f9433e59d02e139c8df60d" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] name = "lock_api" @@ -4154,9 +4137,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +checksum = "7b1403e8401ad5dedea73c626b99758535b342502f8d1e361f4a2dd952749122" dependencies = [ "thiserror", "ucd-trie", @@ -4164,9 +4147,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = 
"2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "be99c4c1d2fc2769b1d00239431d711d08f6efedcecb8b6e30707160aee99c15" dependencies = [ "pest", "pest_generator", @@ -4174,22 +4157,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "e56094789873daa36164de2e822b3888c6ae4b4f9da555a1103587658c805b1e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.14", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "6733073c7cff3d8459fda0e42f13a047870242aed8b509fe98000928975f359e" dependencies = [ "once_cell", "pest", @@ -4394,9 +4377,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.54" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e472a104799c74b514a57226160104aa483546de37e839ec50e3c2e41dd87534" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -4886,20 +4869,6 @@ dependencies = [ "semver", ] -[[package]] -name = "rustix" -version = "0.36.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" -dependencies = [ - "bitflags", - "errno 0.2.8", - "io-lifetimes", - "libc", - "linux-raw-sys 0.1.4", - "windows-sys 0.45.0", -] - [[package]] name = "rustix" version = "0.37.11" @@ -4907,10 +4876,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85597d61f83914ddeba6a47b3b8ffe7365107221c2e557ed94426489fefb5f77" dependencies = [ "bitflags", - "errno 0.3.0", + "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.0", + "linux-raw-sys", "windows-sys 0.48.0", ] @@ -5044,22 +5013,22 @@ checksum = "e6b44e8fc93a14e66336d230954dda83d18b4605ccace8fe09bc7514a71ad0bc" [[package]] name = "serde" -version = "1.0.159" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.159" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.14", ] [[package]] @@ -5656,9 +5625,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e3787bb71465627110e7d87ed4faaa36c1f61042ee67badb9e2ef173accc40" +checksum = "fcf316d5356ed6847742d036f8a39c3b8435cac10bd528a4bd461928a6ab34d5" dependencies = [ "proc-macro2", "quote", @@ -5686,7 +5655,7 @@ dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.11", + "rustix", "windows-sys 0.45.0", 
] @@ -5778,7 +5747,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.14", ] [[package]] @@ -5917,7 +5886,7 @@ checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.14", ] [[package]] @@ -6639,11 +6608,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.46.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.42.2", + "windows-targets 0.48.0", ] [[package]] @@ -6877,7 +6846,7 @@ dependencies = [ "regex-syntax", "reqwest", "ring", - "rustix 0.37.11", + "rustix", "rustls", "scopeguard", "serde", @@ -6889,7 +6858,7 @@ dependencies = [ "sqlx-core", "sqlx-macros", "syn 1.0.109", - "syn 2.0.11", + "syn 2.0.14", "thrift", "tokio", "tokio-stream",
0d8e756e1bf88e6e67d228045eb0120a541665ba
Carol (Nichols || Goulding)
2022-11-14 13:36:46
Update docs on the ingester-querier protocol.
I believe this fixes #6049.
null
fix: Update docs on the ingester-querier protocol. I believe this fixes #6049.
diff --git a/docs/ingester_querier_protocol.md b/docs/ingester_querier_protocol.md index 344dc5135c..f3158334e1 100644 --- a/docs/ingester_querier_protocol.md +++ b/docs/ingester_querier_protocol.md @@ -9,8 +9,8 @@ The `DoGet` ticket contains a [Protocol Buffer] message `influxdata.iox.ingester.v1.IngesterQueryRequest` (see our `generated_types` crate). This message contains: -- **namespace:** The namespace of the query. -- **table:** The table that we request. +- **namespace ID:** The catalog namespace ID of the query. +- **table ID:** The catalog table ID that we request. - **columns:** List of columns that the querier wants. If the ingester does NOT know about a specified column, it may just ignore that column (i.e. the resulting data is the intersection of the request and the ingester data).
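The ticket layout described in this doc change can be illustrated with a hand-rolled prost sketch. This is not the real `influxdata.iox.ingester.v1.IngesterQueryRequest` definition — the Rust field names and tag numbers below are assumptions for illustration only — but it shows how the catalog IDs and column list end up as the opaque bytes of the Flight `DoGet` ticket:

```rust
use prost::Message;

/// Illustrative stand-in for the request message; the field names and tag
/// numbers are assumptions, not the actual generated type.
#[derive(Clone, PartialEq, Message)]
struct IngesterQueryRequestSketch {
    /// Catalog namespace ID of the query.
    #[prost(int64, tag = "1")]
    namespace_id: i64,
    /// Catalog table ID that is requested.
    #[prost(int64, tag = "2")]
    table_id: i64,
    /// Columns the querier wants; the ingester may ignore unknown columns.
    #[prost(string, repeated, tag = "3")]
    columns: Vec<String>,
}

fn main() {
    let request = IngesterQueryRequestSketch {
        namespace_id: 42,
        table_id: 7,
        columns: vec!["time".to_string(), "usage_idle".to_string()],
    };
    // The encoded bytes become the opaque payload of the Flight `DoGet` ticket.
    let ticket = request.encode_to_vec();
    println!("ticket payload is {} bytes", ticket.len());
}
```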
b9e424582f4bd3fe07f8796c6004c821f6a4eec9
Carol (Nichols || Goulding)
2022-12-01 11:24:58
Extract a clap block for the ingester2 RPC write path
To be able to share it with the coming all-in-one2 command
null
refactor: Extract a clap block for the ingester2 RPC write path To be able to share it with the coming all-in-one2 command
diff --git a/Cargo.lock b/Cargo.lock index 5262b7f2a5..d02a8a99ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2785,6 +2785,7 @@ name = "ioxd_ingester2" version = "0.1.0" dependencies = [ "async-trait", + "clap_blocks", "hyper", "ingester2", "iox_catalog", diff --git a/clap_blocks/src/ingester2.rs b/clap_blocks/src/ingester2.rs new file mode 100644 index 0000000000..0d321f0dc1 --- /dev/null +++ b/clap_blocks/src/ingester2.rs @@ -0,0 +1,23 @@ +//! CLI config for the ingester using the RPC write path + +use std::path::PathBuf; + +/// CLI config for the ingester using the RPC write path +#[derive(Debug, Clone, clap::Parser)] +#[allow(missing_copy_implementations)] +pub struct Ingester2Config { + /// Where this ingester instance should store its write-ahead log files. Each ingester instance + /// must have its own directory. + #[clap(long = "wal-directory", env = "INFLUXDB_IOX_WAL_DIRECTORY", action)] + pub wal_directory: PathBuf, + + /// Sets how many concurrent requests the ingester will handle before rejecting + /// incoming requests. + #[clap( + long = "concurrent-request-limit", + env = "INFLUXDB_IOX_CONCURRENT_REQUEST_LIMIT", + default_value = "20", + action + )] + pub concurrent_request_limit: usize, +} diff --git a/clap_blocks/src/lib.rs b/clap_blocks/src/lib.rs index 26c7cb6572..09052ccad5 100644 --- a/clap_blocks/src/lib.rs +++ b/clap_blocks/src/lib.rs @@ -15,6 +15,7 @@ pub mod catalog_dsn; pub mod compactor; pub mod ingester; +pub mod ingester2; pub mod object_store; pub mod querier; pub mod router; diff --git a/influxdb_iox/src/commands/run/ingester2.rs b/influxdb_iox/src/commands/run/ingester2.rs index 02288f154e..5027ff9d8a 100644 --- a/influxdb_iox/src/commands/run/ingester2.rs +++ b/influxdb_iox/src/commands/run/ingester2.rs @@ -2,14 +2,16 @@ use super::main; use crate::process_info::setup_metric_registry; -use clap_blocks::{catalog_dsn::CatalogDsnConfig, run_config::RunConfig}; +use clap_blocks::{ + catalog_dsn::CatalogDsnConfig, ingester2::Ingester2Config, run_config::RunConfig, +}; use ioxd_common::{ server_type::{CommonServerState, CommonServerStateError}, Service, }; use ioxd_ingester2::create_ingester_server_type; use observability_deps::tracing::*; -use std::{path::PathBuf, sync::Arc}; +use std::sync::Arc; use thiserror::Error; #[derive(Debug, Error)] @@ -50,20 +52,8 @@ pub struct Config { #[clap(flatten)] pub(crate) catalog_dsn: CatalogDsnConfig, - /// Where this ingester instance should store its write-ahead log files. Each ingester instance - /// must have its own directory. - #[clap(long = "wal-directory", env = "INFLUXDB_IOX_WAL_DIRECTORY", action)] - wal_directory: PathBuf, - - /// Sets how many concurrent requests the ingester will handle before rejecting - /// incoming requests. 
- #[clap( - long = "concurrent-request-limit", - env = "INFLUXDB_IOX_CONCURRENT_REQUEST_LIMIT", - default_value = "20", - action - )] - pub concurrent_request_limit: usize, + #[clap(flatten)] + pub(crate) ingester_config: Ingester2Config, } pub async fn command(config: Config) -> Result<()> { @@ -79,8 +69,7 @@ pub async fn command(config: Config) -> Result<()> { &common_state, catalog, Arc::clone(&metric_registry), - config.wal_directory, - config.concurrent_request_limit, + &config.ingester_config, ) .await?; diff --git a/influxdb_iox/src/commands/run/router_rpc_write.rs b/influxdb_iox/src/commands/run/router_rpc_write.rs index 9241c240b2..024332c6e5 100644 --- a/influxdb_iox/src/commands/run/router_rpc_write.rs +++ b/influxdb_iox/src/commands/run/router_rpc_write.rs @@ -2,8 +2,8 @@ use super::main; use crate::process_info::setup_metric_registry; use clap_blocks::{ - catalog_dsn::CatalogDsnConfig, object_store::make_object_store, router_rpc_write::RouterRpcWriteConfig, - run_config::RunConfig, + catalog_dsn::CatalogDsnConfig, object_store::make_object_store, + router_rpc_write::RouterRpcWriteConfig, run_config::RunConfig, }; use iox_time::{SystemProvider, TimeProvider}; use ioxd_common::{ diff --git a/ioxd_ingester2/Cargo.toml b/ioxd_ingester2/Cargo.toml index 54ad16cf46..7ca5c13e99 100644 --- a/ioxd_ingester2/Cargo.toml +++ b/ioxd_ingester2/Cargo.toml @@ -7,6 +7,7 @@ license.workspace = true [dependencies] # In alphabetical order async-trait = "0.1" +clap_blocks = { path = "../clap_blocks" } hyper = "0.14" ingester2 = { path = "../ingester2" } iox_catalog = { path = "../iox_catalog" } diff --git a/ioxd_ingester2/src/lib.rs b/ioxd_ingester2/src/lib.rs index 4277224ae0..ce018e886b 100644 --- a/ioxd_ingester2/src/lib.rs +++ b/ioxd_ingester2/src/lib.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use clap_blocks::ingester2::Ingester2Config; use hyper::{Body, Request, Response}; use ingester2::IngesterRpcInterface; use iox_catalog::interface::Catalog; @@ -13,7 +14,6 @@ use ioxd_common::{ use metric::Registry; use std::{ fmt::{Debug, Display}, - path::PathBuf, sync::Arc, time::Duration, }; @@ -140,14 +140,13 @@ pub async fn create_ingester_server_type( common_state: &CommonServerState, catalog: Arc<dyn Catalog>, metrics: Arc<Registry>, - wal_directory: PathBuf, - max_simultaneous_requests: usize, + ingester_config: &Ingester2Config, ) -> Result<Arc<dyn ServerType>> { let grpc = ingester2::new( catalog, Arc::clone(&metrics), PERSIST_BACKGROUND_FETCH_TIME, - wal_directory, + ingester_config.wal_directory.clone(), ) .await?; @@ -155,6 +154,6 @@ pub async fn create_ingester_server_type( grpc, metrics, common_state, - max_simultaneous_requests, + ingester_config.concurrent_request_limit, ))) }
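For reference, the reuse mechanism in this refactor is clap's `flatten`: the extracted config struct is embedded into each command's top-level `Config`. A minimal standalone sketch of the same pattern (hypothetical struct, flag, and env-var names; assumes clap with the `derive` and `env` features):

```rust
use std::path::PathBuf;

use clap::Parser;

/// Hypothetical reusable CLI block, following the `clap_blocks` pattern above.
#[derive(Debug, Clone, Parser)]
struct WalConfig {
    /// Where this instance should store its write-ahead log files.
    #[clap(long = "wal-directory", env = "EXAMPLE_WAL_DIRECTORY")]
    wal_directory: PathBuf,
}

/// A command-level config that embeds the shared block via `flatten`.
#[derive(Debug, Parser)]
struct Config {
    #[clap(flatten)]
    wal: WalConfig,
}

fn main() {
    let config = Config::parse();
    println!("WAL directory: {}", config.wal.wal_directory.display());
}
```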
a17bd3bdedebbbd79167412f8ba1d070134736f3
Dom Dwyer
2023-07-04 15:32:34
don't Arc-wrap RecordBatch instances
RecordBatch are internally ref-counted, so don't Arc wrap them again.
null
refactor: don't Arc-wrap RecordBatch instances RecordBatch are internally ref-counted, so don't Arc wrap them again.
diff --git a/ingester/src/buffer_tree/partition.rs b/ingester/src/buffer_tree/partition.rs index 98c3c816db..6a71ca3148 100644 --- a/ingester/src/buffer_tree/partition.rs +++ b/ingester/src/buffer_tree/partition.rs @@ -349,7 +349,7 @@ impl PartitionData { #[cfg(test)] mod tests { - use std::{ops::Deref, time::Duration}; + use std::time::Duration; use arrow::compute::SortOptions; use arrow_util::assert_batches_eq; @@ -397,15 +397,7 @@ mod tests { "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", "+--------+--------+----------+--------------------------------+", ]; - assert_batches_eq!( - expected, - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() - ); + assert_batches_eq!(expected, data.record_batches()); } // Perform a another write, adding data to the existing queryable data @@ -427,15 +419,7 @@ mod tests { "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+----------+--------------------------------+", ]; - assert_batches_eq!( - expected, - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() - ); + assert_batches_eq!(expected, data.record_batches()); } } @@ -468,15 +452,7 @@ mod tests { "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", "+--------+--------+----------+--------------------------------+", ]; - assert_batches_eq!( - expected, - &*persisting_data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() - ); + assert_batches_eq!(expected, persisting_data.record_batches()); // Ensure the started batch ident is increased after a persist call, but not the completed // batch ident. @@ -503,15 +479,7 @@ mod tests { "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+----------+--------------------------------+", ]; - assert_batches_eq!( - expected, - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() - ); + assert_batches_eq!(expected, data.record_batches()); } // The persist now "completes". @@ -536,15 +504,7 @@ mod tests { "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+---------+--------------------------------+", ]; - assert_batches_eq!( - expected, - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() - ); + assert_batches_eq!(expected, data.record_batches()); } } @@ -557,12 +517,7 @@ mod tests { // A helper function to dedupe the record batches in [`QueryAdaptor`] // and assert the resulting batch contents. 
async fn assert_deduped(expect: &[&str], batch: QueryAdaptor) { - let batch = batch - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>(); + let batch = batch.record_batches().to_vec(); let sort_keys = vec![PhysicalSortExpr { expr: col("time", &batch[0].schema()).unwrap(), @@ -787,12 +742,7 @@ mod tests { "| 1970-01-01T00:00:00.000000042Z | 2.0 |", "+--------------------------------+-----+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); // Persist again, moving the last write to the persisting state and @@ -816,12 +766,7 @@ mod tests { "| 1970-01-01T00:00:00.000000042Z | 3.0 |", "+--------------------------------+-----+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); // Persist again, moving the last write to the persisting state and @@ -846,12 +791,7 @@ mod tests { "| 1970-01-01T00:00:00.000000042Z | 4.0 |", "+--------------------------------+-----+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); // Finish persisting the second batch out-of-order! The middle entry, @@ -871,12 +811,7 @@ mod tests { "| 1970-01-01T00:00:00.000000042Z | 4.0 |", "+--------------------------------+-----+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); // Finish persisting the last batch. @@ -894,12 +829,7 @@ mod tests { "| 1970-01-01T00:00:00.000000042Z | 4.0 |", "+--------------------------------+-----+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); // Finish persisting the first batch. @@ -917,12 +847,7 @@ mod tests { "| 1970-01-01T00:00:00.000000042Z | 4.0 |", "+--------------------------------+-----+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); } @@ -1019,12 +944,7 @@ mod tests { "| Madrid | 2.0 | none | 1970-01-01T00:00:00.000000011Z |", "+--------+--------+----------+--------------------------------+", ], - &*data - .record_batches() - .iter() - .map(Deref::deref) - .cloned() - .collect::<Vec<_>>() + &*data.record_batches().to_vec() ); } diff --git a/ingester/src/buffer_tree/partition/buffer.rs b/ingester/src/buffer_tree/partition/buffer.rs index faf00c5393..7e05dcb024 100644 --- a/ingester/src/buffer_tree/partition/buffer.rs +++ b/ingester/src/buffer_tree/partition/buffer.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use arrow::record_batch::RecordBatch; use data_types::SequenceNumber; use mutable_batch::MutableBatch; @@ -63,7 +61,7 @@ impl DataBuffer { /// Return all data for this buffer, ordered by the [`SequenceNumber`] from /// which it was buffered with. - pub(crate) fn get_query_data(&mut self) -> Vec<Arc<RecordBatch>> { + pub(crate) fn get_query_data(&mut self) -> Vec<RecordBatch> { // Take ownership of the FSM and return the data within it. self.0.mutate(|fsm| match fsm { // The buffering state can return data. 
diff --git a/ingester/src/buffer_tree/partition/buffer/mutable_buffer.rs b/ingester/src/buffer_tree/partition/buffer/mutable_buffer.rs index ae137c1106..1d2ef1d581 100644 --- a/ingester/src/buffer_tree/partition/buffer/mutable_buffer.rs +++ b/ingester/src/buffer_tree/partition/buffer/mutable_buffer.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use arrow::record_batch::RecordBatch; use mutable_batch::MutableBatch; use schema::Projection; @@ -39,12 +37,12 @@ impl Buffer { /// # Panics /// /// If generating the snapshot fails, this method panics. - pub(super) fn snapshot(self) -> Option<Arc<RecordBatch>> { - Some(Arc::new( + pub(super) fn snapshot(self) -> Option<RecordBatch> { + Some( self.buffer? .to_arrow(Projection::All) .expect("failed to snapshot buffer data"), - )) + ) } pub(super) fn is_empty(&self) -> bool { diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine.rs b/ingester/src/buffer_tree/partition/buffer/state_machine.rs index 8d7d633df2..98fa6614ed 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine.rs @@ -1,6 +1,4 @@ #![allow(dead_code)] -use std::sync::Arc; - use arrow::record_batch::RecordBatch; use data_types::{sequence_number_set::SequenceNumberSet, SequenceNumber}; use mutable_batch::MutableBatch; @@ -122,15 +120,13 @@ where /// Returns the current buffer data. /// /// This is always a cheap method call. - fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + fn get_query_data(&self) -> Vec<RecordBatch> { self.state.get_query_data() } } #[cfg(test)] mod tests { - use std::ops::Deref; - use arrow_util::assert_batches_eq; use mutable_batch_lp::test_helpers::lp_to_mutable_batch; use schema::Projection; @@ -175,7 +171,7 @@ mod tests { "| true | 42.0 | platanos | 1991-03-10T00:00:42.000000042Z |", "+-------+----------+----------+--------------------------------+", ]; - assert_batches_eq!(&expected, &[w1_data[0].deref().clone()]); + assert_batches_eq!(&expected, &[w1_data[0].clone()]); // Apply another write. buffer @@ -205,7 +201,7 @@ mod tests { "+-------+----------+----------+--------------------------------+", ]; assert_eq!(w2_data.len(), 1); - assert_batches_eq!(&expected, &[w2_data[0].deref().clone()]); + assert_batches_eq!(&expected, &[w2_data[0].clone()]); // Ensure the same data is returned for a second read. { @@ -216,7 +212,7 @@ mod tests { let same_arcs = w2_data .iter() .zip(second_read.iter()) - .all(|(a, b)| Arc::ptr_eq(a, b)); + .all(|(a, b)| a.columns().as_ptr() == b.columns().as_ptr()); assert!(same_arcs); } @@ -231,7 +227,7 @@ mod tests { let same_arcs = w2_data .into_iter() .zip(final_data.into_iter()) - .all(|(a, b)| Arc::ptr_eq(&a, &b)); + .all(|(a, b)| a.columns().as_ptr() == b.columns().as_ptr()); assert!(same_arcs); // Assert the sequence numbers were recorded. @@ -260,14 +256,14 @@ mod tests { assert_eq!(buffer.get_query_data().len(), 1); - let snapshot = &buffer.get_query_data()[0]; + let snapshot = buffer.get_query_data()[0].clone(); // Generate the combined buffer from the original inputs to compare // against. 
mb1.extend_from(&mb2).unwrap(); let want = mb1.to_arrow(Projection::All).unwrap(); - assert_eq!(&**snapshot, &want); + assert_eq!(snapshot, want); } #[test] diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs index c48ceb7229..fcc4192aa8 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs @@ -1,7 +1,5 @@ //! A write buffer. -use std::sync::Arc; - use arrow::record_batch::RecordBatch; use mutable_batch::MutableBatch; use schema::Projection; @@ -35,12 +33,10 @@ pub(crate) struct Buffering { /// This method panics if converting the buffered data (if any) into an Arrow /// [`RecordBatch`] fails (a non-transient error). impl Queryable for Buffering { - fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + fn get_query_data(&self) -> Vec<RecordBatch> { let data = self.buffer.buffer().map(|v| { - Arc::new( - v.to_arrow(Projection::All) - .expect("failed to snapshot buffer data"), - ) + v.to_arrow(Projection::All) + .expect("failed to snapshot buffer data") }); match data { diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs index 4d2dd672d7..e5557f9c6d 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs @@ -1,7 +1,5 @@ //! A writfield1 buffer, with one or more snapshots. -use std::sync::Arc; - use arrow::record_batch::RecordBatch; use data_types::sequence_number_set::SequenceNumberSet; @@ -14,17 +12,17 @@ pub(crate) struct Persisting { /// Snapshots generated from previous buffer contents to be persisted. /// /// INVARIANT: this array is always non-empty. - snapshots: Vec<Arc<RecordBatch>>, + snapshots: Vec<RecordBatch>, } impl Persisting { - pub(super) fn new(snapshots: Vec<Arc<RecordBatch>>) -> Self { + pub(super) fn new(snapshots: Vec<RecordBatch>) -> Self { Self { snapshots } } } impl Queryable for Persisting { - fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + fn get_query_data(&self) -> Vec<RecordBatch> { self.snapshots.clone() } } diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs index c530653571..00210abc00 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs @@ -1,7 +1,5 @@ //! A writfield1 buffer, with one or more snapshots. -use std::sync::Arc; - use arrow::record_batch::RecordBatch; use super::BufferState; @@ -15,18 +13,18 @@ pub(crate) struct Snapshot { /// Snapshots generated from previous buffer contents. /// /// INVARIANT: this array is always non-empty. 
- snapshots: Vec<Arc<RecordBatch>>, + snapshots: Vec<RecordBatch>, } impl Snapshot { - pub(super) fn new(snapshots: Vec<Arc<RecordBatch>>) -> Self { + pub(super) fn new(snapshots: Vec<RecordBatch>) -> Self { assert!(!snapshots.is_empty()); Self { snapshots } } } impl Queryable for Snapshot { - fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + fn get_query_data(&self) -> Vec<RecordBatch> { self.snapshots.clone() } } diff --git a/ingester/src/buffer_tree/partition/buffer/traits.rs b/ingester/src/buffer_tree/partition/buffer/traits.rs index 24241910b6..6f79244251 100644 --- a/ingester/src/buffer_tree/partition/buffer/traits.rs +++ b/ingester/src/buffer_tree/partition/buffer/traits.rs @@ -1,6 +1,6 @@ //! Private traits for state machine states. -use std::{fmt::Debug, sync::Arc}; +use std::fmt::Debug; use arrow::record_batch::RecordBatch; use mutable_batch::MutableBatch; @@ -13,5 +13,5 @@ pub(crate) trait Writeable: Debug { /// A state that can return the contents of the buffer as one or more /// [`RecordBatch`] instances. pub(crate) trait Queryable: Debug { - fn get_query_data(&self) -> Vec<Arc<RecordBatch>>; + fn get_query_data(&self) -> Vec<RecordBatch>; } diff --git a/ingester/src/persist/compact.rs b/ingester/src/persist/compact.rs index 82606ebb26..e1b3c719d4 100644 --- a/ingester/src/persist/compact.rs +++ b/ingester/src/persist/compact.rs @@ -67,10 +67,7 @@ pub(super) async fn compact_persisting_batch( adjust_sort_key_columns(&sk, &batch.schema().primary_key()) } None => { - let sort_key = compute_sort_key( - batch.schema(), - batch.record_batches().iter().map(|sb| sb.as_ref()), - ); + let sort_key = compute_sort_key(batch.schema(), batch.record_batches().iter()); // Use the sort key computed from the cardinality as the sort key for this parquet // file's metadata, also return the sort key to be stored in the catalog (sort_key.clone(), Some(sort_key)) @@ -127,7 +124,7 @@ mod tests { .to_arrow(Projection::All) .unwrap(); - let batch = QueryAdaptor::new(ARBITRARY_PARTITION_ID, vec![Arc::new(batch)]); + let batch = QueryAdaptor::new(ARBITRARY_PARTITION_ID, vec![batch]); // verify PK let schema = batch.schema(); @@ -459,8 +456,7 @@ mod tests { let expected_pk = vec!["tag1", "time"]; assert_eq!(expected_pk, pk); - let sort_key = - compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + let sort_key = compute_sort_key(schema, batch.record_batches().iter()); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -500,8 +496,7 @@ mod tests { let expected_pk = vec!["tag1", "time"]; assert_eq!(expected_pk, pk); - let sort_key = - compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + let sort_key = compute_sort_key(schema, batch.record_batches().iter()); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -549,8 +544,7 @@ mod tests { let expected_pk = vec!["tag1", "time"]; assert_eq!(expected_pk, pk); - let sort_key = - compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + let sort_key = compute_sort_key(schema, batch.record_batches().iter()); assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"])); // compact @@ -596,8 +590,7 @@ mod tests { let expected_pk = vec!["tag1", "tag2", "time"]; assert_eq!(expected_pk, pk); - let sort_key = - compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + let sort_key = compute_sort_key(schema, batch.record_batches().iter()); assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"])); // 
compact @@ -647,8 +640,7 @@ mod tests { let expected_pk = vec!["tag1", "tag2", "time"]; assert_eq!(expected_pk, pk); - let sort_key = - compute_sort_key(schema, batch.record_batches().iter().map(|rb| rb.as_ref())); + let sort_key = compute_sort_key(schema, batch.record_batches().iter()); assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"])); // compact @@ -699,7 +691,7 @@ mod tests { batch.schema(); } - async fn create_one_row_record_batch_with_influxtype() -> Vec<Arc<RecordBatch>> { + async fn create_one_row_record_batch_with_influxtype() -> Vec<RecordBatch> { let chunk1 = Arc::new( TestChunk::new("t") .with_id(1) @@ -723,11 +715,10 @@ mod tests { ]; assert_batches_eq!(&expected, &batches); - let batches: Vec<_> = batches.iter().map(|r| Arc::new(r.clone())).collect(); batches } - async fn create_one_record_batch_with_influxtype_no_duplicates() -> Vec<Arc<RecordBatch>> { + async fn create_one_record_batch_with_influxtype_no_duplicates() -> Vec<RecordBatch> { let chunk1 = Arc::new( TestChunk::new("t") .with_id(1) @@ -753,11 +744,10 @@ mod tests { ]; assert_batches_eq!(&expected, &batches); - let batches: Vec<_> = batches.iter().map(|r| Arc::new(r.clone())).collect(); batches } - async fn create_one_record_batch_with_influxtype_duplicates() -> Vec<Arc<RecordBatch>> { + async fn create_one_record_batch_with_influxtype_duplicates() -> Vec<RecordBatch> { let chunk1 = Arc::new( TestChunk::new("t") .with_id(1) @@ -790,12 +780,11 @@ mod tests { ]; assert_batches_eq!(&expected, &batches); - let batches: Vec<_> = batches.iter().map(|r| Arc::new(r.clone())).collect(); batches } /// RecordBatches with knowledge of influx metadata - async fn create_batches_with_influxtype() -> Vec<Arc<RecordBatch>> { + async fn create_batches_with_influxtype() -> Vec<RecordBatch> { // Use the available TestChunk to create chunks and then convert them to raw RecordBatches let mut batches = vec![]; @@ -826,7 +815,7 @@ mod tests { "+-----------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &[batch1.clone()]); - batches.push(Arc::new(batch1)); + batches.push(batch1); // chunk2 having duplicate data with chunk 1 let chunk2 = Arc::new( @@ -850,7 +839,7 @@ mod tests { "+-----------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &[batch2.clone()]); - batches.push(Arc::new(batch2)); + batches.push(batch2); // verify data from both batches let expected = vec![ @@ -874,14 +863,13 @@ mod tests { "| 5 | MT | 1970-01-01T00:00:00.000005Z |", "+-----------+------+--------------------------------+", ]; - let b: Vec<_> = batches.iter().map(|b| (**b).clone()).collect(); - assert_batches_eq!(&expected, &b); + assert_batches_eq!(&expected, &batches); batches } /// RecordBatches with knowledge of influx metadata - async fn create_batches_with_influxtype_different_columns() -> Vec<Arc<RecordBatch>> { + async fn create_batches_with_influxtype_different_columns() -> Vec<RecordBatch> { // Use the available TestChunk to create chunks and then convert them to raw RecordBatches let mut batches = vec![]; @@ -912,7 +900,7 @@ mod tests { "+-----------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &[batch1.clone()]); - batches.push(Arc::new(batch1)); + batches.push(batch1); // chunk2 having duplicate data with chunk 1 // mmore columns @@ -939,14 +927,14 @@ mod tests { "+-----------+------------+------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &[batch2.clone()]); - batches.push(Arc::new(batch2)); + 
batches.push(batch2); batches } /// RecordBatches with knowledge of influx metadata - async fn create_batches_with_influxtype_different_columns_different_order( - ) -> Vec<Arc<RecordBatch>> { + async fn create_batches_with_influxtype_different_columns_different_order() -> Vec<RecordBatch> + { // Use the available TestChunk to create chunks and then convert them to raw RecordBatches let mut batches = vec![]; @@ -978,7 +966,7 @@ mod tests { "+-----------+------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &[batch1.clone()]); - batches.push(Arc::new(batch1.clone())); + batches.push(batch1.clone()); // chunk2 having duplicate data with chunk 1 // mmore columns @@ -1003,13 +991,13 @@ mod tests { "+-----------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &[batch2.clone()]); - batches.push(Arc::new(batch2)); + batches.push(batch2); batches } /// Has 2 tag columns; tag1 has a lower cardinality (3) than tag3 (4) - async fn create_batches_with_influxtype_different_cardinality() -> Vec<Arc<RecordBatch>> { + async fn create_batches_with_influxtype_different_cardinality() -> Vec<RecordBatch> { // Use the available TestChunk to create chunks and then convert them to raw RecordBatches let mut batches = vec![]; @@ -1034,7 +1022,7 @@ mod tests { "+-----------+------+------+-----------------------------+", ]; assert_batches_eq!(&expected, &[batch1.clone()]); - batches.push(Arc::new(batch1.clone())); + batches.push(batch1.clone()); let chunk2 = Arc::new( TestChunk::new("t") @@ -1057,13 +1045,13 @@ mod tests { "+-----------+------+------+-----------------------------+", ]; assert_batches_eq!(&expected, &[batch2.clone()]); - batches.push(Arc::new(batch2)); + batches.push(batch2); batches } /// RecordBatches with knowledge of influx metadata - async fn create_batches_with_influxtype_same_columns_different_type() -> Vec<Arc<RecordBatch>> { + async fn create_batches_with_influxtype_same_columns_different_type() -> Vec<RecordBatch> { // Use the available TestChunk to create chunks and then convert them to raw RecordBatches let mut batches = vec![]; @@ -1087,7 +1075,7 @@ mod tests { "+-----------+------+-----------------------------+", ]; assert_batches_eq!(&expected, &[batch1.clone()]); - batches.push(Arc::new(batch1)); + batches.push(batch1); // chunk2 having duplicate data with chunk 1 // mmore columns @@ -1110,7 +1098,7 @@ mod tests { "+-----------+------+-----------------------------+", ]; assert_batches_eq!(&expected, &[batch2.clone()]); - batches.push(Arc::new(batch2)); + batches.push(batch2); batches } diff --git a/ingester/src/query_adaptor.rs b/ingester/src/query_adaptor.rs index 2a4b7a91e1..68a6a440e4 100644 --- a/ingester/src/query_adaptor.rs +++ b/ingester/src/query_adaptor.rs @@ -28,7 +28,7 @@ pub struct QueryAdaptor { /// /// This MUST be non-pub(crate) / closed for modification / immutable to support /// interning the merged schema in [`Self::schema()`]. - data: Vec<Arc<RecordBatch>>, + data: Vec<RecordBatch>, /// The catalog ID of the partition the this data is part of. partition_id: PartitionId, @@ -50,7 +50,7 @@ impl QueryAdaptor { /// /// This constructor panics if `data` contains no [`RecordBatch`], or all /// [`RecordBatch`] are empty. - pub(crate) fn new(partition_id: PartitionId, data: Vec<Arc<RecordBatch>>) -> Self { + pub(crate) fn new(partition_id: PartitionId, data: Vec<RecordBatch>) -> Self { // There must always be at least one record batch and one row. 
// // This upholds an invariant that simplifies dealing with empty @@ -73,8 +73,7 @@ impl QueryAdaptor { // Project the column selection across all RecordBatch self.data .iter() - .map(|data| { - let batch = data.as_ref(); + .map(|batch| { let schema = batch.schema(); // Apply selection to in-memory batch @@ -96,7 +95,7 @@ impl QueryAdaptor { } /// Returns the [`RecordBatch`] instances in this [`QueryAdaptor`]. - pub(crate) fn record_batches(&self) -> &[Arc<RecordBatch>] { + pub(crate) fn record_batches(&self) -> &[RecordBatch] { self.data.as_ref() } @@ -113,8 +112,7 @@ impl QueryAdaptor { /// Time range, useful for building stats pub(crate) fn ts_min_max(&self) -> TimestampMinMax { - compute_timenanosecond_min_max(self.data.iter().map(|b| b.as_ref())) - .expect("Should have time range") + compute_timenanosecond_min_max(self.data.iter()).expect("Should have time range") } } diff --git a/schema/src/merge.rs b/schema/src/merge.rs index 25c5cd19a4..94735982a4 100644 --- a/schema/src/merge.rs +++ b/schema/src/merge.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use arrow::{datatypes::Field, record_batch::RecordBatch}; use hashbrown::hash_map::RawEntryMut; use hashbrown::HashMap; @@ -44,7 +42,7 @@ pub type Result<T, E = Error> = std::result::Result<T, E>; /// This is infallable because the schemas of chunks within a /// partition are assumed to be compatible because that schema was /// enforced as part of writing into the partition -pub fn merge_record_batch_schemas(batches: &[Arc<RecordBatch>]) -> Schema { +pub fn merge_record_batch_schemas(batches: &[RecordBatch]) -> Schema { let mut merger = SchemaMerger::new(); for batch in batches { let schema = Schema::try_from(batch.schema()).expect("Schema conversion error"); @@ -172,6 +170,8 @@ impl<'a> SchemaMerger<'a> { #[cfg(test)] mod tests { + use std::sync::Arc; + use crate::builder::SchemaBuilder; use crate::InfluxFieldType::Integer;
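The premise of this refactor is that a `RecordBatch` already stores its columns as `Arc<dyn Array>`, so cloning a batch only bumps reference counts. A small sketch using plain arrow-rs (no IOx types) demonstrating that an outer `Arc<RecordBatch>` adds nothing:

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::record_batch::RecordBatch;

fn main() {
    // Columns are stored as Arc<dyn Array> inside the batch.
    let col: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3]));
    let batch = RecordBatch::try_from_iter([("v", col)]).expect("valid batch");

    // Cloning the batch clones the Arcs, not the data, so wrapping it in
    // another Arc only adds indirection.
    let cheap_copy = batch.clone();
    assert!(Arc::ptr_eq(batch.column(0), cheap_copy.column(0)));
}
```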
7322f238fbee05433fbb2a943ffd3544323e37a6
Marco Neumann
2023-06-23 11:13:14
query processing (#8033)
* docs: query processing Closes https://github.com/influxdata/idpe/issues/17770 . * docs: apply recommendations Co-authored-by: Stuart Carnie <[email protected]> Co-authored-by: Andrew Lamb <[email protected]> * docs: improve description of the flight protocol * docs: link `LogicalPlan` * docs: link `ExecutionPlan` * docs: improve wording * docs: improve query planning docs ---------
Co-authored-by: Stuart Carnie <[email protected]> Co-authored-by: Andrew Lamb <[email protected]>
docs: query processing (#8033) * docs: query processing Closes https://github.com/influxdata/idpe/issues/17770 . * docs: apply recommendations Co-authored-by: Stuart Carnie <[email protected]> Co-authored-by: Andrew Lamb <[email protected]> * docs: improve description of the flight protocol * docs: link `LogicalPlan` * docs: link `ExecutionPlan` * docs: improve wording * docs: improve query planning docs --------- Co-authored-by: Stuart Carnie <[email protected]> Co-authored-by: Andrew Lamb <[email protected]>
diff --git a/docs/README.md b/docs/README.md index ae5d188e4b..67ee16c95b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -50,3 +50,4 @@ We hold monthly Tech Talks that explain the project's technical underpinnings. Y * [Notes on the use of local filesystems](local_filesystems.md) * [Querier <> Ingester Query Protocol](ingester_querier_protocol.md) * [Underground Guide to Running IOx Locally](underground_guide.md) +* [Query Processing](query_processing.md) diff --git a/docs/query_processing.md b/docs/query_processing.md new file mode 100644 index 0000000000..8a6d954451 --- /dev/null +++ b/docs/query_processing.md @@ -0,0 +1,348 @@ +# InfluxDB IOx -- Query Processing + +This document illustrates query processing for SQL and InfluxQL. + +> **Note** +> +> There is another query interface called InfluxRPC (implemented in [`iox_query_influxrpc`]) which mostly reflects the old TSM storage API. The planning there works significantly different and this is NOT part of this document. + + +## Basic Flow +1. Query arrives from the user (e.g. SQL, InfluxQL) +2. The query engine creates a [`LogicalPlan`] by consulting the Catalog to find: + - Tables referenced in the query, and their schema and column details +3. The query engine creates a [`ExecutionPlan`] by determining the Chunks that contain data: + 1. Contacts the ingester for any unpersisted data + 2. Consults the catalog for the name/location of parquet files + 3. Prunes (discards at this step) any parquet files +4. Starts the [`ExecutionPlan`] and streams the results back to the client + +Some objects cached, especially the schema information, information about parquet file existence and parquet file +content. + +A graphical representation may look like this: + +```mermaid +flowchart LR + classDef intermediate color:#020A47,fill:#D6F622,stroke-width:0 + classDef processor color:#FFFFFF,fill:#D30971,stroke-width:0 + classDef systemIO color:#020A47,fill:#5EE4E4,stroke-width:0 + + Query[Query Text]:::systemIO + LogicalPlanner[Logical Planner]:::processor + LogicalPlan[Logical Plan]:::intermediate + PhysicalPlanner[Physical Planner]:::processor + ExecutionPlan[Execution Plan]:::intermediate + QueryExec[QueryExecution]:::processor + Result[Result]:::systemIO + + Query --> LogicalPlanner --> LogicalPlan --> PhysicalPlanner --> ExecutionPlan --> QueryExec --> Result +``` + + +## Code Organization +The IOx query layer is responsible for translating query requests from different query languages and planning and +executing them against chunks stored across various IOx storage systems. + +Query Frontends: +- SQL +- InfluxQL +- Others (possibly in the future) + +Sources of chunk data: +- Ingester Data +- Parquet Files +- Others (possibly in the future) + +The goal is to use the shared query / plan representation in order to avoid N*M combinations of language and chunk +source. While each frontend has their own plan construction and each chunk may be lowered to a different +[`ExecutionPlan`], the frontends and the chunks sources should not interact directly. This is achieved by first creating +a [`LogicalPlan`] from the frontend without knowing the chunk sources and only during physical planning -- i.e. when the +[`ExecutionPlan`] is constructed -- the chunks are transformed into appropriate [`DataFusion`] nodes. 
+ +So we should end up with roughly this picture: + +```mermaid +flowchart TB + classDef out color:#020A47,fill:#9394FF,stroke-width:0 + classDef intermediate color:#020A47,fill:#D6F622,stroke-width:0 + classDef in color:#020A47,fill:#5EE4E4,stroke-width:0 + + SQL[SQL]:::in + InfluxQL[InfluxQL]:::in + OtherIn["Other (possibly in the future)"]:::in + + LogicalPlan[Logical Plan]:::intermediate + + IngesterData[Ingester Data]:::out + ParquetFile[Parquet File]:::out + OtherOut["Other (possibly in the future)"]:::out + + SQL --> LogicalPlan + InfluxQL --> LogicalPlan + OtherIn --> LogicalPlan + + LogicalPlan --> IngesterData + LogicalPlan --> ParquetFile + LogicalPlan --> OtherOut +``` + + + +## Frontend +We accept queries via an [Apache Arrow Flight] based native protocol (see [`service_grpc_flight::FlightService`]), or +via the standard [Apache Arrow Flight SQL]. + +Note that we stream data back to the client while [DataFusion] is still executing the query. This way we can emit rather +large results without large buffer usage. + +Also see: + +- ["Flight SQL"] + + +## Logical Planning +Logical planning transforms the query text into a [`LogicalPlan`]. + +The steps are the following: + +1. Parse text representation is parsed into some intermediate representation +2. Lower intermediate representation into [`LogicalPlan`] +3. Apply logical optimizer passes to the [`LogicalPlan`] + +### SQL +For SQL queries, we just use [`datafusion-sql`] to generate the [`LogicalPlan`] from the query text. + +### InfluxQL +For InfluxQL queries, we use [`iox_query_influxql`] to generate the [`LogicalPlan`] from the query text. + +### Logical Optimizer +We have a few logical optimizer passes that are specific to IOx. These can be split into two categories: *optimizing* +and *functional*. + +The *optimizing* only change to plan to make it run faster. They do not implement any functionality. These passes are: + +- [`influx_regex_to_datafusion_regex`]: Replaces InfluxDB-specific regex operator with [DataFusion] regex operator. + +The *functional* passes implement features that are NOT offered by [DataFusion] by transforming the [`LogicalPlan`] +accordingly. These passes are: + +- [`handle_gapfill`]: enables gap-filling semantics for SQL queries that contain calls to `DATE_BIN_GAPFILL()` and + related functions like `LOCF()`. + +The IOx-specific passes are executed AFTER the [DataFusion] builtin passes. + + +## Physical Planning +Physical planning transforms the [`LogicalPlan`] into a [`ExecutionPlan`]. + +These are the steps: + +1. [DataFusion] lowers [`LogicalPlan`] to [`ExecutionPlan`] + - While doing so it calls IOx code to transform table scans into concrete physical operators +2. 
Apply physical optimizer passes to the [`ExecutionPlan`] + +For more details, see: + +- ["IOx Physical Plan Construction"] +- ["Ingester ⇔ Querier Query Protocol"] +- ["Deduplication"] + + +## Data Flow +This is a detailled data flow from the querier point of view: + +```mermaid +flowchart TB + classDef cache color:#020A47,fill:#9394FF,stroke-width:0 + classDef external color:#FFFFFF,fill:#9B2AFF,stroke-width:0 + classDef intermediate color:#020A47,fill:#D6F622,stroke-width:0 + classDef processor color:#FFFFFF,fill:#D30971,stroke-width:0 + classDef systemIO color:#020A47,fill:#5EE4E4,stroke-width:0 + + NamespaceName[Namespace Name]:::systemIO + SqlQuery[SQL Query]:::systemIO + Result[Result]:::systemIO + + Catalog[/Catalog/]:::external + Ingester[/Ingester/]:::external + ObjectStore[/Object Store/]:::external + + NamespaceCache[Namespace Cache]:::cache + OSCache[Object Store Cache]:::cache + ParquetCache[Parquet File Cache]:::cache + PartitionCache[Partition Cache]:::cache + ProjectedSchemaCache[Projected Schema Cache]:::cache + + CachedNamespace[Cached Namespace]:::intermediate + LogicalPlan[Logical Plan]:::intermediate + ExecutionPlan[Execution Plan]:::intermediate + ParquetBytes[Parquet Bytes]:::intermediate + + LogicalPlanner[LogicalPlanner]:::processor + PhysicalPlanner[PhysicalPlanner]:::processor + QueryExec[Query Execution]:::processor + + %% help layout engine a bit + ProjectedSchemaCache --- PartitionCache + linkStyle 0 stroke-width:0px + + Catalog --> NamespaceCache + Catalog --> ParquetCache + Catalog --> PartitionCache + + ObjectStore --> OSCache + OSCache --> ParquetBytes + + NamespaceName --> NamespaceCache + NamespaceCache --> CachedNamespace + SqlQuery --> LogicalPlanner + LogicalPlanner --> LogicalPlan + + CachedNamespace --> CachedTable + LogicalPlan --> IngesterRequest + IngesterRequest --> Ingester + Ingester --> IngesterResponse + ParquetCache --> ParquetFileMD1 + PartitionCache --> ColumnRanges + PartitionCache --> SortKey + ProjectedSchemaCache --> ProjectedSchema + + subgraph table [Querier Table] + ArrowSchema[ArrowSchema]:::intermediate + CachedTable[Cached Table]:::intermediate + ColumnRanges[Column Ranges]:::intermediate + IngesterChunks[Ingester Chunks]:::intermediate + IngesterRequest[Ingester Request]:::intermediate + IngesterResponse[Ingester Partitions]:::intermediate + IngesterWatermark[Ingester Watermark]:::intermediate + ParquetChunks[Parquet Chunks]:::intermediate + ParquetFileMD1[Parquet File MD]:::intermediate + ParquetFileMD2[Parquet File MD]:::intermediate + ProjectedSchema[ProjectedSchema]:::intermediate + SortKey[SortKey]:::intermediate + QueryChunks1[Query Chunks]:::intermediate + QueryChunks2[Query Chunks]:::intermediate + + ChunkAdapter[ChunkAdapter]:::processor + IngesterDecoder[Ingester Decoder]:::processor + PreFilter[Pre-filter]:::processor + Pruning[Pruning]:::processor + + CachedTable --> ArrowSchema + + ColumnRanges --> IngesterDecoder + IngesterResponse --> IngesterDecoder + IngesterDecoder --> IngesterChunks + IngesterDecoder --> IngesterWatermark + + ParquetFileMD1 --> PreFilter + PreFilter --> ParquetFileMD2 + ParquetFileMD2 --> ChunkAdapter + ColumnRanges --> ChunkAdapter + SortKey --> ChunkAdapter + ProjectedSchema --> ChunkAdapter + ChunkAdapter --> ParquetChunks + + IngesterChunks --> QueryChunks1 + ParquetChunks --> QueryChunks1 + QueryChunks1 --> Pruning + Pruning --> QueryChunks2 + end + + style table color:#020A47,fill:#00000000,stroke:#020A47,stroke-dasharray:20 + + ArrowSchema --> LogicalPlanner + CachedTable --> 
PartitionCache + CachedTable --> ProjectedSchemaCache + + IngesterChunks -.-> PartitionCache + ParquetFileMD2 -.-> PartitionCache + IngesterWatermark -.-> ParquetCache + LogicalPlan -.-> NamespaceCache + ParquetFileMD1 -.-> NamespaceCache + + QueryChunks2 --> PhysicalPlanner + LogicalPlan --> PhysicalPlanner + PhysicalPlanner --> ExecutionPlan + ExecutionPlan --> QueryExec + ParquetBytes --> QueryExec + QueryExec --> Result +``` + +Legend: + + +```mermaid +flowchart TB + classDef cache color:#020A47,fill:#9394FF,stroke-width:0 + classDef external color:#FFFFFF,fill:#9B2AFF,stroke-width:0 + classDef intermediate color:#020A47,fill:#D6F622,stroke-width:0 + classDef processor color:#FFFFFF,fill:#D30971,stroke-width:0 + classDef systemIO color:#020A47,fill:#5EE4E4,stroke-width:0 + classDef helper color:#020A47,fill:#020A47,stroke-width:0 + + n_c[Cache]:::cache + n_e[/External System/]:::external + n_i[Intermediate Result]:::intermediate + n_p[Processor]:::processor + n_s[System Input and Output]:::systemIO + + a((xxx)):::helper -->|data flow| b((xxx)):::helper + c((xxx)):::helper -.->|cache invalidation| d((xxx)):::helper +``` + +## Caches +Each querier process has a set of in-memory caches. These are: + +| Name | Pool | Backing System | Key | Value | Invalidation / TTL / Refreshes | Notes | +| ---- | ---- | -------------- | --- | ----- | ------------------------------ | ----- | +| Namespace | Metadata | Catalog | Namespace Name | `CachedNamespace` | refresh policy, TTL, invalidation by unknown table/columns | Unknown entries NOT cached (assumes upstream DDoS protection) | +| Object Store | Data | Object Store | Path | Raw object store bytes for the entire object | -- | | +| Parquet File | Metadata | Catalog | Table ID | Parquet files (all the data that the catalog has, i.e. the entire row) for all files that are NOT marked for deletion. | No refresh yet (see #5718), can be invalided by ingester watermark. | | +| Partition | Metadata | Catalog | Partition ID | `CachedPartition` | Invalided if ingester data or any parquet files has columns that are NOT covered by the sort key. | Needs `CachedTable` for access | +| Projected Schema | Metadata | Querier | Table ID, Column IDs | `ProjectedSchema` | -- | Needs `CachedTable` for access | + +Note that ALL caches have a LRU eviction policy bound to the specified pool. + +### Cached Objects +The following objects are stored within the aforementioned caches. + +#### `CachedNamespace` +- namespace ID +- retention policy +- map from `Arc`ed table name to `Arc`ed `CachedTable` + +#### `CachedPartition` +- sort key +- column ranges (decoded from partition key using the partition template) + +#### `CachedTable` +- table ID +- schema +- column ID => colum name map +- column name => column ID map (i.e. the reverse of the above) +- column IDs of primary key columns +- partition template + +#### `ProjectedSchema` +Arrow schema projected from the table schema for a specific subset of columns (since some chunks do not contain all the +columns). Mostly done to optimize memory usage, i.e. some form of interning. 
+ + +[Apache Arrow Flight]: https://arrow.apache.org/docs/format/Flight.html +[Apache Arrow Flight SQL]: https://arrow.apache.org/docs/format/FlightSql.html +[DataFusion]: https://arrow.apache.org/datafusion/ +[`datafusion-sql`]: https://github.com/apache/arrow-datafusion/tree/main/datafusion/sql +["Deduplication"]: ./dedup_and_sort.md +[`ExecutionPlan`]: https://docs.rs/datafusion/26.0.0/datafusion/physical_plan/trait.ExecutionPlan.html +["Flight SQL"]: ./flightsql.md +[`handle_gapfill`]: https://github.com/influxdata/influxdb_iox/blob/main/iox_query/src/logical_optimizer/handle_gapfill.rs +[`influx_regex_to_datafusion_regex`]: + https://github.com/influxdata/influxdb_iox/blob/main/iox_query/src/logical_optimizer/influx_regex_to_datafusion_regex.rs +["Ingester ⇔ Querier Query Protocol"]: ./ingester_querier_protocol.md +["IOx Physical Plan Construction"]: ./physical_plan_construction.md +[`iox_query_influxql`]: https://github.com/influxdata/influxdb_iox/tree/main/iox_query_influxql +[`iox_query_influxrpc`]: https://github.com/influxdata/influxdb_iox/tree/main/iox_query_influxrpc +[`LogicalPlan`]: https://docs.rs/datafusion-expr/26.0.0/datafusion_expr/logical_plan/enum.LogicalPlan.html +[`service_grpc_flight::FlightService`]: https://github.com/influxdata/influxdb_iox/blob/74a48a8f63c2b8602adf3a52fdd49ee009ebfa0b/service_grpc_flight/src/lib.rs#L246-L406 diff --git a/iox_query/README.md b/iox_query/README.md index 1b6a8248f2..c522983475 100644 --- a/iox_query/README.md +++ b/iox_query/README.md @@ -1,92 +1,3 @@ # IOx Query Layer -The IOx query layer is responsible for translating query requests from -different query languages and planning and executing them against -Chunks stored across various IOx storage systems. - - -Query Frontends -* SQL -* Storage gRPC -* Flux (possibly in the future) -* InfluxQL (possibly in the future) -* Others (possibly in the future) - -Sources of Chunk data -* ReadBuffer -* MutableBuffer -* Parquet Files -* Others (possibly in the future, like Remote Chunk?) - -The goal is to use the shared query / plan representation in order to -avoid N*M combinations of language and Chunk source. - -Thus query planning is implemented in terms of traits, and those -traits are implemented by different chunk implementations. - -Among other things, this means that this crate should not depend -directly on the ReadBuffer or the MutableBuffer. - - -```text -┌───────────────┐ ┌────────────────┐ ┌──────────────┐ ┌──────────────┐ -│Mutable Buffer │ │ Read Buffer │ │Parquet Files │ ... │Future Source │ -│ │ │ │ │ │ │ │ -└───────────────┘ └────────────────┘ └──────────────┘ └──────────────┘ - ▲ ▲ ▲ ▲ - └───────────────────┴─────────┬──────────┴─────────────────────┘ - │ - │ - ┌─────────────────────────────────┐ - │ Shared Common │ - │ Predicate, Plans, Execution │ - └─────────────────────────────────┘ - ▲ - │ - │ - ┌──────────────────────┼─────────────────────────┐ - │ │ │ - │ │ │ - │ │ │ - ┌───────────────────┐ ┌──────────────────┐ ┌──────────────────┐ - │ SQL Frontend │ │ gRPC Storage │ ... │ Future Frontend │ - │ │ │ Frontend │ │ (e.g. 
InfluxQL) │ - └───────────────────┘ └──────────────────┘ └──────────────────┘ -``` - -We are trying to avoid ending up with something like this: - - -``` - ┌─────────────────────────────────────────────────┐ - │ │ - ▼ │ - ┌────────────┐ │ - │Read Buffer │ ┌────────────────────────┤ - ┌──────────┼────────────┼─────┬────────────┼────────────────────────┤ - │ └────────────┘ │ ▼ │ - ▼ ▲ │ ┌──────────────┐ │ -┌───────────────┐ │ │ │Parquet Files │ │ -│Mutable Buffer │ │ ├───▶│ │... │ -│ │◀────────┼───────────┤ └──────────────┘ ┌─────────────┼┐ -└───────────────┘ │ │ ▲ │Future Source││ - ▲ │ ├────────────┼─────────▶│ ││◀─┐ - │ │ │ │ └─────────────┼┘ │ - │ │ │ │ │ │ - │ │ │ │ │ │ - │ ┌──────────┘ │ │ │ │ - │ │ │ │ │ │ - │ ├──────────────────────┼────────────┘ │ │ - └──────┤ │ │ │ - │ │ │ │ - │ │ │ │ - │ │ │ │ - │ │ │ │ - │ │ │ │ - │ │ │ │ - │ │ │ │ - ┌───────────────────┐ ┌──────────────────┐ ┌──────────────────┐ │ │ - │ SQL Frontend │ │ gRPC Storage │ ... │ Future Frontend │ │ │ - │ │ │ Frontend │ │ (e.g. InfluxQL) │──┴───┘ - └───────────────────┘ └──────────────────┘ └──────────────────┘ -``` +See [InfluxDB IOx -- Query Processing](../docs/query_processing.md) for details.
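The `query_processing.md` added above describes a two-phase flow: query text to a `LogicalPlan`, then a `LogicalPlan` to an `ExecutionPlan` whose results are streamed back. A minimal sketch of that same flow using plain DataFusion is shown below; it is illustrative only (not IOx's planner code), assumes a recent DataFusion release plus a tokio runtime, and uses a constant query so no tables need to be registered.

```rust
use datafusion::error::Result;
use datafusion::prelude::*;

#[tokio::main]
async fn main() -> Result<()> {
    let ctx = SessionContext::new();

    // 1. Query text -> LogicalPlan (logical planning plus logical optimizer passes).
    let df = ctx.sql("SELECT 1 AS value").await?;
    println!("logical plan:\n{}", df.logical_plan().display_indent());

    // 2. LogicalPlan -> ExecutionPlan -> RecordBatches (physical planning and
    //    execution; `collect` buffers here what IOx would stream to the client).
    let batches = df.collect().await?;
    println!("rows: {}", batches.iter().map(|b| b.num_rows()).sum::<usize>());
    Ok(())
}
```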
0b40e0d17cc498cf63eb14f626198b1819b1b059
Dom Dwyer
2023-03-01 17:32:22
SequenceNumberSet for rotated file
Changes Wal::rotate() to return the SequenceNumberSet containing the IDs of all writes in the segment file that is rotated out.
null
feat(wal): SequenceNumberSet for rotated file Changes Wal::rotate() to return the SequenceNumberSet containing the IDs of all writes in the segment file that is rotated out.
diff --git a/ingester2/src/wal/rotate_task.rs b/ingester2/src/wal/rotate_task.rs index 19582f1269..6ac94a4857 100644 --- a/ingester2/src/wal/rotate_task.rs +++ b/ingester2/src/wal/rotate_task.rs @@ -26,10 +26,11 @@ pub(crate) async fn periodic_rotation<T, P>( interval.tick().await; info!("rotating wal file"); - let stats = wal.rotate().expect("failed to rotate WAL"); + let (stats, ids) = wal.rotate().expect("failed to rotate WAL"); debug!( closed_id = %stats.id(), segment_bytes = stats.size(), + n_ops = ids.len(), "rotated wal" ); @@ -127,6 +128,8 @@ pub(crate) async fn periodic_rotation<T, P>( info!( closed_id = %stats.id(), + file_bytes = stats.size(), + n_ops = ids.len(), "dropped persisted wal segment" ); } diff --git a/wal/src/lib.rs b/wal/src/lib.rs index 5bf7a1da60..36a087a8b0 100644 --- a/wal/src/lib.rs +++ b/wal/src/lib.rs @@ -14,7 +14,7 @@ use crate::blocking::{ ClosedSegmentFileReader as RawClosedSegmentFileReader, OpenSegmentFileWriter, }; -use data_types::{sequence_number_set::SequenceNumberSet, SequenceNumber}; +use data_types::sequence_number_set::SequenceNumberSet; use generated_types::{ google::{FieldViolation, OptionalField}, influxdata::iox::wal::v1::{ @@ -301,16 +301,18 @@ impl Wal { ClosedSegmentFileReader::from_path(path) } - /// Writes one [`SequencedWalOp`] to the buffer and returns a watch channel for when the buffer - /// is flushed and fsync'd to disk. + /// Writes one [`SequencedWalOp`] to the buffer and returns a watch channel + /// for when the buffer is flushed and fsync'd to disk. pub fn write_op(&self, op: SequencedWalOp) -> watch::Receiver<Option<WriteResult>> { let mut b = self.buffer.lock(); b.ops.push(op); b.flush_notification.clone() } - /// Closes the currently open segment and opens a new one, returning the closed segment details. - pub fn rotate(&self) -> Result<ClosedSegment> { + /// Closes the currently open segment and opens a new one, returning the + /// closed segment details, including the [`SequenceNumberSet`] containing + /// the sequence numbers of the writes within the closed segment. 
+ pub fn rotate(&self) -> Result<(ClosedSegment, SequenceNumberSet)> { let new_open_segment = OpenSegmentFileWriter::new_in_directory(&self.root, Arc::clone(&self.next_id_source)) .context(UnableToCreateSegmentFileSnafu)?; @@ -318,7 +320,7 @@ impl Wal { let mut segments = self.segments.lock(); let closed = std::mem::replace(&mut segments.open_segment, new_open_segment); - let _seqnum_set = std::mem::take(&mut segments.open_segment_ids); + let seqnum_set = std::mem::take(&mut segments.open_segment_ids); let closed = closed.close().expect("should convert to closed segment"); let previous_value = segments.closed_segments.insert(closed.id(), closed.clone()); @@ -327,7 +329,7 @@ impl Wal { "should always add new closed segment entries, not replace" ); - Ok(closed) + Ok((closed, seqnum_set)) } async fn flush_buffer_background_task(&self) { @@ -537,7 +539,7 @@ impl ClosedSegment { #[cfg(test)] mod tests { use super::*; - use data_types::{NamespaceId, TableId}; + use data_types::{NamespaceId, SequenceNumber, TableId}; use dml::DmlWrite; use generated_types::influxdata::{ iox::{delete::v1::DeletePayload, wal::v1::PersistOp}, @@ -575,7 +577,7 @@ mod tests { wal.write_op(op3.clone()); wal.write_op(op4.clone()).changed().await.unwrap(); - let closed = wal.rotate().unwrap(); + let (closed, ids) = wal.rotate().unwrap(); let mut reader = wal.reader_for_segment(closed.id).unwrap(); @@ -584,6 +586,22 @@ mod tests { ops.append(&mut batch); } assert_eq!(vec![op1, op2, op3, op4], ops); + + // Assert the set has recorded the op IDs. + // + // Note that one op has a duplicate sequence number above! + assert_eq!(ids.len(), 3); + + // Assert the sequence number set contains the specified ops. + let ids = ids.iter().collect::<Vec<_>>(); + assert_eq!( + ids, + [ + SequenceNumber::new(0), + SequenceNumber::new(1), + SequenceNumber::new(2), + ] + ) } // open wal with files that aren't segments (should log and skip) @@ -604,8 +622,9 @@ mod tests { ); // No writes, but rotating is totally fine - let closed_segment_details = wal.rotate().unwrap(); + let (closed_segment_details, ids) = wal.rotate().unwrap(); assert_eq!(closed_segment_details.size(), 16); + assert!(ids.is_empty()); // There's one closed segment let closed = wal.closed_segments(); diff --git a/wal/tests/end_to_end.rs b/wal/tests/end_to_end.rs index 5274142fd4..400ea0e306 100644 --- a/wal/tests/end_to_end.rs +++ b/wal/tests/end_to_end.rs @@ -1,4 +1,4 @@ -use data_types::{NamespaceId, TableId}; +use data_types::{NamespaceId, SequenceNumber, TableId}; use dml::DmlWrite; use generated_types::influxdata::{ iox::wal::v1::sequenced_wal_op::Op as WalOp, @@ -41,8 +41,12 @@ async fn crud() { ); // Can't read entries from the open segment; have to rotate first - let closed_segment_details = wal.rotate().unwrap(); + let (closed_segment_details, ids) = wal.rotate().unwrap(); assert_eq!(closed_segment_details.size(), 232); + assert_eq!( + ids.iter().collect::<Vec<_>>(), + [SequenceNumber::new(42), SequenceNumber::new(43)] + ); // There's one closed segment let closed = wal.closed_segments(); @@ -110,11 +114,13 @@ async fn ordering() { let op = arbitrary_sequenced_wal_op(42); let _ = unwrap_summary(wal.write_op(op)).await; - wal.rotate().unwrap(); + let (_, ids) = wal.rotate().unwrap(); + assert_eq!(ids.iter().collect::<Vec<_>>(), [SequenceNumber::new(42)]); let op = arbitrary_sequenced_wal_op(43); let _ = unwrap_summary(wal.write_op(op)).await; - wal.rotate().unwrap(); + let (_, ids) = wal.rotate().unwrap(); + assert_eq!(ids.iter().collect::<Vec<_>>(), 
[SequenceNumber::new(43)]); let op = arbitrary_sequenced_wal_op(44); let _ = unwrap_summary(wal.write_op(op)).await; @@ -130,12 +136,14 @@ async fn ordering() { assert_eq!(closed_segment_ids, &[0, 1, 2]); // The open segment is next in order - let closed_segment_details = wal.rotate().unwrap(); + let (closed_segment_details, ids) = wal.rotate().unwrap(); assert_eq!(closed_segment_details.id().get(), 3); + assert!(ids.is_empty()); // Creating new files after replay are later in the ordering - let closed_segment_details = wal.rotate().unwrap(); + let (closed_segment_details, ids) = wal.rotate().unwrap(); assert_eq!(closed_segment_details.id().get(), 4); + assert!(ids.is_empty()); } fn arbitrary_sequenced_wal_op(sequence_number: u64) -> SequencedWalOp {
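The `rotate()` change above hands back the `SequenceNumberSet` for the closed segment by swapping the open segment's set out with `std::mem::take`, leaving an empty set for the new open segment. A minimal sketch of that pattern with stand-in types (a `BTreeSet<u64>` in place of `SequenceNumberSet`; not the actual `wal` crate):

```rust
use std::collections::BTreeSet;

#[derive(Default)]
struct Segments {
    open_segment_ids: BTreeSet<u64>, // stand-in for SequenceNumberSet
}

struct ClosedSegment {
    n_ops: usize,
}

impl Segments {
    fn write(&mut self, sequence_number: u64) {
        self.open_segment_ids.insert(sequence_number);
    }

    fn rotate(&mut self) -> (ClosedSegment, BTreeSet<u64>) {
        // Take the IDs accumulated for the segment being closed; the new open
        // segment starts over with an empty set.
        let ids = std::mem::take(&mut self.open_segment_ids);
        (ClosedSegment { n_ops: ids.len() }, ids)
    }
}

fn main() {
    let mut segments = Segments::default();
    segments.write(42);
    segments.write(43);
    let (closed, ids) = segments.rotate();
    assert_eq!(closed.n_ops, 2);
    assert!(ids.contains(&42) && ids.contains(&43));
    assert!(segments.open_segment_ids.is_empty());
}
```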
08ef689d2196c79cdd94680f51460929c2f406be
Stuart Carnie
2023-03-23 12:13:15
Teach InfluxQL how to plan an aggregate query (#7230)
* feat: Display failed query Allows a user to immediately identify the failed query. * feat: API improvements to InfluxQL parser * feat: Extend `SchemaProvider` trait to query for UDFs * fix: We don't want the parser to panic on overflows * fix: ensure `map_type` maps the timestamp data type * feat: API to map a InfluxQL duration expression to a DataFusion interval * chore: Copied APIs from DataFusion SQL planner These APIs are private but useful for InfluxQL planning. * feat: Initial aggregate query support * feat: Add an API to fetch a field by name * chore: Fixes to handling NULLs in aggregates * chore: Add ability to test expected failures for InfluxQL * chore: appease rustfmt and clippy 😬 * chore: produce same error as InfluxQL * chore: appease clippy * chore: Improve docs * chore: Simplify aggregate and raw planning * feat: Add support for GROUP BY TIME(stride, offset) * chore: Update docs * chore: remove redundant `is_empty` check Co-authored-by: Christopher M. Wolff <[email protected]> * chore: PR feedback to clarify purpose of function * chore: The series_sort can't be empty, as `time` is always added This was originally intended as an optimisation when executing an aggregate query that did not group by time or tags, as it will produce N rows, where N is the number of measurements queried. * chore: update comment for clarity ---------
Co-authored-by: Christopher M. Wolff <[email protected]>
feat: Teach InfluxQL how to plan an aggregate query (#7230) * feat: Display failed query Allows a user to immediately identify the failed query. * feat: API improvements to InfluxQL parser * feat: Extend `SchemaProvider` trait to query for UDFs * fix: We don't want the parser to panic on overflows * fix: ensure `map_type` maps the timestamp data type * feat: API to map a InfluxQL duration expression to a DataFusion interval * chore: Copied APIs from DataFusion SQL planner These APIs are private but useful for InfluxQL planning. * feat: Initial aggregate query support * feat: Add an API to fetch a field by name * chore: Fixes to handling NULLs in aggregates * chore: Add ability to test expected failures for InfluxQL * chore: appease rustfmt and clippy 😬 * chore: produce same error as InfluxQL * chore: appease clippy * chore: Improve docs * chore: Simplify aggregate and raw planning * feat: Add support for GROUP BY TIME(stride, offset) * chore: Update docs * chore: remove redundant `is_empty` check Co-authored-by: Christopher M. Wolff <[email protected]> * chore: PR feedback to clarify purpose of function * chore: The series_sort can't be empty, as `time` is always added This was originally intended as an optimisation when executing an aggregate query that did not group by time or tags, as it will produce N rows, where N is the number of measurements queried. * chore: update comment for clarity --------- Co-authored-by: Christopher M. Wolff <[email protected]>
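One of the fixes in this commit ("We don't want the parser to panic on overflows") converts duration literals to nanoseconds with `checked_mul`, as the diff below shows. A small, self-contained sketch of that conversion follows; the constants and the helper name are illustrative, not the parser's actual items. With checked multiplication an input such as `16000w` becomes an error instead of panicking in debug builds or wrapping silently in release builds.

```rust
const NANOS_PER_SEC: i64 = 1_000_000_000;
const NANOS_PER_WEEK: i64 = 7 * 24 * 60 * 60 * NANOS_PER_SEC;

// Convert a duration value in the given unit to nanoseconds, failing on overflow.
fn duration_to_nanos(value: i64, nanos_per_unit: i64) -> Result<i64, &'static str> {
    value.checked_mul(nanos_per_unit).ok_or("integer overflow")
}

fn main() {
    assert_eq!(duration_to_nanos(5, NANOS_PER_WEEK), Ok(5 * NANOS_PER_WEEK));
    // 16000 weeks in nanoseconds exceeds i64::MAX, matching the parser test below.
    assert!(duration_to_nanos(16_000, NANOS_PER_WEEK).is_err());
}
```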
diff --git a/influxdb_influxql_parser/src/literal.rs b/influxdb_influxql_parser/src/literal.rs index 24f91a5e94..4de0305251 100644 --- a/influxdb_influxql_parser/src/literal.rs +++ b/influxdb_influxql_parser/src/literal.rs @@ -289,7 +289,8 @@ impl Display for Duration { fn single_duration(i: &str) -> ParseResult<&str, i64> { use DurationUnit::*; - map( + map_fail( + "overflow", pair( integer, alt(( @@ -304,15 +305,18 @@ fn single_duration(i: &str) -> ParseResult<&str, i64> { value(Week, tag("w")), // weeks )), ), - |(v, unit)| match unit { - Nanosecond => v, - Microsecond => v * NANOS_PER_MICRO, - Millisecond => v * NANOS_PER_MILLI, - Second => v * NANOS_PER_SEC, - Minute => v * NANOS_PER_MIN, - Hour => v * NANOS_PER_HOUR, - Day => v * NANOS_PER_DAY, - Week => v * NANOS_PER_WEEK, + |(v, unit)| { + (match unit { + Nanosecond => Some(v), + Microsecond => v.checked_mul(NANOS_PER_MICRO), + Millisecond => v.checked_mul(NANOS_PER_MILLI), + Second => v.checked_mul(NANOS_PER_SEC), + Minute => v.checked_mul(NANOS_PER_MIN), + Hour => v.checked_mul(NANOS_PER_HOUR), + Day => v.checked_mul(NANOS_PER_DAY), + Week => v.checked_mul(NANOS_PER_WEEK), + }) + .ok_or("integer overflow") }, )(i) } @@ -407,6 +411,8 @@ mod test { // Fallible cases integer("hello").unwrap_err(); + + integer("9223372036854775808").expect_err("expected overflow"); } #[test] @@ -487,6 +493,11 @@ mod test { let (_, got) = single_duration("5w").unwrap(); assert_eq!(got, 5 * NANOS_PER_WEEK); + + // Fallible + + // Handle overflow + single_duration("16000w").expect_err("expected overflow"); } #[test] diff --git a/influxdb_influxql_parser/src/select.rs b/influxdb_influxql_parser/src/select.rs index 57e51fc769..c47861798e 100644 --- a/influxdb_influxql_parser/src/select.rs +++ b/influxdb_influxql_parser/src/select.rs @@ -70,6 +70,15 @@ pub struct SelectStatement { pub timezone: Option<TimeZoneClause>, } +impl SelectStatement { + /// Return the `FILL` behaviour for the `SELECT` statement. + /// + /// The default when no `FILL` clause present is `FILL(null)`. + pub fn fill(&self) -> FillClause { + self.fill.unwrap_or_default() + } +} + impl Display for SelectStatement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "SELECT {} {}", self.fields, self.from)?; @@ -242,6 +251,24 @@ impl Display for GroupByClause { } } +impl GroupByClause { + /// Returns the time dimension for the `GROUP BY` clause. + pub fn time_dimension(&self) -> Option<&TimeDimension> { + self.contents.iter().find_map(|dim| match dim { + Dimension::Time(t) => Some(t), + _ => None, + }) + } + + /// Returns an iterator of all the tag dimensions for the `GROUP BY` clause. + pub fn tags(&self) -> impl Iterator<Item = &Identifier> + '_ { + self.contents.iter().filter_map(|dim| match dim { + Dimension::Tag(i) => Some(i), + _ => None, + }) + } +} + /// Used to parse the interval argument of the TIME function struct TimeCallIntervalArgument; @@ -290,16 +317,30 @@ impl ArithmeticParsers for TimeCallOffsetArgument { } } +/// Represents a `TIME` dimension in a `GROUP BY` clause. +#[derive(Clone, Debug, PartialEq)] +pub struct TimeDimension { + /// The first argument of the `TIME` call. + pub interval: Expr, + /// An optional second argument to specify the offset applied to the `TIME` call. 
+ pub offset: Option<Expr>, +} + +impl Display for TimeDimension { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "TIME({}", self.interval)?; + if let Some(offset) = &self.offset { + write!(f, ", {offset}")?; + } + write!(f, ")") + } +} + /// Represents a dimension of a `GROUP BY` clause. #[derive(Clone, Debug, PartialEq)] pub enum Dimension { /// Represents a `TIME` call in a `GROUP BY` clause. - Time { - /// The first argument of the `TIME` call. - interval: Expr, - /// An optional second argument to specify the offset applied to the `TIME` call. - offset: Option<Expr>, - }, + Time(TimeDimension), /// Represents a literal tag reference in a `GROUP BY` clause. Tag(Identifier), @@ -314,11 +355,7 @@ pub enum Dimension { impl Display for Dimension { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - Self::Time { - interval, - offset: Some(offset), - } => write!(f, "TIME({interval}, {offset})"), - Self::Time { interval, .. } => write!(f, "TIME({interval})"), + Self::Time(v) => Display::fmt(v, f), Self::Tag(v) => Display::fmt(v, f), Self::Regex(v) => Display::fmt(v, f), Self::Wildcard => f.write_char('*'), @@ -366,7 +403,7 @@ fn time_call_expression(i: &str) -> ParseResult<&str, Dimension> { expect("invalid TIME call, expected ')'", preceded(ws0, char(')'))), ), ), - |(interval, offset)| Dimension::Time { interval, offset }, + |(interval, offset)| Dimension::Time(TimeDimension { interval, offset }), )(i) } @@ -390,9 +427,12 @@ fn group_by_clause(i: &str) -> ParseResult<&str, GroupByClause> { } /// Represents a `FILL` clause, and specifies all possible cases of the argument to the `FILL` clause. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Default, Clone, Copy, PartialEq)] pub enum FillClause { /// Empty aggregate windows will contain null values and is specified as `fill(null)` + /// + /// This is the default behavior of a `SELECT` statement, when the `FILL` clause is omitted. + #[default] Null, /// Empty aggregate windows will be discarded and is specified as `fill(none)`. 
@@ -704,6 +744,8 @@ mod test { fn test_select_statement() { let (_, got) = select_statement("SELECT value FROM foo").unwrap(); assert_eq!(got.to_string(), "SELECT value FROM foo"); + // Assert default behaviour when `FILL` is omitted + assert_eq!(got.fill(), FillClause::Null); let (_, got) = select_statement(r#"SELECT f1,/f2/, f3 AS "a field" FROM foo WHERE host =~ /c1/"#) @@ -740,6 +782,7 @@ mod test { got.to_string(), r#"SELECT sum(value) FROM foo GROUP BY TIME(5m), host FILL(PREVIOUS)"# ); + assert_eq!(got.fill(), FillClause::Previous); let (_, got) = select_statement("SELECT value FROM foo ORDER BY DESC").unwrap(); assert_eq!( @@ -1141,6 +1184,20 @@ mod test { ); } + #[test] + fn test_group_by_clause_tags_time_dimension() { + let (_, got) = group_by_clause("GROUP BY *, /foo/, TIME(5m), tag1, tag2").unwrap(); + assert!(got.time_dimension().is_some()); + assert_eq!( + got.tags().cloned().collect::<Vec<_>>(), + vec!["tag1".into(), "tag2".into()] + ); + + let (_, got) = group_by_clause("GROUP BY *, /foo/").unwrap(); + assert!(got.time_dimension().is_none()); + assert_eq!(got.tags().count(), 0); + } + #[test] fn test_time_call_expression() { let (got, _) = time_call_expression("TIME(5m)").unwrap(); diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap index 14e6e9497e..df91bcffcc 100644 --- a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap @@ -2,8 +2,8 @@ source: influxdb_influxql_parser/src/visit.rs expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)" --- -- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" -- 
"pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" +- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" +- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary 
{ lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" - "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" - "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" - "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" @@ -66,14 +66,16 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE - "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) }" - "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } }" - "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })" -- "pre_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" -- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "pre_visit_group_by_clause: ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }" +- "pre_visit_select_dimension: Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None })" +- "pre_visit_select_time_dimension: TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }" - "pre_visit_expr: Literal(Duration(Duration(300000000000)))" - "post_visit_expr: Literal(Duration(Duration(300000000000)))" -- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "post_visit_select_time_dimension: TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "post_visit_select_dimension: Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None })" - "pre_visit_select_dimension: Tag(Identifier(\"host\"))" - "post_visit_select_dimension: Tag(Identifier(\"host\"))" -- 
"post_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" +- "post_visit_group_by_clause: ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }" - "pre_visit_fill_clause: Previous" - "post_visit_fill_clause: Previous" - "pre_visit_order_by_clause: Descending" @@ -88,6 +90,6 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE - "post_visit_soffset_clause: SOffsetClause(4)" - "pre_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" - "post_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" -- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" -- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: 
Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" +- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" +- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap index 8776da84f7..554061f195 100644 --- 
a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap @@ -2,8 +2,8 @@ source: influxdb_influxql_parser/src/visit_mut.rs expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)" --- -- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" -- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: 
Some(TimeZoneClause(Australia/Hobart)) }" +- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" +- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" - "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" - "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" - "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" @@ -66,14 +66,16 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE - 
"post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) }" - "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } }" - "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })" -- "pre_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" -- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "pre_visit_group_by_clause: ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }" +- "pre_visit_select_dimension: Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None })" +- "pre_visit_select_time_dimension: TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }" - "pre_visit_expr: Literal(Duration(Duration(300000000000)))" - "post_visit_expr: Literal(Duration(Duration(300000000000)))" -- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "post_visit_select_time_dimension: TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "post_visit_select_dimension: Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None })" - "pre_visit_select_dimension: Tag(Identifier(\"host\"))" - "post_visit_select_dimension: Tag(Identifier(\"host\"))" -- "post_visit_group_by_clause: ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" +- "post_visit_group_by_clause: ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }" - "pre_visit_fill_clause: Previous" - "post_visit_fill_clause: Previous" - "pre_visit_order_by_clause: Descending" @@ -88,6 +90,6 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE - "post_visit_soffset_clause: SOffsetClause(4)" - "pre_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" - "post_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" -- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: 
None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" -- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" +- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), 
Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" +- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Integer(5))) } })), group_by: Some(ZeroOrMore { contents: [Time(TimeDimension { interval: Literal(Duration(Duration(300000000000))), offset: None }), Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" diff --git a/influxdb_influxql_parser/src/visit.rs b/influxdb_influxql_parser/src/visit.rs index c1e5fe391e..ba6e93d973 100644 --- a/influxdb_influxql_parser/src/visit.rs +++ b/influxdb_influxql_parser/src/visit.rs @@ -36,7 +36,8 @@ use crate::expression::arithmetic::Expr; use crate::expression::conditional::ConditionalExpression; use crate::select::{ Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, - MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeDimension, + TimeZoneClause, }; use crate::show::{OnClause, ShowDatabasesStatement}; use crate::show_field_keys::ShowFieldKeysStatement; @@ -367,6 +368,19 @@ pub trait Visitor: Sized { Ok(self) } + /// Invoked before `TIME` dimension clause is visited. + fn pre_visit_select_time_dimension( + self, + _n: &TimeDimension, + ) -> Result<Recursion<Self>, Self::Error> { + Ok(Continue(self)) + } + + /// Invoked after `TIME` dimension clause is visited. + fn post_visit_select_time_dimension(self, _n: &TimeDimension) -> Result<Self, Self::Error> { + Ok(self) + } + /// Invoked before any children of the `WHERE` clause are visited. 
fn pre_visit_where_clause(self, _n: &WhereClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) @@ -1108,14 +1122,7 @@ impl Visitable for Dimension { }; let visitor = match self { - Self::Time { interval, offset } => { - let visitor = interval.accept(visitor)?; - if let Some(offset) = offset { - offset.accept(visitor) - } else { - Ok(visitor) - } - } + Self::Time(v) => v.accept(visitor), Self::Tag(_) | Self::Regex(_) | Self::Wildcard => Ok(visitor), }?; @@ -1123,6 +1130,24 @@ impl Visitable for Dimension { } } +impl Visitable for TimeDimension { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { + let visitor = match visitor.pre_visit_select_time_dimension(self)? { + Continue(visitor) => visitor, + Stop(visitor) => return Ok(visitor), + }; + + let visitor = self.interval.accept(visitor)?; + let visitor = if let Some(offset) = &self.offset { + offset.accept(visitor)? + } else { + visitor + }; + + visitor.post_visit_select_time_dimension(self) + } +} + impl Visitable for WithKeyClause { fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_with_key_clause(self)? { @@ -1218,7 +1243,8 @@ mod test { use crate::expression::conditional::ConditionalExpression; use crate::select::{ Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, - MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeDimension, + TimeZoneClause, }; use crate::show::{OnClause, ShowDatabasesStatement}; use crate::show_field_keys::ShowFieldKeysStatement; @@ -1506,6 +1532,17 @@ mod test { Ok(self.push_post("select_dimension", n)) } + fn pre_visit_select_time_dimension( + self, + n: &TimeDimension, + ) -> Result<Recursion<Self>, Self::Error> { + Ok(Continue(self.push_pre("select_time_dimension", n))) + } + + fn post_visit_select_time_dimension(self, n: &TimeDimension) -> Result<Self, Self::Error> { + Ok(self.push_post("select_time_dimension", n)) + } + fn pre_visit_where_clause(self, n: &WhereClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("where_clause", n))) } diff --git a/influxdb_influxql_parser/src/visit_mut.rs b/influxdb_influxql_parser/src/visit_mut.rs index ca5626a8c4..ccbdb550da 100644 --- a/influxdb_influxql_parser/src/visit_mut.rs +++ b/influxdb_influxql_parser/src/visit_mut.rs @@ -36,7 +36,8 @@ use crate::expression::arithmetic::Expr; use crate::expression::conditional::ConditionalExpression; use crate::select::{ Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, - MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeDimension, + TimeZoneClause, }; use crate::show::{OnClause, ShowDatabasesStatement}; use crate::show_field_keys::ShowFieldKeysStatement; @@ -380,6 +381,22 @@ pub trait VisitorMut: Sized { Ok(()) } + /// Invoked before `TIME` dimension clause is visited. + fn pre_visit_select_time_dimension( + &mut self, + _n: &mut TimeDimension, + ) -> Result<Recursion, Self::Error> { + Ok(Continue) + } + + /// Invoked after `TIME` dimension clause is visited. + fn post_visit_select_time_dimension( + &mut self, + _n: &mut TimeDimension, + ) -> Result<(), Self::Error> { + Ok(()) + } + /// Invoked before any children of the `WHERE` clause are visited. 
fn pre_visit_where_clause(&mut self, _n: &mut WhereClause) -> Result<Recursion, Self::Error> { Ok(Continue) @@ -1052,12 +1069,7 @@ impl VisitableMut for Dimension { }; match self { - Self::Time { interval, offset } => { - interval.accept(visitor)?; - if let Some(offset) = offset { - offset.accept(visitor)?; - } - } + Self::Time(v) => v.accept(visitor)?, Self::Tag(_) | Self::Regex(_) | Self::Wildcard => {} }; @@ -1065,6 +1077,21 @@ impl VisitableMut for Dimension { } } +impl VisitableMut for TimeDimension { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { + if let Stop = visitor.pre_visit_select_time_dimension(self)? { + return Ok(()); + }; + + self.interval.accept(visitor)?; + if let Some(offset) = &mut self.offset { + offset.accept(visitor)?; + } + + visitor.post_visit_select_time_dimension(self) + } +} + impl VisitableMut for WithKeyClause { fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_with_key_clause(self)? { @@ -1156,7 +1183,8 @@ mod test { use crate::parse_statements; use crate::select::{ Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, - MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeDimension, + TimeZoneClause, }; use crate::show::{OnClause, ShowDatabasesStatement}; use crate::show_field_keys::ShowFieldKeysStatement; @@ -1498,6 +1526,22 @@ mod test { Ok(()) } + fn pre_visit_select_time_dimension( + &mut self, + n: &mut TimeDimension, + ) -> Result<Recursion, Self::Error> { + self.push_pre("select_time_dimension", n); + Ok(Continue) + } + + fn post_visit_select_time_dimension( + &mut self, + n: &mut TimeDimension, + ) -> Result<(), Self::Error> { + self.push_post("select_time_dimension", n); + Ok(()) + } + fn pre_visit_where_clause( &mut self, n: &mut WhereClause, diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql index 8bb474f614..c3bc395ff3 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql @@ -271,9 +271,58 @@ SELECT cpu, usage_idle FROM cpu; SELECT usage_idle FROM cpu GROUP BY cpu; SELECT usage_idle, cpu FROM cpu GROUP BY cpu; +-- group by a non-existent tag +SELECT usage_idle FROM cpu GROUP BY cpu, non_existent; +-- group by and project a non-existent tag +SELECT usage_idle, non_existent FROM cpu GROUP BY cpu, non_existent; + -- multiple measurements and tags in the group by SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu; +SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu, non_existent; SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu, device; SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY device, cpu; SELECT usage_idle, bytes_free, device, cpu FROM cpu, disk GROUP BY device, cpu; + +-- +-- Aggregate queries +-- + +SELECT COUNT(f64), SUM(f64), stddev(f64) FROM m0 GROUP BY tag0; +SELECT COUNT(f64), SUM(f64), stddev(f64) FROM m0 GROUP BY tag0, non_existent; +SELECT COUNT(f64), SUM(f64), stddev(f64) FROM m0 GROUP BY non_existent; +SELECT COUNT(f64), COUNT(f64) + COUNT(f64), COUNT(f64) * 3 FROM m0; +-- non-existent columns in an aggregate should evaluate to NULL +SELECT COUNT(f64) as the_count, SUM(non_existent) as foo FROM m0; +-- non-existent columns in an aggregate expression should evaluate to NULL +SELECT COUNT(f64) as the_count, 
SUM(f64) + SUM(non_existent) as foo FROM m0; + +SELECT COUNT(f64), SUM(f64) FROM m0 GROUP BY TIME(30s) FILL(none); +-- supports offset parameter +SELECT COUNT(f64), SUM(f64) FROM m0 GROUP BY TIME(30s, 1s) FILL(none); + +SELECT COUNT(usage_idle), COUNT(bytes_free) FROM cpu, disk; +SELECT COUNT(usage_idle), COUNT(bytes_free) FROM cpu, disk GROUP BY TIME(1s) FILL(none); +SELECT COUNT(usage_idle), COUNT(bytes_free) FROM cpu, disk GROUP BY cpu; +SELECT COUNT(usage_idle) as count_usage_idle, COUNT(bytes_free) as count_bytes_free FROM cpu, disk WHERE cpu = 'cpu0' OR device = 'disk1s1' GROUP BY cpu; + +-- measurements without any matching fields are omitted from the result set +SELECT SUM(usage_idle) FROM cpu, disk WHERE cpu = 'cpu0' GROUP BY cpu; +SELECT SUM(usage_idle) FROM cpu, disk GROUP BY cpu; + +-- Fallible cases + +-- Mixing aggregate and non-aggregate columns +SELECT COUNT(usage_idle) + usage_idle FROM cpu; +SELECT COUNT(usage_idle), usage_idle FROM cpu; + +-- Unimplemented cases + +-- TODO(sgc): No gap filling +-- Default FILL(null) when FILL is omitted +SELECT COUNT(usage_idle) FROM cpu GROUP BY TIME(30s); +SELECT COUNT(usage_idle) FROM cpu GROUP BY TIME(30s) FILL(previous); + +-- LIMIT and OFFSET aren't supported with aggregates and groups +SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu LIMIT 1; +SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu OFFSET 1; \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index c14a6e8574..ee9c18344e 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -455,6 +455,28 @@ | cpu | 2022-10-31T02:00:00Z | 1.98 | cpu1 | | cpu | 2022-10-31T02:00:10Z | 1.99 | cpu1 | +------------------+----------------------+------------+-----------+ +-- InfluxQL: SELECT usage_idle FROM cpu GROUP BY cpu, non_existent; ++------------------+----------------------+-----------+--------------+------------+ +| iox::measurement | time | cpu | non_existent | usage_idle | ++------------------+----------------------+-----------+--------------+------------+ +| cpu | 2022-10-31T02:00:00Z | cpu-total | | 2.98 | +| cpu | 2022-10-31T02:00:10Z | cpu-total | | 2.99 | +| cpu | 2022-10-31T02:00:00Z | cpu0 | | 0.98 | +| cpu | 2022-10-31T02:00:10Z | cpu0 | | 0.99 | +| cpu | 2022-10-31T02:00:00Z | cpu1 | | 1.98 | +| cpu | 2022-10-31T02:00:10Z | cpu1 | | 1.99 | ++------------------+----------------------+-----------+--------------+------------+ +-- InfluxQL: SELECT usage_idle, non_existent FROM cpu GROUP BY cpu, non_existent; ++------------------+----------------------+-----------+------------+--------------+ +| iox::measurement | time | cpu | usage_idle | non_existent | ++------------------+----------------------+-----------+------------+--------------+ +| cpu | 2022-10-31T02:00:00Z | cpu-total | 2.98 | | +| cpu | 2022-10-31T02:00:10Z | cpu-total | 2.99 | | +| cpu | 2022-10-31T02:00:00Z | cpu0 | 0.98 | | +| cpu | 2022-10-31T02:00:10Z | cpu0 | 0.99 | | +| cpu | 2022-10-31T02:00:00Z | cpu1 | 1.98 | | +| cpu | 2022-10-31T02:00:10Z | cpu1 | 1.99 | | ++------------------+----------------------+-----------+------------+--------------+ -- InfluxQL: SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu; +------------------+----------------------+-----------+------------+------------+ | iox::measurement | time | cpu | usage_idle | bytes_free | @@ -472,6 +494,23 @@ | 
disk | 2022-10-31T02:00:10Z | | | 2239.0 | | disk | 2022-10-31T02:00:10Z | | | 3239.0 | +------------------+----------------------+-----------+------------+------------+ +-- InfluxQL: SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu, non_existent; ++------------------+----------------------+-----------+--------------+------------+------------+ +| iox::measurement | time | cpu | non_existent | usage_idle | bytes_free | ++------------------+----------------------+-----------+--------------+------------+------------+ +| cpu | 2022-10-31T02:00:00Z | cpu-total | | 2.98 | | +| cpu | 2022-10-31T02:00:10Z | cpu-total | | 2.99 | | +| cpu | 2022-10-31T02:00:00Z | cpu0 | | 0.98 | | +| cpu | 2022-10-31T02:00:10Z | cpu0 | | 0.99 | | +| cpu | 2022-10-31T02:00:00Z | cpu1 | | 1.98 | | +| cpu | 2022-10-31T02:00:10Z | cpu1 | | 1.99 | | +| disk | 2022-10-31T02:00:00Z | | | | 1234.0 | +| disk | 2022-10-31T02:00:00Z | | | | 2234.0 | +| disk | 2022-10-31T02:00:00Z | | | | 3234.0 | +| disk | 2022-10-31T02:00:10Z | | | | 1239.0 | +| disk | 2022-10-31T02:00:10Z | | | | 2239.0 | +| disk | 2022-10-31T02:00:10Z | | | | 3239.0 | ++------------------+----------------------+-----------+--------------+------------+------------+ -- InfluxQL: SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu, device; +------------------+----------------------+-----------+---------+------------+------------+ | iox::measurement | time | cpu | device | usage_idle | bytes_free | @@ -522,4 +561,116 @@ | disk | 2022-10-31T02:00:10Z | | 2239.0 | disk1s2 | | | disk | 2022-10-31T02:00:00Z | | 3234.0 | disk1s5 | | | disk | 2022-10-31T02:00:10Z | | 3239.0 | disk1s5 | | -+------------------+----------------------+------------+------------+---------+-----------+ \ No newline at end of file ++------------------+----------------------+------------+------------+---------+-----------+ +-- InfluxQL: SELECT COUNT(f64), SUM(f64), stddev(f64) FROM m0 GROUP BY tag0; ++------------------+----------------------+-------+-------+------+-------------------+ +| iox::measurement | time | tag0 | count | sum | stddev | ++------------------+----------------------+-------+-------+------+-------------------+ +| m0 | 1970-01-01T00:00:00Z | val00 | 5 | 80.6 | 5.085961069453836 | +| m0 | 1970-01-01T00:00:00Z | val01 | 1 | 11.3 | | +| m0 | 1970-01-01T00:00:00Z | val02 | 1 | 10.4 | | ++------------------+----------------------+-------+-------+------+-------------------+ +-- InfluxQL: SELECT COUNT(f64), SUM(f64), stddev(f64) FROM m0 GROUP BY tag0, non_existent; ++------------------+----------------------+--------------+-------+-------+------+-------------------+ +| iox::measurement | time | non_existent | tag0 | count | sum | stddev | ++------------------+----------------------+--------------+-------+-------+------+-------------------+ +| m0 | 1970-01-01T00:00:00Z | | val00 | 5 | 80.6 | 5.085961069453836 | +| m0 | 1970-01-01T00:00:00Z | | val01 | 1 | 11.3 | | +| m0 | 1970-01-01T00:00:00Z | | val02 | 1 | 10.4 | | ++------------------+----------------------+--------------+-------+-------+------+-------------------+ +-- InfluxQL: SELECT COUNT(f64), SUM(f64), stddev(f64) FROM m0 GROUP BY non_existent; ++------------------+----------------------+--------------+-------+--------------------+--------------------+ +| iox::measurement | time | non_existent | count | sum | stddev | ++------------------+----------------------+--------------+-------+--------------------+--------------------+ +| m0 | 1970-01-01T00:00:00Z | | 7 | 102.30000000000001 | 4.8912945019454614 | 
++------------------+----------------------+--------------+-------+--------------------+--------------------+ +-- InfluxQL: SELECT COUNT(f64), COUNT(f64) + COUNT(f64), COUNT(f64) * 3 FROM m0; ++------------------+----------------------+-------+---------------------+-----------+ +| iox::measurement | time | count | count_f64_count_f64 | count_f64 | ++------------------+----------------------+-------+---------------------+-----------+ +| m0 | 1970-01-01T00:00:00Z | 7 | 14 | 21 | ++------------------+----------------------+-------+---------------------+-----------+ +-- InfluxQL: SELECT COUNT(f64) as the_count, SUM(non_existent) as foo FROM m0; ++------------------+----------------------+-----------+-----+ +| iox::measurement | time | the_count | foo | ++------------------+----------------------+-----------+-----+ +| m0 | 1970-01-01T00:00:00Z | 7 | | ++------------------+----------------------+-----------+-----+ +-- InfluxQL: SELECT COUNT(f64) as the_count, SUM(f64) + SUM(non_existent) as foo FROM m0; ++------------------+----------------------+-----------+-----+ +| iox::measurement | time | the_count | foo | ++------------------+----------------------+-----------+-----+ +| m0 | 1970-01-01T00:00:00Z | 7 | | ++------------------+----------------------+-----------+-----+ +-- InfluxQL: SELECT COUNT(f64), SUM(f64) FROM m0 GROUP BY TIME(30s) FILL(none); ++------------------+----------------------+-------+------+ +| iox::measurement | time | count | sum | ++------------------+----------------------+-------+------+ +| m0 | 2022-10-31T02:00:00Z | 6 | 83.1 | +| m0 | 2022-10-31T02:00:30Z | 1 | 19.2 | ++------------------+----------------------+-------+------+ +-- InfluxQL: SELECT COUNT(f64), SUM(f64) FROM m0 GROUP BY TIME(30s, 1s) FILL(none); ++------------------+----------------------+-------+--------------------+ +| iox::measurement | time | count | sum | ++------------------+----------------------+-------+--------------------+ +| m0 | 2022-10-31T01:59:31Z | 3 | 31.799999999999997 | +| m0 | 2022-10-31T02:00:01Z | 4 | 70.5 | ++------------------+----------------------+-------+--------------------+ +-- InfluxQL: SELECT COUNT(usage_idle), COUNT(bytes_free) FROM cpu, disk; ++------------------+----------------------+-------+---------+ +| iox::measurement | time | count | count_1 | ++------------------+----------------------+-------+---------+ +| cpu | 1970-01-01T00:00:00Z | 6 | | +| disk | 1970-01-01T00:00:00Z | | 6 | ++------------------+----------------------+-------+---------+ +-- InfluxQL: SELECT COUNT(usage_idle), COUNT(bytes_free) FROM cpu, disk GROUP BY TIME(1s) FILL(none); ++------------------+----------------------+-------+---------+ +| iox::measurement | time | count | count_1 | ++------------------+----------------------+-------+---------+ +| cpu | 2022-10-31T02:00:00Z | 3 | | +| cpu | 2022-10-31T02:00:10Z | 3 | | +| disk | 2022-10-31T02:00:00Z | | 3 | +| disk | 2022-10-31T02:00:10Z | | 3 | ++------------------+----------------------+-------+---------+ +-- InfluxQL: SELECT COUNT(usage_idle), COUNT(bytes_free) FROM cpu, disk GROUP BY cpu; ++------------------+----------------------+-----------+-------+---------+ +| iox::measurement | time | cpu | count | count_1 | ++------------------+----------------------+-----------+-------+---------+ +| cpu | 1970-01-01T00:00:00Z | cpu-total | 2 | | +| cpu | 1970-01-01T00:00:00Z | cpu0 | 2 | | +| cpu | 1970-01-01T00:00:00Z | cpu1 | 2 | | +| disk | 1970-01-01T00:00:00Z | | | 6 | ++------------------+----------------------+-----------+-------+---------+ +-- 
InfluxQL: SELECT COUNT(usage_idle) as count_usage_idle, COUNT(bytes_free) as count_bytes_free FROM cpu, disk WHERE cpu = 'cpu0' OR device = 'disk1s1' GROUP BY cpu; ++------------------+----------------------+------+------------------+------------------+ +| iox::measurement | time | cpu | count_usage_idle | count_bytes_free | ++------------------+----------------------+------+------------------+------------------+ +| cpu | 1970-01-01T00:00:00Z | cpu0 | 2 | | +| disk | 1970-01-01T00:00:00Z | | | 2 | ++------------------+----------------------+------+------------------+------------------+ +-- InfluxQL: SELECT SUM(usage_idle) FROM cpu, disk WHERE cpu = 'cpu0' GROUP BY cpu; ++------------------+----------------------+------+------+ +| iox::measurement | time | cpu | sum | ++------------------+----------------------+------+------+ +| cpu | 1970-01-01T00:00:00Z | cpu0 | 1.97 | ++------------------+----------------------+------+------+ +-- InfluxQL: SELECT SUM(usage_idle) FROM cpu, disk GROUP BY cpu; ++------------------+----------------------+-----------+--------------------+ +| iox::measurement | time | cpu | sum | ++------------------+----------------------+-----------+--------------------+ +| cpu | 1970-01-01T00:00:00Z | cpu-total | 5.970000000000001 | +| cpu | 1970-01-01T00:00:00Z | cpu0 | 1.97 | +| cpu | 1970-01-01T00:00:00Z | cpu1 | 3.9699999999999998 | ++------------------+----------------------+-----------+--------------------+ +-- InfluxQL: SELECT COUNT(usage_idle) + usage_idle FROM cpu; +Error while planning query: Error during planning: mixing aggregate and non-aggregate columns is not supported +-- InfluxQL: SELECT COUNT(usage_idle), usage_idle FROM cpu; +Error while planning query: Error during planning: mixing aggregate and non-aggregate columns is not supported +-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY TIME(30s); +Error while planning query: This feature is not implemented: FILL(NULL) +-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY TIME(30s) FILL(previous); +Error while planning query: This feature is not implemented: FILL(PREVIOUS) +-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu LIMIT 1; +Error while planning query: This feature is not implemented: GROUP BY combined with LIMIT or OFFSET clause +-- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu OFFSET 1; +Error while planning query: This feature is not implemented: GROUP BY combined with LIMIT or OFFSET clause \ No newline at end of file diff --git a/iox_query_influxql/src/frontend/planner.rs b/iox_query_influxql/src/frontend/planner.rs index a586a29011..27a778e377 100644 --- a/iox_query_influxql/src/frontend/planner.rs +++ b/iox_query_influxql/src/frontend/planner.rs @@ -9,8 +9,8 @@ use std::sync::Arc; use crate::plan::{parse_regex, InfluxQLToLogicalPlan, SchemaProvider}; use datafusion::common::Statistics; use datafusion::datasource::provider_as_source; -use datafusion::execution::context::TaskContext; -use datafusion::logical_expr::{LogicalPlan, TableSource}; +use datafusion::execution::context::{SessionState, TaskContext}; +use datafusion::logical_expr::{AggregateUDF, LogicalPlan, ScalarUDF, TableSource}; use datafusion::physical_expr::PhysicalSortExpr; use datafusion::physical_plan::{Partitioning, SendableRecordBatchStream}; use datafusion::{ @@ -25,11 +25,12 @@ use iox_query::exec::IOxSessionContext; use observability_deps::tracing::debug; use schema::Schema; -struct ContextSchemaProvider { +struct ContextSchemaProvider<'a> { + state: &'a SessionState, tables: HashMap<String, 
(Arc<dyn TableSource>, Schema)>, } -impl SchemaProvider for ContextSchemaProvider { +impl<'a> SchemaProvider for ContextSchemaProvider<'a> { fn get_table_provider(&self, name: &str) -> Result<Arc<dyn TableSource>> { self.tables .get(name) @@ -37,6 +38,14 @@ impl SchemaProvider for ContextSchemaProvider { .ok_or_else(|| DataFusionError::Plan(format!("measurement does not exist: {name}"))) } + fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> { + self.state.scalar_functions().get(name).cloned() + } + + fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> { + self.state.aggregate_functions().get(name).cloned() + } + fn table_names(&self) -> Vec<&'_ str> { self.tables.keys().map(|k| k.as_str()).collect::<Vec<_>>() } @@ -171,6 +180,7 @@ impl InfluxQLQueryPlanner { let query_tables = find_all_measurements(&statement, &names)?; let mut sp = ContextSchemaProvider { + state: &ctx.inner().state(), tables: HashMap::with_capacity(query_tables.len()), }; diff --git a/iox_query_influxql/src/plan/field_mapper.rs b/iox_query_influxql/src/plan/field_mapper.rs index 751b315b4c..3fdc8398e9 100644 --- a/iox_query_influxql/src/plan/field_mapper.rs +++ b/iox_query_influxql/src/plan/field_mapper.rs @@ -36,12 +36,10 @@ pub(crate) fn map_type( field: &str, ) -> Result<Option<VarRefDataType>> { match s.table_schema(measurement_name) { - Some(iox) => Ok(match iox.find_index_of(field) { - Some(i) => match iox.field(i).0 { - InfluxColumnType::Field(ft) => Some(field_type_to_var_ref_data_type(ft)), - InfluxColumnType::Tag => Some(VarRefDataType::Tag), - InfluxColumnType::Timestamp => None, - }, + Some(iox) => Ok(match iox.field_by_name(field) { + Some((InfluxColumnType::Field(ft), _)) => Some(field_type_to_var_ref_data_type(ft)), + Some((InfluxColumnType::Tag, _)) => Some(VarRefDataType::Tag), + Some((InfluxColumnType::Timestamp, _)) => Some(VarRefDataType::Timestamp), None => None, }), None => Ok(None), @@ -87,6 +85,10 @@ mod test { map_type(&namespace, "cpu", "host").unwrap(), Some(VarRefDataType::Tag) ); + assert_matches!( + map_type(&namespace, "cpu", "time").unwrap(), + Some(VarRefDataType::Timestamp) + ); // Returns None for nonexistent field assert!(map_type(&namespace, "cpu", "nonexistent") .unwrap() diff --git a/iox_query_influxql/src/plan/mod.rs b/iox_query_influxql/src/plan/mod.rs index d40197a594..abe2c9879c 100644 --- a/iox_query_influxql/src/plan/mod.rs +++ b/iox_query_influxql/src/plan/mod.rs @@ -8,6 +8,7 @@ mod rewriter; mod test_utils; mod timestamp; mod util; +mod util_copy; mod var_ref; pub use planner::InfluxQLToLogicalPlan; diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs index 54a6ed8e6f..e54137369a 100644 --- a/iox_query_influxql/src/plan/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -1,29 +1,39 @@ +mod select; + +use crate::plan::planner::select::{ + check_exprs_satisfy_columns, make_tag_key_column_meta, plan_with_sort, +}; use crate::plan::planner_rewrite_expression::{rewrite_conditional, rewrite_expr}; -use crate::plan::planner_time_range_expression::time_range_to_df_expr; +use crate::plan::planner_time_range_expression::{ + duration_expr_to_nanoseconds, expr_to_df_interval_dt, time_range_to_df_expr, +}; use crate::plan::rewriter::rewrite_statement; use crate::plan::util::{binary_operator_to_df_operator, Schemas}; +use crate::plan::util_copy::rebase_expr; use crate::plan::var_ref::{column_type_to_var_ref_data_type, var_ref_data_type_to_data_type}; use arrow::datatypes::DataType; +use chrono_tz::Tz; use 
datafusion::catalog::TableReference; use datafusion::common::{DFSchema, DFSchemaRef, DataFusionError, Result, ScalarValue, ToDFSchema}; use datafusion::logical_expr::expr_rewriter::{normalize_col, ExprRewritable, ExprRewriter}; use datafusion::logical_expr::logical_plan::builder::project; use datafusion::logical_expr::logical_plan::Analyze; +use datafusion::logical_expr::utils::{expr_as_column_expr, find_aggregate_exprs}; use datafusion::logical_expr::{ - binary_expr, lit, BinaryExpr, BuiltinScalarFunction, Explain, Expr, ExprSchemable, LogicalPlan, - LogicalPlanBuilder, Operator, PlanType, Projection, TableSource, ToStringifiedPlan, + binary_expr, date_bin, expr, lit, lit_timestamp_nano, AggregateFunction, AggregateUDF, + BinaryExpr, BuiltinScalarFunction, Explain, Expr, ExprSchemable, LogicalPlan, + LogicalPlanBuilder, Operator, PlanType, ScalarUDF, TableSource, ToStringifiedPlan, }; use datafusion_util::{lit_dict, AsExpr}; -use generated_types::influxdata::iox::querier::v1::{ - influx_ql_metadata::TagKeyColumn, InfluxQlMetadata, -}; -use influxdb_influxql_parser::common::OrderByClause; +use generated_types::influxdata::iox::querier::v1::InfluxQlMetadata; use influxdb_influxql_parser::explain::{ExplainOption, ExplainStatement}; use influxdb_influxql_parser::expression::walk::walk_expr; use influxdb_influxql_parser::expression::{ BinaryOperator, ConditionalExpression, ConditionalOperator, VarRefDataType, }; -use influxdb_influxql_parser::select::{Dimension, SLimitClause, SOffsetClause}; +use influxdb_influxql_parser::select::{ + FillClause, GroupByClause, SLimitClause, SOffsetClause, TimeZoneClause, +}; use influxdb_influxql_parser::{ common::{LimitClause, MeasurementName, OffsetClause, WhereClause}, expression::Expr as IQLExpr, @@ -39,9 +49,8 @@ use schema::{ InfluxColumnType, InfluxFieldType, Schema, INFLUXQL_MEASUREMENT_COLUMN_NAME, INFLUXQL_METADATA_KEY, }; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{HashSet, VecDeque}; use std::fmt::Debug; -use std::iter; use std::ops::{ControlFlow, Deref}; use std::str::FromStr; use std::sync::Arc; @@ -55,6 +64,12 @@ pub trait SchemaProvider { /// Getter for a datasource fn get_table_provider(&self, name: &str) -> Result<Arc<dyn TableSource>>; + /// Getter for a UDF description + fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>>; + + /// Getter for a UDAF description + fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>>; + /// The collection of tables for this schema. fn table_names(&self) -> Vec<&'_ str>; @@ -72,16 +87,64 @@ pub trait SchemaProvider { /// /// Specifically, the scope of available functions is narrowed to mathematical scalar functions /// when processing the `WHERE` clause. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Default, Clone, Copy, PartialEq)] enum ExprScope { /// Signals that expressions should be transformed in the context of /// the `WHERE` clause. + #[default] Where, /// Signals that expressions should be transformed in the context of /// the `SELECT` projection list. Projection, } +/// State used to inform the planner. +#[allow(dead_code)] +#[derive(Debug, Default, Clone)] +struct Context<'a> { + /// `true` if this is a subquery `SELECT` statement. 
+ is_subquery: bool, + scope: ExprScope, + tz: Option<Tz>, + + // + is_aggregate: bool, + + // GROUP BY information + group_by: Option<&'a GroupByClause>, + fill: Option<FillClause>, +} + +impl<'a> Context<'a> { + fn new() -> Self { + Default::default() + } + + fn with_scope(&self, scope: ExprScope) -> Self { + Self { scope, ..*self } + } + + fn with_timezone(&self, timezone: Option<TimeZoneClause>) -> Self { + let tz = timezone.as_deref().cloned(); + Self { tz, ..*self } + } + + fn with_group_by_fill(&self, select: &'a SelectStatement) -> Self { + Self { + group_by: select.group_by.as_ref(), + fill: select.fill, + ..*self + } + } + + fn with_is_aggregate(&self, is_aggregate: bool) -> Self { + Self { + is_aggregate, + ..*self + } + } +} + #[allow(missing_debug_implementations)] /// InfluxQL query planner pub struct InfluxQLToLogicalPlan<'a> { @@ -103,9 +166,10 @@ impl<'a> InfluxQLToLogicalPlan<'a> { Err(DataFusionError::NotImplemented("DROP MEASUREMENT".into())) } Statement::Explain(explain) => self.explain_statement_to_plan(*explain), - Statement::Select(select) => { - self.select_statement_to_plan(&self.rewrite_select_statement(*select)?) - } + Statement::Select(select) => self.select_statement_to_plan( + &Context::new(), + &self.rewrite_select_statement(*select)?, + ), Statement::ShowDatabases(_) => { Err(DataFusionError::NotImplemented("SHOW DATABASES".into())) } @@ -128,8 +192,10 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } fn explain_statement_to_plan(&self, explain: ExplainStatement) -> Result<LogicalPlan> { - let plan = - self.select_statement_to_plan(&self.rewrite_select_statement(*explain.select)?)?; + let plan = self.select_statement_to_plan( + &Context::new(), + &self.rewrite_select_statement(*explain.select)?, + )?; let plan = Arc::new(plan); let schema = LogicalPlan::explain_schema(); let schema = schema.to_dfschema_ref()?; @@ -164,200 +230,134 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } /// Create a [`LogicalPlan`] from the specified InfluxQL `SELECT` statement. - fn select_statement_to_plan(&self, select: &SelectStatement) -> Result<LogicalPlan> { + fn select_statement_to_plan( + &self, + ctx: &Context<'_>, + select: &SelectStatement, + ) -> Result<LogicalPlan> { let mut plans = self.plan_from_tables(&select.from)?; - // Aggregate functions are currently not supported. 
- // - // See: https://github.com/influxdata/influxdb_iox/issues/6919 - if has_aggregate_exprs(&select.fields) { - return Err(DataFusionError::NotImplemented( - "aggregate functions".to_owned(), - )); - } - - let mut meta = InfluxQlMetadata { - measurement_column_index: MEASUREMENT_COLUMN_INDEX, - tag_key_columns: Vec::new(), - }; + let ctx = ctx + .with_timezone(select.timezone) + .with_group_by_fill(select) + .with_is_aggregate( + has_aggregate_exprs(&select.fields) + || (select.group_by.is_some() + && select.group_by.as_ref().unwrap().time_dimension().is_some()), + ); // The `time` column is always present in the result set - let mut fields = if !has_time_column(&select.fields) { + let mut fields = if find_time_column_index(&select.fields).is_none() { vec![Field { expr: IQLExpr::VarRef { name: "time".into(), data_type: Some(VarRefDataType::Timestamp), }, - alias: None, + alias: Some("time".into()), }] } else { vec![] }; - let (group_by_tag_set, projection_tag_set) = if let Some(group_by) = &select.group_by { - let mut tag_columns = find_tag_columns::<HashSet<_>>(&select.fields); - - // Contains the list of tag keys specified in the `GROUP BY` clause - let (tag_set, is_projected): (Vec<_>, Vec<_>) = group_by - .iter() - .map(|dimension| match dimension { - Dimension::Tag(t) => { - Ok((t.deref().as_str(), tag_columns.contains(t.deref().as_str()))) - } - // TODO(sgc): https://github.com/influxdata/influxdb_iox/issues/6915 - Dimension::Time { .. } => { - Err(DataFusionError::NotImplemented("GROUP BY time".to_owned())) - } - // Inconsistent state, as these variants should have been expanded by `rewrite_select_statement` - Dimension::Regex(_) | Dimension::Wildcard => Err(DataFusionError::Internal( - "unexpected regular expression or wildcard found in GROUP BY".into(), - )), - }) - .collect::<Result<Vec<_>>>()? - .into_iter() - // We sort the tag set, to ensure correct ordering of the results. The tag columns - // referenced in the `tag_set` variable are added to the sort operator in - // lexicographically ascending order. - .sorted_by(|a, b| a.0.cmp(b.0)) - .unzip(); - - // Tags specified in the `GROUP BY` clause that are not already added to the - // projection must be projected, so they key be used in the group key. - // - // At the end of the loop, the `tag_columns` set will contain the tag columns that - // exist in the projection and not in the `GROUP BY`. - for col in &tag_set { - if tag_columns.remove(*col) { - continue; - } - - fields.push(Field { - expr: IQLExpr::VarRef { - name: (*col).into(), - data_type: Some(VarRefDataType::Tag), - }, - alias: Some((*col).into()), - }); - } + // group_by_tag_set : a list of tag columns specified in the GROUP BY clause + // projection_tag_set : a list of tag columns specified exclusively in the SELECT projection + // is_projected : a list of booleans indicating whether matching elements in the + // group_by_tag_set are also projected in the query + let (group_by_tag_set, projection_tag_set, is_projected) = + if let Some(group_by) = &select.group_by { + let mut tag_columns = + find_tag_and_unknown_columns(&select.fields).collect::<HashSet<_>>(); + + // Find the list of tag keys specified in the `GROUP BY` clause, and + // whether any of the tag keys are also projected in the SELECT list. + let (tag_set, is_projected): (Vec<_>, Vec<_>) = group_by + .tags() + .map(|t| t.deref().as_str()) + .map(|s| (s, tag_columns.contains(s))) + // We sort the tag set, to ensure correct ordering of the results. 
The tag columns + // referenced in the `tag_set` variable are added to the sort operator in + // lexicographically ascending order. + .sorted_by(|a, b| a.0.cmp(b.0)) + .unzip(); + + // Tags specified in the `GROUP BY` clause that are not already added to the + // projection must be projected, so they can be used in the group key. + // + // At the end of the loop, the `tag_columns` set will contain the tag columns that + // exist in the projection and not in the `GROUP BY`. + fields.extend( + tag_set + .iter() + .filter_map(|col| match tag_columns.remove(*col) { + true => None, + false => Some(Field { + expr: IQLExpr::VarRef { + name: (*col).into(), + data_type: Some(VarRefDataType::Tag), + }, + alias: Some((*col).into()), + }), + }), + ); - // Add the remaining columns to be projected - fields.extend(select.fields.iter().cloned()); - - /// There is always a [INFLUXQL_MEASUREMENT_COLUMN_NAME] column projected in the LogicalPlan, - /// therefore the start index is 1 for determining the offsets of the - /// tag key columns in the column projection list. - const START_INDEX: usize = 1; - - // Create a map of tag key columns to their respective index in the projection - let index_map = fields - .iter() - .enumerate() - .filter_map(|(index, f)| match &f.expr { - IQLExpr::VarRef { - name, - data_type: Some(VarRefDataType::Tag), - } => Some((name.deref().as_str(), index + START_INDEX)), - _ => None, - }) - .collect::<HashMap<_, _>>(); - - // tag_set was previously sorted, so tag_key_columns will be in the correct order - meta.tag_key_columns = tag_set - .iter() - .zip(is_projected) - .map(|(tag_key, is_projected)| TagKeyColumn { - tag_key: (*tag_key).to_owned(), - column_index: *index_map.get(*tag_key).unwrap() as u32, + ( + tag_set, + tag_columns.into_iter().sorted().collect::<Vec<_>>(), is_projected, - }) - .collect(); + ) + } else { + let tag_columns = find_tag_and_unknown_columns(&select.fields) + .sorted() + .collect::<Vec<_>>(); + (vec![], tag_columns, vec![]) + }; - ( - tag_set, - tag_columns.into_iter().sorted().collect::<Vec<_>>(), - ) - } else { - let mut tag_columns = find_tag_columns::<Vec<_>>(&select.fields); - tag_columns.sort(); - // Add the remaining columns to be projected - fields.extend(select.fields.iter().cloned()); - (vec![], tag_columns) - }; + fields.extend(select.fields.iter().cloned()); - let Some(plan) = plans.pop_front() else { return LogicalPlanBuilder::empty(false).build(); }; - let plan = self.project_select(plan, select, &fields)?; - - // If there are multiple measurements, we need to sort by the measurement column - // NOTE: Ideally DataFusion would maintain the order of the UNION ALL, which would eliminate - // the need to sort by measurement. - // See: https://github.com/influxdata/influxdb_iox/issues/7062 - let mut series_sort = if !plans.is_empty() { - vec![Expr::sort( - INFLUXQL_MEASUREMENT_COLUMN_NAME.as_expr(), - true, - false, - )] - } else { - vec![] + // Build the first non-empty plan + let plan = { + loop { + match plans.pop_front() { + Some((plan, proj)) => match self.project_select( + &ctx, + plan, + proj, + select, + &fields, + &group_by_tag_set, + )? 
{ + LogicalPlan::EmptyRelation(_) => continue, + plan => break plan, + }, + None => return LogicalPlanBuilder::empty(false).build(), + } + } }; // UNION the remaining plans - let plan = plans.into_iter().try_fold(plan, |prev, next| { - let next = self.project_select(next, select, &fields)?; - LogicalPlanBuilder::from(prev).union(next)?.build() + let plan = plans.into_iter().try_fold(plan, |prev, (next, proj)| { + let next = self.project_select(&ctx, next, proj, select, &fields, &group_by_tag_set)?; + if let LogicalPlan::EmptyRelation(_) = next { + // No sense union-ing an empty plan, so drop it + Ok(prev) + } else { + LogicalPlanBuilder::from(prev).union(next)?.build() + } })?; - let plan = plan_with_metadata(plan, &meta)?; - - // Construct the sort logical operator - // - // The ordering of the results is as follows: - // - // iox::measurement, [group by tag 0, .., group by tag n], time, [projection tag 0, .., projection tag n] - // - // NOTE: - // - // Sort expressions referring to tag keys are always specified in lexicographically ascending order. - let plan = { - if !group_by_tag_set.is_empty() { - // Adding `LIMIT` or `OFFSET` with a `GROUP BY tag, ...` clause is not supported - // - // See: https://github.com/influxdata/influxdb_iox/issues/6920 - if !group_by_tag_set.is_empty() - && (select.offset.is_some() || select.limit.is_some()) - { - return Err(DataFusionError::NotImplemented( - "GROUP BY combined with LIMIT or OFFSET clause".to_owned(), - )); - } - - series_sort.extend( - group_by_tag_set - .into_iter() - .map(|f| Expr::sort(f.as_expr(), true, false)), - ); - }; - - series_sort.push(Expr::sort( - "time".as_expr(), - match select.order_by { - // Default behaviour is to sort by time in ascending order if there is no ORDER BY - None | Some(OrderByClause::Ascending) => true, - Some(OrderByClause::Descending) => false, - }, - false, - )); - - if !projection_tag_set.is_empty() { - series_sort.extend( - projection_tag_set - .into_iter() - .map(|f| Expr::sort(f.as_expr(), true, false)), - ); - } + let plan = plan_with_metadata( + plan, + &InfluxQlMetadata { + measurement_column_index: MEASUREMENT_COLUMN_INDEX, + tag_key_columns: make_tag_key_column_meta( + &fields, + &group_by_tag_set, + &is_projected, + ), + }, + )?; - LogicalPlanBuilder::from(plan).sort(series_sort)?.build() - }?; + let plan = plan_with_sort(plan, select, &group_by_tag_set, &projection_tag_set)?; let plan = self.limit(plan, select.offset, select.limit)?; @@ -368,28 +368,154 @@ impl<'a> InfluxQLToLogicalPlan<'a> { fn project_select( &self, - plan: LogicalPlan, + ctx: &Context<'_>, + input: LogicalPlan, + proj: Vec<Expr>, select: &SelectStatement, fields: &[Field], + group_by_tag_set: &[&str], ) -> Result<LogicalPlan> { - let (proj, plan) = match plan { - LogicalPlan::Projection(Projection { expr, input, .. }) => { - (expr, input.deref().clone()) + let schemas = Schemas::new(input.schema())?; + + // To be consistent with InfluxQL, exclude measurements + // when there are no matching fields. + if !fields.iter().any(|f| { + // Walk the expression tree for the field + // looking for a reference to one column that + // is a field + walk_expr(&f.expr, &mut |e| match e { + IQLExpr::VarRef { name, .. 
} => { + match schemas.iox_schema.field_by_name(name.deref().as_str()) { + Some((InfluxColumnType::Field(_), _)) => ControlFlow::Break(()), + _ => ControlFlow::Continue(()), + } + } + _ => ControlFlow::Continue(()), + }) + .is_break() + }) { + return LogicalPlanBuilder::empty(false).build(); + } + + let plan = self.plan_where_clause(ctx, &select.condition, input, &schemas)?; + + // Transform InfluxQL AST field expressions to a list of DataFusion expressions. + let select_exprs = self.field_list_to_exprs(ctx, &plan, fields, &schemas)?; + + let (plan, select_exprs_post_aggr) = self.select_aggregate( + plan, + fields, + select_exprs, + select.group_by.as_ref(), + group_by_tag_set, + &schemas, + )?; + + // Wrap the plan in a `LogicalPlan::Projection` from the select expressions + project( + plan, + // proj includes the `iox::measurement` column + proj.into_iter().chain(select_exprs_post_aggr.into_iter()), + ) + } + + fn select_aggregate( + &self, + input: LogicalPlan, + fields: &[Field], + select_exprs: Vec<Expr>, + group_by: Option<&GroupByClause>, + group_by_tag_set: &[&str], + schemas: &Schemas, + ) -> Result<(LogicalPlan, Vec<Expr>)> { + // Find a list of unique aggregate expressions from the projection. + // + // For example, a projection such as: + // + // SELECT SUM(foo), SUM(foo) / COUNT(foo) .. + // + // will produce two aggregate expressions: + // + // [SUM(foo), COUNT(foo)] + let aggr_exprs = find_aggregate_exprs(&select_exprs); + if aggr_exprs.is_empty() { + return Ok((input, select_exprs)); + } + + let aggr_group_by_exprs = if let Some(group_by) = group_by { + let mut group_by_exprs = Vec::new(); + + if group_by.time_dimension().is_some() { + // Include the GROUP BY TIME(..) expression + if let Some(index) = find_time_column_index(fields) { + group_by_exprs.push(select_exprs[index].clone()); + } } - // TODO: Review when we support subqueries, as this shouldn't be the case - _ => (vec![], plan), + + // Exclude tags that do not exist in the current table schema. + group_by_exprs.extend(group_by_tag_set.iter().filter_map(|name| { + if schemas + .iox_schema + .field_by_name(name) + .map_or(false, |(dt, _)| dt == InfluxColumnType::Tag) + { + Some(name.as_expr()) + } else { + None + } + })); + + group_by_exprs + } else { + vec![] }; - let schemas = Schemas::new(plan.schema())?; + let plan = LogicalPlanBuilder::from(input) + .aggregate(aggr_group_by_exprs.clone(), aggr_exprs.clone())? + .build()?; - let tz = select.timezone.as_deref().cloned(); - let plan = self.plan_where_clause(&select.condition, plan, &schemas, tz)?; + // Combine the aggregate columns and group by expressions, which represents + // the final projection from the aggregate operator. + let aggr_projection_exprs = [aggr_group_by_exprs, aggr_exprs].concat(); - // Process and validate the field expressions in the SELECT projection list - let select_exprs = self.field_list_to_exprs(&plan, fields, &schemas)?; + // Replace any expressions that are not a column with a column referencing + // an output column from the aggregate schema. 
+ let column_exprs_post_aggr = aggr_projection_exprs + .iter() + .map(|expr| expr_as_column_expr(expr, &plan)) + .collect::<Result<Vec<Expr>>>()?; - // Wrap the plan in a `LogicalPlan::Projection` from the select expressions - project(plan, proj.into_iter().chain(select_exprs.into_iter())) + // Rewrite the aggregate columns from the projection, so that the expressions + // refer to the columns from the aggregate projection + let select_exprs_post_aggr = select_exprs + .iter() + .map(|expr| rebase_expr(expr, &aggr_projection_exprs, &plan)) + .collect::<Result<Vec<Expr>>>()?; + + // Strip the NULL columns, which are tags that do not exist in the aggregate + // table schema. The NULL columns are projected as scalar values in the final + // projection. + let select_exprs_post_aggr_no_nulls = select_exprs_post_aggr + .iter() + .filter(|expr| match expr { + Expr::Alias(expr, _) => !matches!(**expr, Expr::Literal(ScalarValue::Null)), + _ => true, + }) + .cloned() + .collect::<Vec<_>>(); + + // Finally, we ensure that the re-written projection can be resolved + // from the aggregate output columns and that there are no + // column references that are not aggregates. + // + // This will identify issues such as: + // + // SELECT COUNT(field), field FROM foo + // + // where the field without the aggregate is not valid. + check_exprs_satisfy_columns(&column_exprs_post_aggr, &select_exprs_post_aggr_no_nulls)?; + + Ok((plan, select_exprs_post_aggr)) } /// Optionally wrap the input logical plan in a [`LogicalPlan::Limit`] node using the specified @@ -434,13 +560,14 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// Map the InfluxQL `SELECT` projection list into a list of DataFusion expressions. fn field_list_to_exprs( &self, + ctx: &Context<'_>, plan: &LogicalPlan, fields: &[Field], schemas: &Schemas, ) -> Result<Vec<Expr>> { fields .iter() - .map(|field| self.field_to_df_expr(field, plan, schemas)) + .map(|field| self.field_to_df_expr(ctx, field, plan, schemas)) .collect() } @@ -449,11 +576,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// A [`Field`] is analogous to a column in a SQL `SELECT` projection. fn field_to_df_expr( &self, + ctx: &Context<'_>, field: &Field, plan: &LogicalPlan, schemas: &Schemas, ) -> Result<Expr> { - let expr = self.expr_to_df_expr(ExprScope::Projection, &field.expr, schemas)?; + let expr = + self.expr_to_df_expr(&ctx.with_scope(ExprScope::Projection), &field.expr, schemas)?; let expr = rewrite_field_expr(expr, schemas)?; normalize_col( if let Some(alias) = &field.alias { @@ -468,29 +597,27 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// Map an InfluxQL [`ConditionalExpression`] to a DataFusion [`Expr`]. fn conditional_to_df_expr( &self, + ctx: &Context<'_>, iql: &ConditionalExpression, schemas: &Schemas, - tz: Option<chrono_tz::Tz>, ) -> Result<Expr> { match iql { - ConditionalExpression::Expr(expr) => { - self.expr_to_df_expr(ExprScope::Where, expr, schemas) - } + ConditionalExpression::Expr(expr) => self.expr_to_df_expr(ctx, expr, schemas), ConditionalExpression::Binary { lhs, op, rhs } => { - self.binary_conditional_to_df_expr(lhs, *op, rhs, schemas, tz) + self.binary_conditional_to_df_expr(ctx, lhs, *op, rhs, schemas) } - ConditionalExpression::Grouped(e) => self.conditional_to_df_expr(e, schemas, tz), + ConditionalExpression::Grouped(e) => self.conditional_to_df_expr(ctx, e, schemas), } } /// Map an InfluxQL binary conditional expression to a DataFusion [`Expr`]. 
fn binary_conditional_to_df_expr( &self, + ctx: &Context<'_>, lhs: &ConditionalExpression, op: ConditionalOperator, rhs: &ConditionalExpression, schemas: &Schemas, - tz: Option<chrono_tz::Tz>, ) -> Result<Expr> { let op = conditional_op_to_operator(op)?; @@ -509,19 +636,19 @@ impl<'a> InfluxQLToLogicalPlan<'a> { { if lhs_time { ( - self.conditional_to_df_expr(lhs, schemas, tz)?, - time_range_to_df_expr(find_expr(rhs)?, tz)?, + self.conditional_to_df_expr(ctx, lhs, schemas)?, + time_range_to_df_expr(find_expr(rhs)?, ctx.tz)?, ) } else { ( - time_range_to_df_expr(find_expr(lhs)?, tz)?, - self.conditional_to_df_expr(rhs, schemas, tz)?, + time_range_to_df_expr(find_expr(lhs)?, ctx.tz)?, + self.conditional_to_df_expr(ctx, rhs, schemas)?, ) } } else { ( - self.conditional_to_df_expr(lhs, schemas, tz)?, - self.conditional_to_df_expr(rhs, schemas, tz)?, + self.conditional_to_df_expr(ctx, lhs, schemas)?, + self.conditional_to_df_expr(ctx, rhs, schemas)?, ) }; @@ -529,7 +656,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } /// Map an InfluxQL [`IQLExpr`] to a DataFusion [`Expr`]. - fn expr_to_df_expr(&self, scope: ExprScope, iql: &IQLExpr, schemas: &Schemas) -> Result<Expr> { + fn expr_to_df_expr(&self, ctx: &Context<'_>, iql: &IQLExpr, schemas: &Schemas) -> Result<Expr> { let iox_schema = &schemas.iox_schema; match iql { // rewriter is expected to expand wildcard expressions @@ -542,19 +669,50 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } => { let name = normalize_identifier(name); Ok( - // Per the Go implementation, the time column is case-insensitive in the - // `WHERE` clause and disregards any postfix type cast operator. - // - // See: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L5751-L5753 - if scope == ExprScope::Where && name.eq_ignore_ascii_case("time") { + if ctx.scope == ExprScope::Where && name.eq_ignore_ascii_case("time") { + // Per the Go implementation, the time column is case-insensitive in the + // `WHERE` clause and disregards any postfix type cast operator. + // + // See: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L5751-L5753 "time".as_expr() + } else if ctx.scope == ExprScope::Projection && name == "time" { + if ctx.is_aggregate { + // In the projection, determine whether the query is projecting the time column + // or binning the time. + if let Some(group_by) = ctx.group_by { + if let Some(dim) = group_by.time_dimension() { + // Not supported until date_bin_gapfill is complete + let fill = ctx.fill.unwrap_or_default(); + if fill != FillClause::None { + return Err(DataFusionError::NotImplemented(format!( + "{fill}" + ))); + } + + let stride = expr_to_df_interval_dt(&dim.interval)?; + let offset = if let Some(offset) = &dim.offset { + duration_expr_to_nanoseconds(offset)? 
+ } else { + 0 + }; + + return Ok(date_bin( + stride, + "time".as_expr(), + lit(ScalarValue::TimestampNanosecond(Some(offset), None)), + )); + } + } + lit_timestamp_nano(0) + } else { + "time".as_expr() + } } else { - match iox_schema.find_index_of(&name) { - Some(idx) => { + match iox_schema.field_by_name(&name) { + Some((col_type, _)) => { let column = name.as_expr(); match opt_dst_type { Some(dst_type) => { - let (col_type, _) = iox_schema.field(idx); let src_type = column_type_to_var_ref_data_type(col_type); if src_type == *dst_type { column @@ -596,7 +754,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> { Literal::Duration(_) => { Err(DataFusionError::NotImplemented("duration literal".into())) } - Literal::Regex(re) => match scope { + Literal::Regex(re) => match ctx.scope { // a regular expression in a projection list is unexpected, // as it should have been expanded by the rewriter. ExprScope::Projection => Err(DataFusionError::Internal( @@ -606,46 +764,111 @@ impl<'a> InfluxQLToLogicalPlan<'a> { }, }, IQLExpr::Distinct(_) => Err(DataFusionError::NotImplemented("DISTINCT".into())), - IQLExpr::Call { name, args } => self.call_to_df_expr(scope, name, args, schemas), + IQLExpr::Call { name, args } => self.call_to_df_expr(ctx, name, args, schemas), IQLExpr::Binary { lhs, op, rhs } => { - self.arithmetic_expr_to_df_expr(scope, lhs, *op, rhs, schemas) + self.arithmetic_expr_to_df_expr(ctx, lhs, *op, rhs, schemas) } - IQLExpr::Nested(e) => self.expr_to_df_expr(scope, e, schemas), + IQLExpr::Nested(e) => self.expr_to_df_expr(ctx, e, schemas), } } /// Map an InfluxQL function call to a DataFusion expression. + /// + /// A full list of supported functions available via the [InfluxQL documentation][docs]. + /// + /// > **Note** + /// > + /// > These are not necessarily implemented, and are tracked by the following + /// > issues: + /// > + /// > * <https://github.com/influxdata/influxdb_iox/issues/6934> + /// > * <https://github.com/influxdata/influxdb_iox/issues/6935> + /// > * <https://github.com/influxdata/influxdb_iox/issues/6937> + /// > * <https://github.com/influxdata/influxdb_iox/issues/6938> + /// > * <https://github.com/influxdata/influxdb_iox/issues/6939> + /// + /// [docs]: https://docs.influxdata.com/influxdb/v1.8/query_language/functions/ fn call_to_df_expr( &self, - scope: ExprScope, + ctx: &Context<'_>, name: &str, args: &[IQLExpr], schemas: &Schemas, ) -> Result<Expr> { if is_scalar_math_function(name) { - self.scalar_math_func_to_df_expr(scope, name, args, schemas) - } else { - match scope { - ExprScope::Projection => Err(DataFusionError::NotImplemented( - "aggregate and selector functions in projection list".into(), - )), - ExprScope::Where => { - if name.eq_ignore_ascii_case("now") { - Err(DataFusionError::NotImplemented("now".into())) - } else { - Err(DataFusionError::External( - format!("invalid function call in condition: {name}").into(), - )) - } + return self.scalar_math_func_to_df_expr(ctx, name, args, schemas); + } + + match ctx.scope { + ExprScope::Where => { + if name.eq_ignore_ascii_case("now") { + Err(DataFusionError::NotImplemented("now".into())) + } else { + Err(DataFusionError::External( + format!("invalid function call in condition: {name}").into(), + )) + } + } + ExprScope::Projection => self.function_to_df_expr(ctx, name, args, schemas), + } + } + + fn function_to_df_expr( + &self, + ctx: &Context<'_>, + name: &str, + args: &[IQLExpr], + schemas: &Schemas, + ) -> Result<Expr> { + fn check_arg_count(name: &str, args: &[IQLExpr], count: usize) -> Result<()> { + let got 
= args.len(); + if got != count { + Err(DataFusionError::Plan(format!( + "invalid number of arguments for {name}: expected {count}, got {got}" + ))) + } else { + Ok(()) + } + } + + match name { + "count" => { + // TODO(sgc): Handle `COUNT DISTINCT` variants + let distinct = false; + + check_arg_count("count", args, 1)?; + let expr = self.expr_to_df_expr(ctx, &args[0], schemas)?; + match &expr { + Expr::Literal(ScalarValue::Null) => Ok(expr), + _ => Ok(Expr::AggregateFunction(expr::AggregateFunction::new( + AggregateFunction::Count, + vec![expr], + distinct, + None, + ))), + } + } + "sum" | "stddev" | "mean" | "median" => { + check_arg_count(name, args, 1)?; + let expr = self.expr_to_df_expr(ctx, &args[0], schemas)?; + match &expr { + Expr::Literal(ScalarValue::Null) => Ok(expr), + _ => Ok(Expr::AggregateFunction(expr::AggregateFunction::new( + AggregateFunction::from_str(name)?, + vec![expr], + false, + None, + ))), } } + _ => Err(DataFusionError::Plan(format!("Invalid function '{name}'"))), } } /// Map the InfluxQL scalar function call to a DataFusion scalar function expression. fn scalar_math_func_to_df_expr( &self, - scope: ExprScope, + ctx: &Context<'_>, name: &str, args: &[IQLExpr], schemas: &Schemas, @@ -653,7 +876,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> { let fun = BuiltinScalarFunction::from_str(name)?; let args = args .iter() - .map(|e| self.expr_to_df_expr(scope, e, schemas)) + .map(|e| self.expr_to_df_expr(ctx, e, schemas)) .collect::<Result<Vec<Expr>>>()?; Ok(Expr::ScalarFunction { fun, args }) } @@ -661,16 +884,16 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// Map an InfluxQL arithmetic expression to a DataFusion [`Expr`]. fn arithmetic_expr_to_df_expr( &self, - scope: ExprScope, + ctx: &Context<'_>, lhs: &IQLExpr, op: BinaryOperator, rhs: &IQLExpr, schemas: &Schemas, ) -> Result<Expr> { Ok(binary_expr( - self.expr_to_df_expr(scope, lhs, schemas)?, + self.expr_to_df_expr(ctx, lhs, schemas)?, binary_operator_to_df_operator(op), - self.expr_to_df_expr(scope, rhs, schemas)?, + self.expr_to_df_expr(ctx, rhs, schemas)?, )) } @@ -678,14 +901,18 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// optional InfluxQL conditional expression. fn plan_where_clause( &self, + ctx: &Context<'_>, condition: &Option<WhereClause>, plan: LogicalPlan, schemas: &Schemas, - tz: Option<chrono_tz::Tz>, ) -> Result<LogicalPlan> { match condition { Some(where_clause) => { - let filter_expr = self.conditional_to_df_expr(where_clause, schemas, tz)?; + let filter_expr = self.conditional_to_df_expr( + &ctx.with_scope(ExprScope::Where), + where_clause, + schemas, + )?; let filter_expr = rewrite_conditional_expr(filter_expr, schemas)?; let plan = LogicalPlanBuilder::from(plan) .filter(filter_expr)? @@ -698,10 +925,14 @@ impl<'a> InfluxQLToLogicalPlan<'a> { /// Generate a list of logical plans for each of the tables references in the `FROM` /// clause. - fn plan_from_tables(&self, from: &FromMeasurementClause) -> Result<VecDeque<LogicalPlan>> { - let mut plans = VecDeque::new(); + fn plan_from_tables( + &self, + from: &FromMeasurementClause, + ) -> Result<VecDeque<(LogicalPlan, Vec<Expr>)>> { + // A list of scans and their initial projections + let mut table_projs = VecDeque::new(); for ms in from.iter() { - let Some(plan) = match ms { + let Some(table_proj) = match ms { MeasurementSelection::Name(qn) => match qn.name { MeasurementName::Name(ref ident) => { self.create_table_ref(normalize_identifier(ident)) @@ -715,22 +946,22 @@ impl<'a> InfluxQLToLogicalPlan<'a> { "subquery in FROM clause".into(), )), }? 
else { continue }; - plans.push_back(plan); + table_projs.push_back(table_proj); } - Ok(plans) + Ok(table_projs) } /// Create a [LogicalPlan] that refers to the specified `table_name`. /// /// Normally, this functions will not return a `None`, as tables have been matched] /// by the [`rewrite_statement`] function. - fn create_table_ref(&self, table_name: String) -> Result<Option<LogicalPlan>> { + fn create_table_ref(&self, table_name: String) -> Result<Option<(LogicalPlan, Vec<Expr>)>> { Ok(if let Ok(source) = self.s.get_table_provider(&table_name) { let table_ref = TableReference::bare(table_name.to_string()); - Some(project( + Some(( LogicalPlanBuilder::scan(table_ref, source, None)?.build()?, - iter::once(lit_dict(&table_name).alias(INFLUXQL_MEASUREMENT_COLUMN_NAME)), - )?) + vec![lit_dict(&table_name).alias(INFLUXQL_MEASUREMENT_COLUMN_NAME)], + )) } else { None }) @@ -768,6 +999,7 @@ fn plan_with_metadata(plan: LogicalPlan, metadata: &InfluxQlMetadata) -> Result< u.schema = make_schema(u.schema, metadata)?; LogicalPlan::Union(u) } + LogicalPlan::EmptyRelation(p) => LogicalPlan::EmptyRelation(p), _ => { return Err(DataFusionError::Internal( "unexpected LogicalPlan".to_owned(), @@ -787,22 +1019,16 @@ fn has_aggregate_exprs(fields: &FieldList) -> bool { }) } -/// Find all the tag columns projected in the `SELECT` from the field list. -fn find_tag_columns<'a, T: FromIterator<&'a str>>(fields: &'a FieldList) -> T { - fields - .iter() - .filter_map(|f| { - if let IQLExpr::VarRef { - name, - data_type: Some(VarRefDataType::Tag), - } = &f.expr - { - Some(name.deref().as_str()) - } else { - None - } - }) - .collect() +/// Find all the columns where the resolved data type +/// is a tag or is [`None`], which is unknown. +fn find_tag_and_unknown_columns(fields: &FieldList) -> impl Iterator<Item = &str> { + fields.iter().filter_map(|f| match &f.expr { + IQLExpr::VarRef { + name, + data_type: Some(VarRefDataType::Tag) | None, + } => Some(name.deref().as_str()), + _ => None, + }) } /// Perform a series of passes to rewrite `expr` in compliance with InfluxQL behavior @@ -834,25 +1060,18 @@ impl<'a> ExprRewriter for FixRegularExpressions<'a> { right, }) => { if let Expr::Column(ref col) = *left { - if let Some(idx) = self.schemas.iox_schema.find_index_of(&col.name) { - let (col_type, _) = self.schemas.iox_schema.field(idx); - match col_type { - InfluxColumnType::Tag => { - // Regular expressions expect to be compared with a Utf8 - let left = Box::new( - left.cast_to(&DataType::Utf8, &self.schemas.df_schema)?, - ); - Ok(Expr::BinaryExpr(BinaryExpr { left, op, right })) - } - InfluxColumnType::Field(InfluxFieldType::String) => { - Ok(Expr::BinaryExpr(BinaryExpr { left, op, right })) - } - // Any other column type should evaluate to false - _ => Ok(lit(false)), + match self.schemas.iox_schema.field_by_name(&col.name) { + Some((InfluxColumnType::Tag, _)) => { + // Regular expressions expect to be compared with a Utf8 + let left = + Box::new(left.cast_to(&DataType::Utf8, &self.schemas.df_schema)?); + Ok(Expr::BinaryExpr(BinaryExpr { left, op, right })) } - } else { - // If the field does not exist, evaluate to false - Ok(lit(false)) + Some((InfluxColumnType::Field(InfluxFieldType::String), _)) => { + Ok(Expr::BinaryExpr(BinaryExpr { left, op, right })) + } + // Any other column type should evaluate to false + _ => Ok(lit(false)), } } else { // If this is not a simple column expression, evaluate to false, @@ -902,95 +1121,82 @@ fn normalize_identifier(ident: &Identifier) -> String { ident.deref().clone() } 
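// A minimal, self-contained sketch of the regex-rewrite dispatch implemented by
// `FixRegularExpressions` above: a regex comparison is kept only when the
// left-hand column is a tag (cast to Utf8 first) or a string field, and every
// other case collapses to a constant `false`. The enum names and the free
// function below are hypothetical stand-ins, not the DataFusion / IOx schema
// API, and exist purely to illustrate the rule.

/// Hypothetical, simplified column classification (assumption, not the IOx type).
#[derive(Debug, Clone, Copy, PartialEq)]
enum ColType {
    Tag,
    StringField,
    NumericField,
    Missing,
}

/// Hypothetical, simplified outcome of rewriting `col =~ /re/`.
#[derive(Debug, PartialEq)]
enum RegexRewrite {
    /// Keep the comparison, casting the tag column to Utf8 first.
    CastToUtf8ThenMatch,
    /// Keep the comparison as-is (string fields are already Utf8).
    MatchAsIs,
    /// Any other column type, or a missing column, evaluates to `false`.
    LiteralFalse,
}

fn rewrite_regex_comparison(col: ColType) -> RegexRewrite {
    match col {
        ColType::Tag => RegexRewrite::CastToUtf8ThenMatch,
        ColType::StringField => RegexRewrite::MatchAsIs,
        // mirrors the `_ => Ok(lit(false))` arm in the rewriter above
        _ => RegexRewrite::LiteralFalse,
    }
}

#[test]
fn regex_rewrite_dispatch_sketch() {
    assert_eq!(rewrite_regex_comparison(ColType::Tag), RegexRewrite::CastToUtf8ThenMatch);
    assert_eq!(rewrite_regex_comparison(ColType::StringField), RegexRewrite::MatchAsIs);
    assert_eq!(rewrite_regex_comparison(ColType::NumericField), RegexRewrite::LiteralFalse);
    assert_eq!(rewrite_regex_comparison(ColType::Missing), RegexRewrite::LiteralFalse);
}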
-/// Returns true if the field list contains a `time` column. +/// Find the index of the time column in the fields list. /// /// > **Note** /// > /// > To match InfluxQL, the `time` column must not exist as part of a /// > complex expression. -fn has_time_column(fields: &[Field]) -> bool { +pub(crate) fn find_time_column_index(fields: &[Field]) -> Option<usize> { fields .iter() - .any(|f| matches!(&f.expr, IQLExpr::VarRef { name, .. } if name.deref() == "time")) + .find_position( + |f| matches!(&f.expr, IQLExpr::VarRef { name, .. } if name.deref() == "time"), + ) + .map(|(i, _)| i) } -static SCALAR_MATH_FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { - HashSet::from([ - "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", "log2", - "log10", "sqrt", "pow", "floor", "ceil", "round", - ]) -}); - /// Returns `true` if `name` is a mathematical scalar function /// supported by InfluxQL. fn is_scalar_math_function(name: &str) -> bool { - SCALAR_MATH_FUNCTIONS.contains(name) + static FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { + HashSet::from([ + "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", + "log2", "log10", "sqrt", "pow", "floor", "ceil", "round", + ]) + }); + + FUNCTIONS.contains(name) } -/// A list of valid aggregate and aggregate-like functions supported by InfluxQL. -/// -/// A full list is available via the [InfluxQL documentation][docs]. -/// -/// > **Note** -/// > -/// > These are not necessarily implemented, and are tracked by the following -/// > issues: -/// > -/// > * <https://github.com/influxdata/influxdb_iox/issues/6934> -/// > * <https://github.com/influxdata/influxdb_iox/issues/6935> -/// > * <https://github.com/influxdata/influxdb_iox/issues/6937> -/// > * <https://github.com/influxdata/influxdb_iox/issues/6938> -/// > * <https://github.com/influxdata/influxdb_iox/issues/6939> -/// -/// [docs]: https://docs.influxdata.com/influxdb/v1.8/query_language/functions/ -static AGGREGATE_FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { - HashSet::from([ - // Scalar-like functions - "cumulative_sum", - "derivative", - "difference", - "elapsed", - "moving_average", - "non_negative_derivative", - "non_negative_difference", - // Selector functions - "bottom", - "first", - "last", - "max", - "min", - "percentile", - "sample", - "top", - // Aggregate functions - "count", - "count", - "integral", - "mean", - "median", - "mode", - "spread", - "stddev", - "sum", - // Prediction functions - "holt_winters", - "holt_winters_with_fit", - // Technical analysis functions - "chande_momentum_oscillator", - "exponential_moving_average", - "double_exponential_moving_average", - "kaufmans_efficiency_ratio", - "kaufmans_adaptive_moving_average", - "triple_exponential_moving_average", - "triple_exponential_derivative", - "relative_strength_index", - ]) -}); - /// Returns `true` if `name` is an aggregate or aggregate function /// supported by InfluxQL. 
fn is_aggregate_function(name: &str) -> bool { - AGGREGATE_FUNCTIONS.contains(name) + static FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { + HashSet::from([ + // Scalar-like functions + "cumulative_sum", + "derivative", + "difference", + "elapsed", + "moving_average", + "non_negative_derivative", + "non_negative_difference", + // Selector functions + "bottom", + "first", + "last", + "max", + "min", + "percentile", + "sample", + "top", + // Aggregate functions + "count", + "count", + "integral", + "mean", + "median", + "mode", + "spread", + "stddev", + "sum", + // Prediction functions + "holt_winters", + "holt_winters_with_fit", + // Technical analysis functions + "chande_momentum_oscillator", + "exponential_moving_average", + "double_exponential_moving_average", + "kaufmans_efficiency_ratio", + "kaufmans_adaptive_moving_average", + "triple_exponential_moving_average", + "triple_exponential_derivative", + "relative_strength_index", + ]) + }); + + FUNCTIONS.contains(name) } /// Returns true if the conditional expression is a single node that @@ -1125,45 +1331,46 @@ mod test { } // validate metadata is empty when there is no group by - let md = metadata("SELECT free FROM disk").unwrap(); + let md = metadata("SELECT bytes_free FROM disk").unwrap(); assert_eq!(md.measurement_column_index, 0); assert!(md.tag_key_columns.is_empty()); - let md = metadata("SELECT free FROM disk, cpu").unwrap(); + let md = metadata("SELECT bytes_free FROM disk, cpu").unwrap(); assert_eq!(md.measurement_column_index, 0); assert!(md.tag_key_columns.is_empty()); - let md = metadata("SELECT free FROM disk GROUP BY device").unwrap(); + let md = metadata("SELECT bytes_free FROM disk GROUP BY device").unwrap(); assert_eq!(md.measurement_column_index, 0); assert_tag_keys!(md, ("device", 2, false)); // validate tag in projection is not included in metadata - let md = - metadata("SELECT cpu, usage_idle, free FROM cpu, disk GROUP BY device").unwrap(); + let md = metadata("SELECT cpu, usage_idle, bytes_free FROM cpu, disk GROUP BY device") + .unwrap(); assert_eq!(md.measurement_column_index, 0); assert_tag_keys!(md, ("device", 2, false)); // validate multiple tags from different measurements - let md = - metadata("SELECT usage_idle, free FROM cpu, disk GROUP BY cpu, device").unwrap(); + let md = metadata("SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu, device") + .unwrap(); assert_eq!(md.measurement_column_index, 0); assert_tag_keys!(md, ("cpu", 2, false), ("device", 3, false)); // validate multiple tags from different measurements, and key order is maintained - let md = - metadata("SELECT usage_idle, free FROM cpu, disk GROUP BY device, cpu").unwrap(); + let md = metadata("SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY device, cpu") + .unwrap(); assert_eq!(md.measurement_column_index, 0); assert_tag_keys!(md, ("cpu", 2, false), ("device", 3, false)); // validate that with cpu tag explicitly listed in project, tag-key order is maintained and column index // is valid - let md = metadata("SELECT usage_idle, free, cpu FROM cpu, disk GROUP BY cpu, device") - .unwrap(); + let md = + metadata("SELECT usage_idle, bytes_free, cpu FROM cpu, disk GROUP BY cpu, device") + .unwrap(); assert_eq!(md.measurement_column_index, 0); assert_tag_keys!(md, ("cpu", 5, true), ("device", 2, false)); // validate region tag, shared by both measurements, is still correctly handled let md = metadata( - "SELECT region, usage_idle, free, cpu FROM cpu, disk GROUP BY region, cpu, device", + "SELECT region, usage_idle, bytes_free, 
cpu FROM cpu, disk GROUP BY region, cpu, device", ) .unwrap(); assert_eq!(md.measurement_column_index, 0); @@ -1179,29 +1386,29 @@ mod test { #[test] fn test_from_zero_to_many() { assert_snapshot!(plan("SELECT host, cpu, device, usage_idle, bytes_used FROM cpu, disk"), @r###" - Sort: iox::measurement ASC NULLS LAST, cpu.time ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + Sort: iox::measurement ASC NULLS LAST, time ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_used [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.host AS host, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_used [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time, disk.host AS host, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, disk.bytes_used AS bytes_used [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, disk.host AS host, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, disk.bytes_used AS bytes_used [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_used:Int64;N] TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] "###); // nonexistent assert_snapshot!(plan("SELECT host, usage_idle FROM non_existent"), @"EmptyRelation []"); assert_snapshot!(plan("SELECT host, usage_idle FROM cpu, non_existent"), @r###" - Sort: cpu.time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, 
cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); // multiple of same measurement assert_snapshot!(plan("SELECT host, usage_idle FROM cpu, cpu"), @r###" - Sort: iox::measurement ASC NULLS LAST, cpu.time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: iox::measurement ASC NULLS LAST, time ASC NULLS LAST, host ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.host AS host, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), host:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); } @@ -1210,16 +1417,16 @@ mod test { fn test_time_range_in_where() { assert_snapshot!( plan("SELECT foo, f64_field FROM data where time > now() - 10s"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, 
f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: data.time > now() - IntervalMonthDayNano("10000000000") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### ); assert_snapshot!( plan("SELECT foo, f64_field FROM data where time > '2004-04-09T02:33:45Z'"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: data.time > TimestampNanosecond(1081478025000000000, None) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -1231,8 +1438,8 @@ mod test { // time on the right-hand side assert_snapshot!( plan("SELECT foo, f64_field FROM data where now() - 10s < time"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, 
Utf8);N, f64_field:Float64;N] Filter: now() - IntervalMonthDayNano("10000000000") < data.time [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -1241,16 +1448,16 @@ mod test { // Regular expression equality tests assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo =~ /f/"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: CAST(data.foo AS Utf8) ~ Utf8("f") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // regular expression for a numeric field is rewritten to `false` assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field =~ /f/"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, 
foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); @@ -1258,8 +1465,8 @@ mod test { // regular expression for a non-existent field is rewritten to `false` assert_snapshot!( plan("SELECT foo, f64_field FROM data where non_existent =~ /f/"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -1268,16 +1475,16 @@ mod test { // Regular expression inequality tests assert_snapshot!(plan("SELECT foo, f64_field FROM data where foo !~ /f/"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: CAST(data.foo AS Utf8) !~ Utf8("f") [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // regular expression for a numeric field is rewritten to `false` assert_snapshot!(plan("SELECT foo, f64_field FROM data where f64_field !~ /f/"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), 
time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); @@ -1285,8 +1492,8 @@ mod test { // regular expression for a non-existent field is rewritten to `false` assert_snapshot!( plan("SELECT foo, f64_field FROM data where non_existent !~ /f/"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] Filter: Boolean(false) [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "### @@ -1297,49 +1504,41 @@ mod test { fn test_column_matching_rules() { // Cast between numeric types assert_snapshot!(plan("SELECT f64_field::integer FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, CAST(data.f64_field AS Int64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] + Projection: Dictionary(Int32, 
Utf8("data")) AS iox::measurement, data.time AS time, CAST(data.f64_field AS Int64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT i64_field::float FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, CAST(data.i64_field AS Float64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, CAST(data.i64_field AS Float64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // use field selector assert_snapshot!(plan("SELECT bool_field::field FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.bool_field AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.bool_field AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); - // invalid column reverence - assert_snapshot!(plan("SELECT not_exists::tag FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS not_exists [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] - TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] - "###); - assert_snapshot!(plan("SELECT not_exists::field FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), not_exists:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS not_exists [iox::measurement:Dictionary(Int32, Utf8), 
time:Timestamp(Nanosecond, None), not_exists:Null;N] - TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] - "###); + // invalid column reference + assert_snapshot!(plan("SELECT not_exists::tag FROM data"), @"EmptyRelation []"); + assert_snapshot!(plan("SELECT not_exists::field FROM data"), @"EmptyRelation []"); // Returns NULL for invalid casts assert_snapshot!(plan("SELECT f64_field::string FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT f64_field::boolean FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT str_field::boolean FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); } @@ -1348,26 +1547,26 @@ mod test { fn test_explain() { assert_snapshot!(plan("EXPLAIN SELECT foo, 
f64_field FROM data"), @r###" Explain [plan_type:Utf8, plan:Utf8] - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("EXPLAIN VERBOSE SELECT foo, f64_field FROM data"), @r###" Explain [plan_type:Utf8, plan:Utf8] - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("EXPLAIN ANALYZE SELECT foo, f64_field FROM data"), @r###" Analyze [plan_type:Utf8, plan:Utf8] - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, 
foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("EXPLAIN ANALYZE VERBOSE SELECT foo, f64_field FROM data"), @r###" Analyze [plan_type:Utf8, plan:Utf8] - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); } @@ -1376,162 +1575,259 @@ mod test { fn test_select_cast_postfix_operator() { // Float casting assert_snapshot!(plan("SELECT f64_field::float FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, all_types.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::unsigned FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:UInt64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.f64_field AS UInt64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:UInt64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:UInt64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.f64_field AS UInt64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:UInt64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); 
assert_snapshot!(plan("SELECT f64_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.f64_field AS Int64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.f64_field AS Int64) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::string FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::boolean FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Integer casting assert_snapshot!(plan("SELECT i64_field::float FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.i64_field AS Float64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] + 
Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.i64_field AS Float64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Float64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::unsigned FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:UInt64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.i64_field AS UInt64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:UInt64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:UInt64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.i64_field AS UInt64) AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:UInt64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.i64_field AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, all_types.i64_field AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::string FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT i64_field::boolean FROM all_types"), @r###" - 
Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), i64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Unsigned casting assert_snapshot!(plan("SELECT u64_field::float FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.u64_field AS Float64) AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.u64_field AS Float64) AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Float64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::unsigned FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:UInt64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.u64_field AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:UInt64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, all_types.u64_field AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:UInt64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.u64_field AS Int64) AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Int64;N] + Projection: Dictionary(Int32, 
Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.u64_field AS Int64) AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::string FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT u64_field::boolean FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), u64_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // String casting assert_snapshot!(plan("SELECT str_field::float FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::unsigned FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), 
str_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::string FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Utf8;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.str_field AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Utf8;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Utf8;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, all_types.str_field AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Utf8;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT str_field::boolean FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, 
i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Boolean casting assert_snapshot!(plan("SELECT bool_field::float FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::unsigned FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::string FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Sort: time ASC NULLS LAST 
[iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT bool_field::boolean FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, all_types.bool_field AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, all_types.bool_field AS bool_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), bool_field:Boolean;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); // Validate various projection expressions with casts assert_snapshot!(plan("SELECT f64_field::integer + i64_field + u64_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, CAST(all_types.f64_field AS Int64) + all_types.i64_field + CAST(all_types.u64_field AS Int64) AS f64_field_i64_field_u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, CAST(all_types.f64_field AS Int64) + all_types.i64_field + CAST(all_types.u64_field AS Int64) AS f64_field_i64_field_u64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_u64_field:Int64;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); assert_snapshot!(plan("SELECT f64_field::integer + i64_field + str_field::integer FROM all_types"), @r###" - Sort: all_types.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] - Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time, NULL AS f64_field_i64_field_str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] + 
Projection: Dictionary(Int32, Utf8("all_types")) AS iox::measurement, all_types.time AS time, NULL AS f64_field_i64_field_str_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field_i64_field_str_field:Null;N] TableScan: all_types [bool_field:Boolean;N, f64_field:Float64;N, i64_field:Int64;N, str_field:Utf8;N, tag0:Dictionary(Int32, Utf8);N, tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), u64_field:UInt64;N] "###); } } - /// Tests to validate InfluxQL `SELECT` statements that utilise aggregate functions. + /// Tests to validate InfluxQL `SELECT` statements that project aggregate functions, such as `COUNT` or `SUM`. mod select_aggregate { use super::*; #[test] - fn test_aggregates_are_not_yet_supported() { - assert_snapshot!(plan("SELECT count(f64_field) FROM data"), @"This feature is not implemented: aggregate functions"); + fn test_single_measurement() { + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY non_existent"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), non_existent:Null;N, count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, NULL AS non_existent, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), non_existent:Null;N, count:Int64;N] + Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY foo"), @r###" + Sort: foo ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, data.foo AS foo, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, count:Int64;N] + Aggregate: groupBy=[[data.foo]], aggr=[[COUNT(data.f64_field)]] [foo:Dictionary(Int32, Utf8);N, COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + + // The `COUNT(f64_field)` aggregate is only projected once in the Aggregate 
and reused in the projection + assert_snapshot!(plan("SELECT COUNT(f64_field), COUNT(f64_field) + COUNT(f64_field), COUNT(f64_field) * 3 FROM data"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_f64_field_count_f64_field:Int64;N, count_f64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count, COUNT(data.f64_field) + COUNT(data.f64_field) AS count_f64_field_count_f64_field, COUNT(data.f64_field) * Int64(3) AS count_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N, count_f64_field_count_f64_field:Int64;N, count_f64_field:Int64;N] + Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + + // non-existent tags are excluded from the Aggregate groupBy and Sort operators + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY foo, non_existent"), @r###" + Sort: foo ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, non_existent:Null;N, count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, data.foo AS foo, NULL AS non_existent, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, non_existent:Null;N, count:Int64;N] + Aggregate: groupBy=[[data.foo]], aggr=[[COUNT(data.f64_field)]] [foo:Dictionary(Int32, Utf8);N, COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + + // Fallible + + // Cannot combine aggregate and non-aggregate columns in the projection + assert_snapshot!(plan("SELECT COUNT(f64_field), f64_field FROM data"), @"Error during planning: mixing aggregate and non-aggregate columns is not supported"); + assert_snapshot!(plan("SELECT COUNT(f64_field) + f64_field FROM data"), @"Error during planning: mixing aggregate and non-aggregate columns is not supported"); + } + + #[test] + fn test_single_measurement_group_by_time() { + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s) FILL(none)"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, time, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count:Int64;N] + Aggregate: groupBy=[[datebin(IntervalDayTime("10000"), data.time, TimestampNanosecond(0, None)) AS time]], aggr=[[COUNT(data.f64_field)]] [time:Timestamp(Nanosecond, None);N, COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with 
space:Float64;N] + "###); + + // supports offset parameter + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s, 5s) FILL(none)"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, time, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, count:Int64;N] + Aggregate: groupBy=[[datebin(IntervalDayTime("10000"), data.time, TimestampNanosecond(5000000000, None)) AS time]], aggr=[[COUNT(data.f64_field)]] [time:Timestamp(Nanosecond, None);N, COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + } + + /// These tests validate the planner returns an error when using features that + /// are not implemented. + mod not_implemented { + use super::*; + + #[test] + fn test_with_limit_or_offset() { + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data LIMIT 1"), @r###" + Limit: skip=0, fetch=1 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data OFFSET 1"), @r###" + Limit: skip=1, fetch=None [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, TimestampNanosecond(0, None) AS time, COUNT(data.f64_field) AS count [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), count:Int64;N] + Aggregate: groupBy=[[]], aggr=[[COUNT(data.f64_field)]] [COUNT(data.f64_field):Int64;N] + TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] + "###); + } + + #[test] + fn test_group_by_time_precision() { + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10u) FILL(none)"), @"This feature is not implemented: interval limited to a precision of milliseconds. 
See https://github.com/influxdata/influxdb_iox/issues/7204"); + } + + #[test] + fn test_single_measurement_group_by_time_gapfill() { + // Default is FILL(null) + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s)"), @"This feature is not implemented: FILL(NULL)"); + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s) FILL(null)"), @"This feature is not implemented: FILL(NULL)"); + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s) FILL(linear)"), @"This feature is not implemented: FILL(LINEAR)"); + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s) FILL(previous)"), @"This feature is not implemented: FILL(PREVIOUS)"); + assert_snapshot!(plan("SELECT COUNT(f64_field) FROM data GROUP BY TIME(10s) FILL(0)"), @"This feature is not implemented: FILL(0)"); + } } } @@ -1544,8 +1840,8 @@ mod test { #[test] fn test_single_measurement() { assert_snapshot!(plan("SELECT f64_field FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT time, f64_field FROM data"), @r###" @@ -1560,28 +1856,28 @@ mod test { TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, f64_field FROM data"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, f64_field, i64_field FROM 
data"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field AS f64_field, data.i64_field AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field AS f64_field, data.i64_field AS i64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N, i64_field:Int64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT /^f/ FROM data"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.f64_field AS f64_field, data.foo AS foo [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.f64_field AS f64_field, data.foo AS foo [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT * FROM data"), @r###" - Sort: data.time ASC NULLS LAST, bar ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.TIME AS TIME, data.bar AS bar, data.bool_field AS bool_field, data.f64_field AS f64_field, data.foo AS foo, data.i64_field AS i64_field, data.mixedCase AS mixedCase, data.str_field AS str_field, data.with space AS with space [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] + Sort: time ASC NULLS LAST, bar ASC NULLS LAST, foo ASC NULLS LAST 
[iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.TIME AS TIME, data.bar AS bar, data.bool_field AS bool_field, data.f64_field AS f64_field, data.foo AS foo, data.i64_field AS i64_field, data.mixedCase AS mixedCase, data.str_field AS str_field, data.with space AS with space [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, with space:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT TIME FROM data"), @r###" - Sort: data.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.TIME AS TIME [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N] + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.TIME AS TIME [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), TIME:Boolean;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); // TIME is a field } @@ -1590,23 +1886,23 @@ mod test { #[test] fn test_simple_arithmetic_in_projection() { assert_snapshot!(plan("SELECT foo, f64_field + f64_field FROM data"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field + data.f64_field AS f64_field_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field + data.f64_field AS f64_field_f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field_f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, sin(f64_field) FROM data"), 
@r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, sin(data.f64_field) AS sin [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, sin(data.f64_field) AS sin [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, sin:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, atan2(f64_field, 2) FROM data"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, atan2(data.f64_field, Int64(2)) AS atan2 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, atan2(data.f64_field, Int64(2)) AS atan2 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, atan2:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] "###); assert_snapshot!(plan("SELECT foo, f64_field + 0.5 FROM data"), @r###" - Sort: data.time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] - Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time, data.foo AS foo, data.f64_field + Float64(0.5) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Sort: time ASC NULLS LAST, foo ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] + Projection: Dictionary(Int32, Utf8("data")) AS iox::measurement, data.time AS time, data.foo AS foo, data.f64_field + Float64(0.5) AS f64_field [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), foo:Dictionary(Int32, Utf8);N, f64_field:Float64;N] TableScan: data [TIME:Boolean;N, bar:Dictionary(Int32, Utf8);N, bool_field:Boolean;N, f64_field:Float64;N, foo:Dictionary(Int32, Utf8);N, i64_field:Int64;N, mixedCase:Float64;N, str_field:Utf8;N, time:Timestamp(Nanosecond, None), with space:Float64;N] 
"###); } @@ -1615,36 +1911,57 @@ mod test { fn test_select_single_measurement_group_by() { // Sort should be cpu, time assert_snapshot!(plan("SELECT usage_idle FROM cpu GROUP BY cpu"), @r###" - Sort: cpu ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.cpu AS cpu, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: cpu ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); // Sort should be cpu, time assert_snapshot!(plan("SELECT cpu, usage_idle FROM cpu GROUP BY cpu"), @r###" - Sort: cpu ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.cpu AS cpu, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: cpu ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); // Sort should be cpu, region, time assert_snapshot!(plan("SELECT usage_idle FROM cpu GROUP BY cpu, region"), @r###" - Sort: cpu ASC NULLS LAST, region ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.cpu AS cpu, cpu.region AS region, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: cpu ASC NULLS LAST, region ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, cpu.region AS region, cpu.usage_idle AS usage_idle 
[iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); // Sort should be cpu, region, time assert_snapshot!(plan("SELECT usage_idle FROM cpu GROUP BY region, cpu"), @r###" - Sort: cpu ASC NULLS LAST, region ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.cpu AS cpu, cpu.region AS region, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: cpu ASC NULLS LAST, region ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, cpu.region AS region, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); // Sort should be cpu, time, region assert_snapshot!(plan("SELECT region, usage_idle FROM cpu GROUP BY cpu"), @r###" - Sort: cpu ASC NULLS LAST, cpu.time ASC NULLS LAST, region ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, cpu.cpu AS cpu, cpu.region AS region, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Sort: cpu ASC NULLS LAST, time ASC NULLS LAST, region ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, cpu.region AS region, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, usage_idle:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + + // If a tag specified in a GROUP BY does not exist in the measurement, it should be omitted from the sort + assert_snapshot!(plan("SELECT usage_idle FROM cpu GROUP BY cpu, non_existent"), @r###" + Sort: cpu ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), 
time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, non_existent:Null;N, usage_idle:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, NULL AS non_existent, cpu.usage_idle AS usage_idle [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, non_existent:Null;N, usage_idle:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + + // If a tag specified in a projection does not exist in the measurement, it should be omitted from the sort + assert_snapshot!(plan("SELECT usage_idle, cpu, non_existent FROM cpu GROUP BY cpu"), @r###" + Sort: cpu ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), usage_idle:Float64;N, cpu:Dictionary(Int32, Utf8);N, non_existent:Null;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.usage_idle AS usage_idle, cpu.cpu AS cpu, NULL AS non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), usage_idle:Float64;N, cpu:Dictionary(Int32, Utf8);N, non_existent:Null;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + + // If a non-existent field is included in the GROUP BY and projection, it should not be duplicated + assert_snapshot!(plan("SELECT usage_idle, non_existent FROM cpu GROUP BY cpu, non_existent"), @r###" + Sort: cpu ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N, non_existent:Null;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, cpu.cpu AS cpu, cpu.usage_idle AS usage_idle, NULL AS non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Dictionary(Int32, Utf8);N, usage_idle:Float64;N, non_existent:Null;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); } @@ -1652,52 +1969,72 @@ mod test { #[test] fn test_select_multiple_measurements_group_by() { // Sort should be iox::measurement, cpu, time - assert_snapshot!(plan("SELECT usage_idle, free FROM cpu, disk GROUP BY cpu"), @r###" - Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] - Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] + assert_snapshot!(plan("SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY cpu"), @r###" + Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, 
Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] "###); // Sort should be iox::measurement, cpu, device, time - assert_snapshot!(plan("SELECT usage_idle, free FROM cpu, disk GROUP BY device, cpu"), @r###" - Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] - Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] + assert_snapshot!(plan("SELECT usage_idle, bytes_free FROM cpu, disk GROUP BY device, cpu"), @r###" + Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, 
disk.time, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] "###); // Sort should be iox::measurement, cpu, time, device - assert_snapshot!(plan("SELECT device, usage_idle, free FROM cpu, disk GROUP BY cpu"), @r###" - Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, cpu.time ASC NULLS LAST, device ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] - Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] + assert_snapshot!(plan("SELECT device, usage_idle, bytes_free FROM cpu, disk GROUP BY cpu"), @r###" + Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, time ASC NULLS LAST, device ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(cpu.cpu AS Utf8) AS cpu, CAST(NULL AS Utf8) AS device, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, free:Null;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(NULL AS Utf8) AS cpu, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), cpu:Utf8;N, device:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, 
host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] "###); // Sort should be iox::measurement, cpu, device, time - assert_snapshot!(plan("SELECT cpu, usage_idle, free FROM cpu, disk GROUP BY cpu, device"), @r###" - Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, cpu.time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] - Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time, CAST(NULL AS Utf8) AS device, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] + assert_snapshot!(plan("SELECT cpu, usage_idle, bytes_free FROM cpu, disk GROUP BY cpu, device"), @r###" + Sort: iox::measurement ASC NULLS LAST, cpu ASC NULLS LAST, device ASC NULLS LAST, time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(NULL AS Utf8) AS device, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] "###); // Sort should be iox::measurement, device, time, cpu - assert_snapshot!(plan("SELECT cpu, usage_idle, free FROM cpu, disk GROUP BY device"), @r###" - Sort: iox::measurement ASC NULLS LAST, device ASC NULLS LAST, cpu.time ASC NULLS LAST, cpu ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] - Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] - Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, 
cpu.time, CAST(NULL AS Utf8) AS device, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] + assert_snapshot!(plan("SELECT cpu, usage_idle, bytes_free FROM cpu, disk GROUP BY device"), @r###" + Sort: iox::measurement ASC NULLS LAST, device ASC NULLS LAST, time ASC NULLS LAST, cpu ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(NULL AS Utf8) AS device, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + "###); + + // If a tag specified in a GROUP BY does not exist across all measurements, it should be omitted from the sort + assert_snapshot!(plan("SELECT cpu, usage_idle, bytes_free FROM cpu, disk GROUP BY device, non_existent"), @r###" + Sort: iox::measurement ASC NULLS LAST, device ASC NULLS LAST, time ASC NULLS LAST, cpu ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, non_existent:Null;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, non_existent:Null;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(NULL AS Utf8) AS device, NULL AS non_existent, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, non_existent:Null;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(disk.device AS Utf8) AS device, NULL AS non_existent, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, non_existent:Null;N, cpu:Utf8;N, 
usage_idle:Float64;N, bytes_free:Int64;N] + TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + "###); + + // If a tag specified in a projection does not exist across all measurements, it should be omitted from the sort + assert_snapshot!(plan("SELECT cpu, usage_idle, bytes_free, non_existent FROM cpu, disk GROUP BY device"), @r###" + Sort: iox::measurement ASC NULLS LAST, device ASC NULLS LAST, time ASC NULLS LAST, cpu ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N, non_existent:Null;N] + Union [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N, non_existent:Null;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, cpu.time AS time, CAST(NULL AS Utf8) AS device, CAST(cpu.cpu AS Utf8) AS cpu, cpu.usage_idle AS usage_idle, CAST(NULL AS Int64) AS bytes_free, NULL AS non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N, non_existent:Null;N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] - Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, NULL AS free [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, free:Null;N] + Projection: Dictionary(Int32, Utf8("disk")) AS iox::measurement, disk.time AS time, CAST(disk.device AS Utf8) AS device, CAST(NULL AS Utf8) AS cpu, CAST(NULL AS Float64) AS usage_idle, disk.bytes_free AS bytes_free, NULL AS non_existent [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None), device:Utf8;N, cpu:Utf8;N, usage_idle:Float64;N, bytes_free:Int64;N, non_existent:Null;N] TableScan: disk [bytes_free:Int64;N, bytes_used:Int64;N, device:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] "###); } diff --git a/iox_query_influxql/src/plan/planner/select.rs b/iox_query_influxql/src/plan/planner/select.rs new file mode 100644 index 0000000000..ceb846dec3 --- /dev/null +++ b/iox_query_influxql/src/plan/planner/select.rs @@ -0,0 +1,141 @@ +use arrow::datatypes::DataType; +use datafusion::common::{DFSchemaRef, DataFusionError, Result}; +use datafusion::logical_expr::utils::find_column_exprs; +use datafusion::logical_expr::{Expr, LogicalPlan, LogicalPlanBuilder}; +use datafusion_util::AsExpr; +use generated_types::influxdata::iox::querier::v1::influx_ql_metadata::TagKeyColumn; +use influxdb_influxql_parser::common::OrderByClause; +use influxdb_influxql_parser::expression::{Expr as IQLExpr, VarRefDataType}; +use influxdb_influxql_parser::select::{Field, SelectStatement}; +use schema::INFLUXQL_MEASUREMENT_COLUMN_NAME; +use std::collections::HashMap; +use std::ops::Deref; + +/// Determines that all [`Expr::Column`] references in `exprs` refer to a +/// column in `columns`. 
+pub(crate) fn check_exprs_satisfy_columns(columns: &[Expr], exprs: &[Expr]) -> Result<()> { + if !columns.iter().all(|c| matches!(c, Expr::Column(_))) { + return Err(DataFusionError::Internal( + "expected Expr::Column".to_owned(), + )); + } + let column_exprs = find_column_exprs(exprs); + if column_exprs.iter().any(|expr| !columns.contains(expr)) { + return Err(DataFusionError::Plan( + "mixing aggregate and non-aggregate columns is not supported".to_owned(), + )); + } + Ok(()) +} + +pub(super) fn make_tag_key_column_meta( + fields: &[Field], + tag_set: &[&str], + is_projected: &[bool], +) -> Vec<TagKeyColumn> { + /// There is always a [INFLUXQL_MEASUREMENT_COLUMN_NAME] and `time` column projected in the LogicalPlan, + /// therefore the start index is 2 for determining the offsets of the + /// tag key columns in the column projection list. + const START_INDEX: usize = 1; + + // Create a map of tag key columns to their respective index in the projection + let index_map = fields + .iter() + .enumerate() + .filter_map(|(index, f)| match &f.expr { + IQLExpr::VarRef { + name, + data_type: Some(VarRefDataType::Tag) | None, + } => Some((name.deref().as_str(), index + START_INDEX)), + _ => None, + }) + .collect::<HashMap<_, _>>(); + + // tag_set was previously sorted, so tag_key_columns will be in the correct order + tag_set + .iter() + .zip(is_projected) + .map(|(tag_key, is_projected)| TagKeyColumn { + tag_key: (*tag_key).to_owned(), + column_index: *index_map.get(*tag_key).unwrap() as _, + is_projected: *is_projected, + }) + .collect() +} + +/// Create a plan that sorts the input plan. +/// +/// The ordering of the results is as follows: +/// +/// iox::measurement, [group by tag 0, .., group by tag n], time, [projection tag 0, .., projection tag n] +/// +/// ## NOTE +/// +/// Sort expressions referring to tag keys are always specified in lexicographically ascending order. +pub(super) fn plan_with_sort( + plan: LogicalPlan, + select: &SelectStatement, + group_by_tag_set: &[&str], + projection_tag_set: &[&str], +) -> Result<LogicalPlan> { + // If there are multiple measurements, we need to sort by the measurement column + // NOTE: Ideally DataFusion would maintain the order of the UNION ALL, which would eliminate + // the need to sort by measurement. + // See: https://github.com/influxdata/influxdb_iox/issues/7062 + let mut series_sort = if matches!(plan, LogicalPlan::Union(_)) { + vec![Expr::sort( + INFLUXQL_MEASUREMENT_COLUMN_NAME.as_expr(), + true, + false, + )] + } else { + vec![] + }; + + /// Map the fields to DataFusion [`Expr::Sort`] expressions, excluding those columns that + /// are [`DataType::Null`]'s, as sorting these column types is not supported and unnecessary. 
+ fn map_to_expr<'a>( + schema: &'a DFSchemaRef, + fields: &'a [&str], + ) -> impl Iterator<Item = Expr> + 'a { + fields + .iter() + .filter(|f| { + if let Ok(df) = schema.field_with_unqualified_name(f) { + *df.data_type() != DataType::Null + } else { + false + } + }) + .map(|f| Expr::sort(f.as_expr(), true, false)) + } + + let schema = plan.schema(); + + if !group_by_tag_set.is_empty() { + // Adding `LIMIT` or `OFFSET` with a `GROUP BY tag, ...` clause is not supported + // + // See: https://github.com/influxdata/influxdb_iox/issues/6920 + if select.offset.is_some() || select.limit.is_some() { + return Err(DataFusionError::NotImplemented( + "GROUP BY combined with LIMIT or OFFSET clause".to_owned(), + )); + } + + series_sort.extend(map_to_expr(schema, group_by_tag_set)); + }; + + series_sort.push(Expr::sort( + "time".as_expr(), + match select.order_by { + // Default behaviour is to sort by time in ascending order if there is no ORDER BY + None | Some(OrderByClause::Ascending) => true, + Some(OrderByClause::Descending) => false, + }, + false, + )); + + series_sort.extend(map_to_expr(schema, projection_tag_set)); + + LogicalPlanBuilder::from(plan).sort(series_sort)?.build() +} diff --git a/iox_query_influxql/src/plan/planner_time_range_expression.rs b/iox_query_influxql/src/plan/planner_time_range_expression.rs index 573dbfb385..0ad1a70301 100644 --- a/iox_query_influxql/src/plan/planner_time_range_expression.rs +++ b/iox_query_influxql/src/plan/planner_time_range_expression.rs @@ -1,5 +1,6 @@ use crate::plan::timestamp::parse_timestamp; use crate::plan::util::binary_operator_to_df_operator; +use arrow::temporal_conversions::MILLISECONDS_IN_DAY; use datafusion::common::{DataFusionError, Result, ScalarValue}; use datafusion::logical_expr::{binary_expr, lit, now, BinaryExpr, Expr as DFExpr, Operator}; use influxdb_influxql_parser::expression::BinaryOperator; @@ -70,6 +71,48 @@ pub(in crate::plan) fn time_range_to_df_expr(expr: &Expr, tz: Option<chrono_tz:: }) } +/// Simplifies `expr` to an InfluxQL duration and returns a DataFusion interval. +/// +/// Returns an error if `expr` is not a duration expression. +/// +/// ## NOTE +/// +/// The returned interval is limited to a precision of milliseconds, +/// due to [issue #7204][] +/// +/// [issue #7204]: https://github.com/influxdata/influxdb_iox/issues/7204 +pub(super) fn expr_to_df_interval_dt(expr: &Expr) -> ExprResult { + let v = duration_expr_to_nanoseconds(expr)?; + if v % 1_000_000 != 0 { + Err(DataFusionError::NotImplemented("interval limited to a precision of milliseconds. See https://github.com/influxdata/influxdb_iox/issues/7204".to_owned())) + } else { + let v = v / 1_000_000; + let days = v / MILLISECONDS_IN_DAY; + // keep the sign on `days` and remove it from `millis` + let millis = (v - days * MILLISECONDS_IN_DAY).abs(); + + // It is not possible for an InfluxQL duration to overflow an IntervalDayTime. + // An InfluxQL duration encodes a number of nanoseconds into a 64-bit signed integer, + // which is a maximum of 15,250.2845 days. An IntervalDayTime can encode days + // as a signed 32-bit number. + Ok(lit(ScalarValue::new_interval_dt( + days as i32, + millis as i32, + ))) + } +} + +/// Reduces an InfluxQL duration `expr` to a nanosecond interval. 
+pub(super) fn duration_expr_to_nanoseconds(expr: &Expr) -> Result<i64> { + let df_expr = reduce_expr(expr, None)?; + match df_expr { + DFExpr::Literal(ScalarValue::IntervalMonthDayNano(Some(v))) => Ok(v as i64), + DFExpr::Literal(ScalarValue::Float64(Some(v))) => Ok(v as i64), + DFExpr::Literal(ScalarValue::Int64(Some(v))) => Ok(v), + _ => Err(DataFusionError::Plan("invalid duration expression".into())), + } +} + fn map_expr_err(expr: &Expr) -> impl Fn(DataFusionError) -> DataFusionError + '_ { move |err| { DataFusionError::Plan(format!( @@ -393,6 +436,7 @@ fn parse_timestamp_df_expr(s: &str, tz: Option<chrono_tz::Tz>) -> ExprResult { #[cfg(test)] mod test { use super::*; + use assert_matches::assert_matches; use influxdb_influxql_parser::expression::ConditionalExpression; use test_helpers::assert_error; @@ -545,4 +589,33 @@ mod test { "TimestampNanosecond(1081505100123456789, None)" // 2004-04-09T10:05:00.123456789Z ); } + + #[test] + fn test_expr_to_df_interval_dt() { + fn parse(s: &str) -> ExprResult { + let expr = s + .parse::<ConditionalExpression>() + .unwrap() + .expr() + .unwrap() + .clone(); + expr_to_df_interval_dt(&expr) + } + + use ScalarValue::IntervalDayTime; + + assert_matches!(parse("10s").unwrap(), DFExpr::Literal(IntervalDayTime(v)) if IntervalDayTime(v) == ScalarValue::new_interval_dt(0, 10_000)); + assert_matches!(parse("10s + 1d").unwrap(), DFExpr::Literal(IntervalDayTime(v)) if IntervalDayTime(v) == ScalarValue::new_interval_dt(1, 10_000)); + assert_matches!(parse("5d10ms").unwrap(), DFExpr::Literal(IntervalDayTime(v)) if IntervalDayTime(v) == ScalarValue::new_interval_dt(5, 10)); + assert_matches!(parse("-2d10ms").unwrap(), DFExpr::Literal(IntervalDayTime(v)) if IntervalDayTime(v) == ScalarValue::new_interval_dt(-2, 10)); + + // Fallible + + use DataFusionError::NotImplemented; + + // Don't support a precision greater than milliseconds. + // + // See: https://github.com/influxdata/influxdb_iox/issues/7204 + assert_error!(parse("-2d10ns"), NotImplemented(ref s) if s == "interval limited to a precision of milliseconds. See https://github.com/influxdata/influxdb_iox/issues/7204"); + } } diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs index a49ef51ba9..2ede32d22f 100644 --- a/iox_query_influxql/src/plan/rewriter.rs +++ b/iox_query_influxql/src/plan/rewriter.rs @@ -180,9 +180,11 @@ fn has_wildcards(stmt: &SelectStatement) -> (bool, bool) { /// Rewrite the projection list and GROUP BY of the specified `SELECT` statement. /// -/// Wildcards and regular expressions in the `SELECT` projection list and `GROUP BY` are expanded. -/// Any fields with no type specifier are rewritten with the appropriate type, if they exist in the -/// underlying schema. +/// The following transformations are performed: +/// +/// * Wildcards and regular expressions in the `SELECT` projection list and `GROUP BY` are expanded. +/// * Any fields with no type specifier are rewritten with the appropriate type, if they exist in the +/// underlying schema. /// /// Derived from [Go implementation](https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L1185). 
fn rewrite_field_list(s: &dyn SchemaProvider, stmt: &mut SelectStatement) -> Result<()> { diff --git a/iox_query_influxql/src/plan/test_utils.rs b/iox_query_influxql/src/plan/test_utils.rs index 45e22ac9d6..17fdf416ac 100644 --- a/iox_query_influxql/src/plan/test_utils.rs +++ b/iox_query_influxql/src/plan/test_utils.rs @@ -5,7 +5,7 @@ use crate::plan::SchemaProvider; use datafusion::common::{DataFusionError, Result as DataFusionResult}; use datafusion::datasource::empty::EmptyTable; use datafusion::datasource::provider_as_source; -use datafusion::logical_expr::TableSource; +use datafusion::logical_expr::{AggregateUDF, ScalarUDF, TableSource}; use influxdb_influxql_parser::parse_statements; use influxdb_influxql_parser::select::{Field, SelectStatement}; use influxdb_influxql_parser::statement::Statement; @@ -159,6 +159,14 @@ impl SchemaProvider for MockSchemaProvider { .ok_or_else(|| DataFusionError::Plan(format!("measurement does not exist: {name}"))) } + fn get_function_meta(&self, _name: &str) -> Option<Arc<ScalarUDF>> { + None + } + + fn get_aggregate_meta(&self, _name: &str) -> Option<Arc<AggregateUDF>> { + None + } + fn table_names(&self) -> Vec<&'_ str> { self.tables .keys() diff --git a/iox_query_influxql/src/plan/util_copy.rs b/iox_query_influxql/src/plan/util_copy.rs new file mode 100644 index 0000000000..94b8bfd2a1 --- /dev/null +++ b/iox_query_influxql/src/plan/util_copy.rs @@ -0,0 +1,337 @@ +// NOTE: This code is copied from DataFusion, as it is not public, +// so all warnings are disabled. +#![allow(warnings)] +#![allow(clippy::all)] +//! A collection of utility functions copied from DataFusion. +//! +//! If these APIs are stabilised and made public, they can be removed from IOx. +//! +//! NOTE +use datafusion::common::{DataFusionError, Result}; +use datafusion::logical_expr::{ + expr::{ + AggregateFunction, Between, BinaryExpr, Case, Cast, Expr, GetIndexedField, GroupingSet, + Like, Sort, TryCast, WindowFunction, + }, + utils::expr_as_column_expr, + LogicalPlan, +}; + +/// Rebuilds an `Expr` as a projection on top of a collection of `Expr`'s. +/// +/// For example, the expression `a + b < 1` would require, as input, the 2 +/// individual columns, `a` and `b`. But, if the base expressions already +/// contain the `a + b` result, then that may be used in lieu of the `a` and +/// `b` columns. +/// +/// This is useful in the context of a query like: +/// +/// SELECT a + b < 1 ... GROUP BY a + b +/// +/// where post-aggregation, `a + b` need not be a projection against the +/// individual columns `a` and `b`, but rather it is a projection against the +/// `a + b` found in the GROUP BY. +/// +/// Source: <https://github.com/apache/arrow-datafusion/blob/e6d71068474f3b2ef9ad5e9af85f56f0d0560a1b/datafusion/sql/src/utils.rs#L63> +pub(crate) fn rebase_expr(expr: &Expr, base_exprs: &[Expr], plan: &LogicalPlan) -> Result<Expr> { + clone_with_replacement(expr, &|nested_expr| { + if base_exprs.contains(nested_expr) { + Ok(Some(expr_as_column_expr(nested_expr, plan)?)) + } else { + Ok(None) + } + }) +} + +/// Returns a cloned `Expr`, but any of the `Expr`'s in the tree may be +/// replaced/customized by the replacement function. +/// +/// The replacement function is called repeatedly with `Expr`, starting with +/// the argument `expr`, then descending depth-first through its +/// descendants. The function chooses to replace or keep (clone) each `Expr`. 
+/// +/// The function's return type is `Result<Option<Expr>>>`, where: +/// +/// * `Ok(Some(replacement_expr))`: A replacement `Expr` is provided; it is +/// swapped in at the particular node in the tree. Any nested `Expr` are +/// not subject to cloning/replacement. +/// * `Ok(None)`: A replacement `Expr` is not provided. The `Expr` is +/// recreated, with all of its nested `Expr`'s subject to +/// cloning/replacement. +/// * `Err(err)`: Any error returned by the function is returned as-is by +/// `clone_with_replacement()`. +/// +/// Source: <https://github.com/apache/arrow-datafusion/blob/26e1b20ea/datafusion/sql/src/utils.rs#L153> +fn clone_with_replacement<F>(expr: &Expr, replacement_fn: &F) -> Result<Expr> +where + F: Fn(&Expr) -> Result<Option<Expr>>, +{ + let replacement_opt = replacement_fn(expr)?; + + match replacement_opt { + // If we were provided a replacement, use the replacement. Do not + // descend further. + Some(replacement) => Ok(replacement), + // No replacement was provided, clone the node and recursively call + // clone_with_replacement() on any nested expressions. + None => { + match expr { + Expr::AggregateFunction(AggregateFunction { + fun, + args, + distinct, + filter, + }) => Ok(Expr::AggregateFunction(AggregateFunction::new( + fun.clone(), + args.iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + *distinct, + filter.clone(), + ))), + Expr::WindowFunction(WindowFunction { + fun, + args, + partition_by, + order_by, + window_frame, + }) => Ok(Expr::WindowFunction(WindowFunction::new( + fun.clone(), + args.iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<_>>>()?, + partition_by + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<_>>>()?, + order_by + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<_>>>()?, + window_frame.clone(), + ))), + Expr::AggregateUDF { fun, args, filter } => Ok(Expr::AggregateUDF { + fun: fun.clone(), + args: args + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + filter: filter.clone(), + }), + Expr::Alias(nested_expr, alias_name) => Ok(Expr::Alias( + Box::new(clone_with_replacement(nested_expr, replacement_fn)?), + alias_name.clone(), + )), + Expr::Between(Between { + expr, + negated, + low, + high, + }) => Ok(Expr::Between(Between::new( + Box::new(clone_with_replacement(expr, replacement_fn)?), + *negated, + Box::new(clone_with_replacement(low, replacement_fn)?), + Box::new(clone_with_replacement(high, replacement_fn)?), + ))), + Expr::InList { + expr: nested_expr, + list, + negated, + } => Ok(Expr::InList { + expr: Box::new(clone_with_replacement(nested_expr, replacement_fn)?), + list: list + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + negated: *negated, + }), + Expr::BinaryExpr(BinaryExpr { left, right, op }) => { + Ok(Expr::BinaryExpr(BinaryExpr::new( + Box::new(clone_with_replacement(left, replacement_fn)?), + *op, + Box::new(clone_with_replacement(right, replacement_fn)?), + ))) + } + Expr::Like(Like { + negated, + expr, + pattern, + escape_char, + }) => Ok(Expr::Like(Like::new( + *negated, + Box::new(clone_with_replacement(expr, replacement_fn)?), + Box::new(clone_with_replacement(pattern, replacement_fn)?), + *escape_char, + ))), + Expr::ILike(Like { + negated, + expr, + pattern, + escape_char, + }) => Ok(Expr::ILike(Like::new( + *negated, + 
Box::new(clone_with_replacement(expr, replacement_fn)?), + Box::new(clone_with_replacement(pattern, replacement_fn)?), + *escape_char, + ))), + Expr::SimilarTo(Like { + negated, + expr, + pattern, + escape_char, + }) => Ok(Expr::SimilarTo(Like::new( + *negated, + Box::new(clone_with_replacement(expr, replacement_fn)?), + Box::new(clone_with_replacement(pattern, replacement_fn)?), + *escape_char, + ))), + Expr::Case(case) => Ok(Expr::Case(Case::new( + match &case.expr { + Some(case_expr) => { + Some(Box::new(clone_with_replacement(case_expr, replacement_fn)?)) + } + None => None, + }, + case.when_then_expr + .iter() + .map(|(a, b)| { + Ok(( + Box::new(clone_with_replacement(a, replacement_fn)?), + Box::new(clone_with_replacement(b, replacement_fn)?), + )) + }) + .collect::<Result<Vec<(_, _)>>>()?, + match &case.else_expr { + Some(else_expr) => { + Some(Box::new(clone_with_replacement(else_expr, replacement_fn)?)) + } + None => None, + }, + ))), + Expr::ScalarFunction { fun, args } => Ok(Expr::ScalarFunction { + fun: fun.clone(), + args: args + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + }), + Expr::ScalarUDF { fun, args } => Ok(Expr::ScalarUDF { + fun: fun.clone(), + args: args + .iter() + .map(|arg| clone_with_replacement(arg, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + }), + Expr::Negative(nested_expr) => Ok(Expr::Negative(Box::new( + clone_with_replacement(nested_expr, replacement_fn)?, + ))), + Expr::Not(nested_expr) => Ok(Expr::Not(Box::new(clone_with_replacement( + nested_expr, + replacement_fn, + )?))), + Expr::IsNotNull(nested_expr) => Ok(Expr::IsNotNull(Box::new( + clone_with_replacement(nested_expr, replacement_fn)?, + ))), + Expr::IsNull(nested_expr) => Ok(Expr::IsNull(Box::new(clone_with_replacement( + nested_expr, + replacement_fn, + )?))), + Expr::IsTrue(nested_expr) => Ok(Expr::IsTrue(Box::new(clone_with_replacement( + nested_expr, + replacement_fn, + )?))), + Expr::IsFalse(nested_expr) => Ok(Expr::IsFalse(Box::new(clone_with_replacement( + nested_expr, + replacement_fn, + )?))), + Expr::IsUnknown(nested_expr) => Ok(Expr::IsUnknown(Box::new( + clone_with_replacement(nested_expr, replacement_fn)?, + ))), + Expr::IsNotTrue(nested_expr) => Ok(Expr::IsNotTrue(Box::new( + clone_with_replacement(nested_expr, replacement_fn)?, + ))), + Expr::IsNotFalse(nested_expr) => Ok(Expr::IsNotFalse(Box::new( + clone_with_replacement(nested_expr, replacement_fn)?, + ))), + Expr::IsNotUnknown(nested_expr) => Ok(Expr::IsNotUnknown(Box::new( + clone_with_replacement(nested_expr, replacement_fn)?, + ))), + Expr::Cast(Cast { expr, data_type }) => Ok(Expr::Cast(Cast::new( + Box::new(clone_with_replacement(expr, replacement_fn)?), + data_type.clone(), + ))), + Expr::TryCast(TryCast { + expr: nested_expr, + data_type, + }) => Ok(Expr::TryCast(TryCast::new( + Box::new(clone_with_replacement(nested_expr, replacement_fn)?), + data_type.clone(), + ))), + Expr::Sort(Sort { + expr: nested_expr, + asc, + nulls_first, + }) => Ok(Expr::Sort(Sort::new( + Box::new(clone_with_replacement(nested_expr, replacement_fn)?), + *asc, + *nulls_first, + ))), + Expr::Column { .. } + | Expr::OuterReferenceColumn(_, _) + | Expr::Literal(_) + | Expr::ScalarVariable(_, _) + | Expr::Exists { .. 
} + | Expr::ScalarSubquery(_) => Ok(expr.clone()), + Expr::InSubquery { + expr: nested_expr, + subquery, + negated, + } => Ok(Expr::InSubquery { + expr: Box::new(clone_with_replacement(nested_expr, replacement_fn)?), + subquery: subquery.clone(), + negated: *negated, + }), + Expr::Wildcard => Ok(Expr::Wildcard), + Expr::QualifiedWildcard { .. } => Ok(expr.clone()), + Expr::GetIndexedField(GetIndexedField { key, expr }) => { + Ok(Expr::GetIndexedField(GetIndexedField::new( + Box::new(clone_with_replacement(expr.as_ref(), replacement_fn)?), + key.clone(), + ))) + } + Expr::GroupingSet(set) => match set { + GroupingSet::Rollup(exprs) => Ok(Expr::GroupingSet(GroupingSet::Rollup( + exprs + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + ))), + GroupingSet::Cube(exprs) => Ok(Expr::GroupingSet(GroupingSet::Cube( + exprs + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + ))), + GroupingSet::GroupingSets(lists_of_exprs) => { + let mut new_lists_of_exprs = vec![]; + for exprs in lists_of_exprs { + new_lists_of_exprs.push( + exprs + .iter() + .map(|e| clone_with_replacement(e, replacement_fn)) + .collect::<Result<Vec<Expr>>>()?, + ); + } + Ok(Expr::GroupingSet(GroupingSet::GroupingSets( + new_lists_of_exprs, + ))) + } + }, + Expr::Placeholder { id, data_type } => Ok(Expr::Placeholder { + id: id.clone(), + data_type: data_type.clone(), + }), + } + } + } +} diff --git a/schema/src/lib.rs b/schema/src/lib.rs index 6dd3fa284d..61bb8086fe 100644 --- a/schema/src/lib.rs +++ b/schema/src/lib.rs @@ -282,6 +282,12 @@ impl Schema { ) } + /// Return the InfluxDB data model type, if any, and underlying arrow + /// schema field for the column identified by `name`. + pub fn field_by_name(&self, name: &str) -> Option<(InfluxColumnType, &ArrowField)> { + self.find_index_of(name).map(|index| self.field(index)) + } + /// Find the index of the column with the given name, if any. pub fn find_index_of(&self, name: &str) -> Option<usize> { self.inner.index_of(name).ok() diff --git a/test_helpers_end_to_end/src/client.rs b/test_helpers_end_to_end/src/client.rs index 818f1795cb..eb844d321b 100644 --- a/test_helpers_end_to_end/src/client.rs +++ b/test_helpers_end_to_end/src/client.rs @@ -11,6 +11,7 @@ use influxdb_iox_client::{ }; use mutable_batch_lp::lines_to_batches; use mutable_batch_pb::encode::encode_write; +use std::fmt::Display; use tonic::IntoRequest; /// Writes the line protocol to the write_base/api/v2/write endpoint (typically on the router) @@ -129,11 +130,11 @@ pub async fn run_sql( /// /// Use [`try_run_influxql`] if you want to check the error manually. 
pub async fn run_influxql( - influxql: impl Into<String>, + influxql: impl Into<String> + Clone + Display, namespace: impl Into<String>, querier_connection: Connection, ) -> Vec<RecordBatch> { - try_run_influxql(influxql, namespace, querier_connection) + try_run_influxql(influxql.clone(), namespace, querier_connection) .await - .expect("Error executing influxql query") + .unwrap_or_else(|_| panic!("Error executing InfluxQL query: {influxql}")) } diff --git a/test_helpers_end_to_end/src/snapshot_comparison.rs b/test_helpers_end_to_end/src/snapshot_comparison.rs index 44a4bc3c92..83861d6d22 100644 --- a/test_helpers_end_to_end/src/snapshot_comparison.rs +++ b/test_helpers_end_to_end/src/snapshot_comparison.rs @@ -1,12 +1,14 @@ mod queries; -use crate::{run_influxql, run_sql, snapshot_comparison::queries::TestQueries, MiniCluster}; +use crate::{run_sql, snapshot_comparison::queries::TestQueries, try_run_influxql, MiniCluster}; +use arrow_flight::error::FlightError; use snafu::{OptionExt, ResultExt, Snafu}; use std::{ fmt::{Display, Formatter}, fs, path::{Path, PathBuf}, }; +use tonic::Code; use self::queries::Query; @@ -227,12 +229,21 @@ async fn run_query( .await } Language::InfluxQL => { - run_influxql( + match try_run_influxql( query_text, cluster.namespace(), cluster.querier().querier_grpc_connection(), ) .await + { + Ok(results) => results, + Err(influxdb_iox_client::flight::Error::ArrowFlightError(FlightError::Tonic( + status, + ))) if status.code() == Code::InvalidArgument => { + return Ok(vec![status.message().to_owned()]) + } + Err(err) => return Ok(vec![err.to_string()]), + } } };
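The expr_to_df_interval_dt helper introduced in the diff above converts an InfluxQL duration into a DataFusion day-time interval by splitting a millisecond total into whole days and leftover milliseconds. The sketch below re-derives that split for the duration 5d10ms without calling the module-private helper; it assumes only arrow's MILLISECONDS_IN_DAY constant (which the diff itself imports), and the expected (5, 10) result matches the test_expr_to_df_interval_dt case shown above.

use arrow::temporal_conversions::MILLISECONDS_IN_DAY; // 86_400_000 ms per day

fn main() {
    // 5d10ms expressed in nanoseconds, as duration_expr_to_nanoseconds would report it.
    let nanos: i64 = 5 * MILLISECONDS_IN_DAY * 1_000_000 + 10 * 1_000_000;
    // The interval is limited to millisecond precision (issue #7204).
    assert_eq!(nanos % 1_000_000, 0);
    let millis_total = nanos / 1_000_000;
    // Split into whole days and the remaining milliseconds, keeping the sign on `days`.
    let days = millis_total / MILLISECONDS_IN_DAY;
    let millis = (millis_total - days * MILLISECONDS_IN_DAY).abs();
    // Matches ScalarValue::new_interval_dt(5, 10) produced by expr_to_df_interval_dt.
    assert_eq!((days, millis), (5, 10));
}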
b2e5ea22666f8e16187024b81ec04eca4f2e51b2
Fraser Savage
2023-05-03 11:50:14
Add test & docs for WriteOpEntryDecoder
Adds some documentation for the WriteOpEntryDecoder and a unit test that asserts it skips over non-write entries and that it can continue to be consumed afterwards.
null
refactor(wal): Add test & docs for WriteOpEntryDecoder Adds some documentation for the WriteOpEntryDecoder and a unit test that asserts it skips over non-write entries and that it can continue to be consumed afterwards.
diff --git a/Cargo.lock b/Cargo.lock index 9551cf4d6b..4365e64912 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6328,6 +6328,7 @@ dependencies = [ name = "wal" version = "0.1.0" dependencies = [ + "assert_matches", "async-trait", "byteorder", "bytes", diff --git a/wal/Cargo.toml b/wal/Cargo.toml index 048d49b972..de1d55c06f 100644 --- a/wal/Cargo.toml +++ b/wal/Cargo.toml @@ -29,6 +29,7 @@ tokio-util = "0.7" workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] # In alphabetical order +assert_matches = "1.5.0" dml = { path = "../dml" } mutable_batch_lp = { path = "../mutable_batch_lp" } test_helpers = { path = "../test_helpers" } diff --git a/wal/src/lib.rs b/wal/src/lib.rs index 4a46b34fbe..b60b85a475 100644 --- a/wal/src/lib.rs +++ b/wal/src/lib.rs @@ -562,23 +562,33 @@ impl std::fmt::Debug for ClosedSegmentFileReader { } } +/// An in-memory representation of a WAL write operation entry. #[derive(Debug)] pub struct WriteOpEntry { pub namespace: NamespaceId, pub table_batches: HashMap<i64, MutableBatch>, } +/// A decoder that reads from a closed segment file and parses write +/// operations from their on-disk format to an internal format. #[derive(Debug)] -pub struct WriteOpDecoder { +pub struct WriteOpEntryDecoder { reader: ClosedSegmentFileReader, } -impl WriteOpDecoder { +impl WriteOpEntryDecoder { + /// Creates a decoder which will use the closed segment file of `reader` to + /// decode write ops from their on-disk format. pub fn from_closed_segment(reader: ClosedSegmentFileReader) -> Self { Self { reader } } - pub fn next_write_entry_batch(&mut self) -> Result<Option<Vec<WriteOpEntry>>> { + /// Reads a collection of write op entries in the next WAL entry batch from the + /// underlying closed segment. A returned Ok(None) indicates that there are no + /// more entries to be decoded from the underlying segment. A zero-length vector + /// may be returned if there are no writes in a WAL entry batch, but does not + /// indicate the decoder is consumed. + pub fn next_write_op_entry_batch(&mut self) -> Result<Option<Vec<WriteOpEntry>>> { match self.reader.next_batch()? 
{ Some(batch) => Ok(batch .into_iter() @@ -621,7 +631,7 @@ impl ClosedSegment { #[cfg(test)] mod tests { - use super::*; + use assert_matches::assert_matches; use data_types::{NamespaceId, SequenceNumber, TableId}; use dml::DmlWrite; use generated_types::influxdata::{ @@ -630,6 +640,10 @@ mod tests { }; use mutable_batch_lp::lines_to_batches; + use super::*; + + const TEST_NAMESPACE_ID: NamespaceId = NamespaceId::new(42); + #[tokio::test] async fn wal_write_and_read_ops() { let dir = test_helpers::tmp_dir().unwrap(); @@ -727,6 +741,64 @@ mod tests { ); } + #[tokio::test] + async fn decode_write_op_entries() { + let dir = test_helpers::tmp_dir().unwrap(); + let wal = Wal::new(&dir.path()).await.unwrap(); + + let w1 = test_data("m1,t=foo v=1i 1"); + let w2 = test_data("m1,t=foo v=2i 2"); + + let op1 = SequencedWalOp { + sequence_number: 0, + op: WalOp::Write(w1), + }; + let op2 = SequencedWalOp { + sequence_number: 1, + op: WalOp::Delete(test_delete()), + }; + let op3 = SequencedWalOp { + sequence_number: 1, + op: WalOp::Persist(test_persist()), + }; + // A second write entry coming after a delete and persist entry must still be yielded + let op4 = SequencedWalOp { + sequence_number: 2, + op: WalOp::Write(w2), + }; + + wal.write_op(op1.clone()); + wal.write_op(op2.clone()).changed().await.unwrap(); + wal.write_op(op3.clone()); + wal.write_op(op4.clone()).changed().await.unwrap(); + + let (closed, _) = wal.rotate().unwrap(); + + let mut decoder = WriteOpEntryDecoder::from_closed_segment( + wal.reader_for_segment(closed.id) + .expect("failed to open reader for closed WAL segment"), + ); + + let mut write_op_entries = vec![]; + while let Ok(Some(mut entry_batch)) = decoder.next_write_op_entry_batch() { + write_op_entries.append(&mut entry_batch); + } + // The decoder should find 2 entries, each containing a single table write + assert_eq!(write_op_entries.len(), 2); + assert_matches!(write_op_entries.get(0), Some(got_op1) => { + assert_eq!(got_op1.namespace, TEST_NAMESPACE_ID); + assert_eq!(got_op1.table_batches.len(), 1); + let mb = got_op1.table_batches.get(&0).expect("no mutable batch for table ID 0"); + assert_eq!(mb.column_names(), vec!["t", "v", "time"].into_iter().collect()); + }); + assert_matches!(write_op_entries.get(1), Some(got_op2) => { + assert_eq!(got_op2.namespace, TEST_NAMESPACE_ID); + assert_eq!(got_op2.table_batches.len(), 1); + let mb = got_op2.table_batches.get(&0).expect("no mutable batch for table ID 0"); + assert_eq!(mb.column_names(), vec!["t", "v", "time"].into_iter().collect()); + }); + } + fn test_data(lp: &str) -> DatabaseBatch { let batches = lines_to_batches(lp, 0).unwrap(); let batches = batches @@ -736,7 +808,7 @@ mod tests { .collect(); let write = DmlWrite::new( - NamespaceId::new(42), + TEST_NAMESPACE_ID, batches, "bananas".into(), Default::default(), @@ -747,7 +819,7 @@ mod tests { fn test_delete() -> DeletePayload { DeletePayload { - database_id: 42, + database_id: TEST_NAMESPACE_ID.get(), predicate: None, table_name: "bananas".into(), } @@ -755,7 +827,7 @@ mod tests { fn test_persist() -> PersistOp { PersistOp { - namespace_id: 42, + namespace_id: TEST_NAMESPACE_ID.get(), parquet_file_uuid: "b4N4N4Z".into(), partition_id: 43, table_id: 44, diff --git a/wal_inspect/src/lib.rs b/wal_inspect/src/lib.rs index 77fcdc3042..0b37839d80 100644 --- a/wal_inspect/src/lib.rs +++ b/wal_inspect/src/lib.rs @@ -127,12 +127,12 @@ mod tests { iox::wal::v1::sequenced_wal_op::Op, pbdata::v1::DatabaseBatch, }; use mutable_batch_lp::lines_to_batches; - use wal::{Error as 
WalError, SequencedWalOp, WriteOpDecoder}; + use wal::{Error as WalError, SequencedWalOp, WriteOpEntryDecoder}; use super::*; #[tokio::test] - async fn translate_good_wal() { + async fn translate_good_wal_segment_file() { let test_dir = test_helpers::tmp_dir().expect("failed to create test dir"); let wal = wal::Wal::new(test_dir.path()).await.unwrap(); @@ -163,7 +163,7 @@ mod tests { // Rotate the WAL and create the translator. let (closed, _) = wal.rotate().expect("failed to rotate WAL"); - let mut decoder = WriteOpDecoder::from_closed_segment( + let mut decoder = WriteOpEntryDecoder::from_closed_segment( wal.reader_for_segment(closed.id()) .expect("failed to open reader for closed segment"), ); @@ -172,7 +172,7 @@ mod tests { let mut decoded_entries = 0; let mut decoded_ops = 0; while let Some(new_entries) = decoder - .next_write_entry_batch() + .next_write_op_entry_batch() .expect("decoder error should not occur") { decoded_entries += 1; @@ -213,7 +213,7 @@ mod tests { } #[tokio::test] - async fn partial_translate_bad_wal() { + async fn partial_translate_bad_wal_segment_file() { let test_dir = test_helpers::tmp_dir().expect("failed to create test dir"); let wal = wal::Wal::new(test_dir.path()).await.unwrap(); @@ -269,7 +269,7 @@ mod tests { } // Create the translator and read as much as possible out of the bad segment file - let mut decoder = WriteOpDecoder::from_closed_segment( + let mut decoder = WriteOpEntryDecoder::from_closed_segment( wal.reader_for_segment(closed.id()) .expect("failed to open reader for closed segment"), ); @@ -278,7 +278,7 @@ mod tests { let mut decoded_entries = 0; let mut decoded_ops = 0; loop { - match decoder.next_write_entry_batch() { + match decoder.next_write_op_entry_batch() { // If the translator returns `None` indicating successful translation // then something is broken. Ok(v) => assert_matches!(v, Some(new_entries) => {
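The commit above describes how the WriteOpEntryDecoder skips non-write WAL entries and keeps yielding batches until the closed segment is exhausted. The following sketch shows one way a caller might drain such a decoder, assuming the public API visible in the diff (WriteOpEntryDecoder::from_closed_segment, next_write_op_entry_batch, and the wal Error and reader types); the drain_writes function itself is illustrative and not part of the commit.

use wal::{ClosedSegmentFileReader, Error, WriteOpEntry, WriteOpEntryDecoder};

// Illustrative helper: collect every decoded write-op entry from one closed segment.
fn drain_writes(reader: ClosedSegmentFileReader) -> Result<Vec<WriteOpEntry>, Error> {
    let mut decoder = WriteOpEntryDecoder::from_closed_segment(reader);
    let mut entries = Vec::new();
    // Ok(None) signals the segment is fully consumed; an empty batch does not.
    while let Some(mut batch) = decoder.next_write_op_entry_batch()? {
        entries.append(&mut batch);
    }
    Ok(entries)
}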
8afa2f473d225ef56d19580665cd6515cf638225
Phil Bracikowski
2023-06-16 08:16:11
adjust parsing tagsets: better error message and error on corner case (#7997)
* fix(lp parser): improves the error message when parsing tagsets A tagset is an optional list of (tag_key=tag_value) pairs that starts immediately after the measurement. The tagset and measurement are separated by a comma. If that comma is present, at least one tag=value pair must be present. This PR expresses that requirement and now provides a Tag Set Malformed error if there's something wrong with the tag set. A test case is added for input that is missing the tag value - I'd like to improve the errors to be more specific still, but one case at a time. * fixes #7772 * chore: add test case This commit adds a test case for a comma after the measurement but no tagset thereafter. This is also an error condition. Before this PR this was accepted as an empty tagset, but the spec and the canonical Go parser do not permit this.
null
fix(lp parser): adjust parsing tagsets: better error message and error on corner case (#7997) * fix(lp parser): improves the error message when parsing tagsets A tagset is an optional list of (tag_key=tag_value) pairs that starts immediately after the measurement. The tagset and measurement are separated by a comma. If that comma is present, at least one tag=value pair must be present. This PR expresses that requirement and now provides a Tag Set Malformed error if there's something wrong with the tag set. A test case is added for input that is missing the tag value - I'd like to improve the errors to be more specific still, but one case at a time. * fixes #7772 * chore: add test case This commit adds a test case for a comma after the measurement but no tagset thereafter. This is also an error condition. Before this PR this was accepted as an empty tagset, but the spec and the canonical Go parser do not permit this.
diff --git a/influxdb_line_protocol/src/lib.rs b/influxdb_line_protocol/src/lib.rs index 0042ca8a5a..09b1e65430 100644 --- a/influxdb_line_protocol/src/lib.rs +++ b/influxdb_line_protocol/src/lib.rs @@ -5,9 +5,9 @@ //! compatible with the [Go implementation], however, this //! implementation uses a [nom] combinator-based parser rather than //! attempting to port the imperative Go logic so there are likely -//! some small diferences. +//! some small differences. //! -//! 2. A [builder](crate::builder::LineProtocolBuilder) to contruct valid [InfluxDB Line Protocol] +//! 2. A [builder](crate::builder::LineProtocolBuilder) to construct valid [InfluxDB Line Protocol] //! //! # Example //! @@ -140,6 +140,9 @@ pub enum Error { ))] CannotParseEntireLine { trailing_content: String }, + #[snafu(display(r#"Tag Set Malformed"#))] + TagSetMalformed, + // TODO: Replace this with specific failures. #[snafu(display(r#"A generic parsing error occurred: {:?}"#, kind))] GenericParsingError { @@ -514,7 +517,7 @@ impl PartialEq<String> for EscapedStr<'_> { } } -/// Parses a new line-delimited string into an interator of +/// Parses a new line-delimited string into an iterator of /// [`ParsedLine`]. See the [crate-level documentation](self) for more /// information and examples. pub fn parse_lines(input: &str) -> impl Iterator<Item = Result<ParsedLine<'_>>> { @@ -631,9 +634,7 @@ fn parse_line(i: &str) -> IResult<&str, ParsedLine<'_>> { } fn series(i: &str) -> IResult<&str, Series<'_>> { - let tag_set = preceded(tag(","), tag_set); - let series = tuple((measurement, opt(tag_set))); - + let series = tuple((measurement, maybe_tagset)); let series_and_raw_input = parse_and_recognize(series); map( @@ -646,6 +647,28 @@ fn series(i: &str) -> IResult<&str, Series<'_>> { )(i) } +/// Tagsets are optional, but if a comma follows the measurement, then we must have at least one tag=value pair. +/// anything else is an error +fn maybe_tagset(i: &str) -> IResult<&str, Option<TagSet<'_>>, Error> { + match tag::<&str, &str, Error>(",")(i) { + Err(nom::Err::Error(_)) => Ok((i, None)), + Ok((remainder, _)) => { + match tag_set(remainder) { + Ok((i, ts)) => { + // reaching here, we must find a tagset, which is at least one tag=value pair. + if ts.is_empty() { + return Err(nom::Err::Error(Error::TagSetMalformed)); + } + Ok((i, Some(ts))) + } + Err(nom::Err::Error(_)) => TagSetMalformedSnafu.fail().map_err(nom::Err::Error), + Err(e) => Err(e), + } + } + Err(e) => Err(e), + } +} + fn measurement(i: &str) -> IResult<&str, Measurement<'_>, Error> { let normal_char = take_while1(|c| { !is_whitespace_boundary_char(c) && !is_null_char(c) && c != ',' && c != '\\' @@ -1261,6 +1284,22 @@ mod test { assert_eq!(vals.unwrap().len(), 0); } + // tests that an incomplete tag=value pair returns an error about a malformed tagset + #[test] + fn parse_tag_no_value() { + let input = "testmeasure,foo= bar=1i"; + let vals = parse(input); + assert!(matches!(vals, Err(Error::TagSetMalformed))); + } + + // tests that just a comma after the measurement is an error + #[test] + fn parse_no_tagset() { + let input = "testmeasure, bar=1i"; + let vals = parse(input); + assert!(matches!(vals, Err(Error::TagSetMalformed))); + } + #[test] fn parse_no_measurement() { let input = ",tag1=1,tag2=2 value=1 123";
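A quick way to see the behaviour change from the commit above is through the public parse_lines API: a well-formed tag set still parses, while a comma after the measurement with no complete tag=value pair now yields Error::TagSetMalformed. This is a minimal sketch based on the test cases in the diff; the exact line-protocol strings are illustrative.

use influxdb_line_protocol::{parse_lines, Error};

fn main() {
    // Complete tag set: parses as before.
    assert!(parse_lines("m,host=a v=1i 1").all(|line| line.is_ok()));

    // Comma after the measurement but no tag=value pair: now rejected.
    let err = parse_lines("m, v=1i 1").find_map(|line| line.err());
    assert!(matches!(err, Some(Error::TagSetMalformed)));

    // Tag key with a missing value: also rejected.
    let err = parse_lines("m,host= v=1i 1").find_map(|line| line.err());
    assert!(matches!(err, Some(Error::TagSetMalformed)));
}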
a58ef38bb06b6fd544023a80ff0f84b4857963dd
Carol (Nichols || Goulding)
2023-03-22 16:15:13
Move target_level into PlanIR
Because this info needs to stay with the rest of the plan info
null
refactor: Move target_level into PlanIR Because this info needs to stay with the rest of the plan info
diff --git a/compactor2/src/components/df_planner/panic.rs b/compactor2/src/components/df_planner/panic.rs index 1983c5025a..e4c50a8547 100644 --- a/compactor2/src/components/df_planner/panic.rs +++ b/compactor2/src/components/df_planner/panic.rs @@ -95,6 +95,7 @@ impl ExecutionPlan for PanicPlan { #[cfg(test)] mod tests { + use data_types::CompactionLevel; use datafusion::{physical_plan::collect, prelude::SessionContext}; use crate::test_utils::PartitionInfoBuilder; @@ -112,7 +113,13 @@ mod tests { let planner = PanicDataFusionPlanner::new(); let partition = Arc::new(PartitionInfoBuilder::new().build()); let plan = planner - .plan(&PlanIR::Compact { files: vec![] }, partition) + .plan( + &PlanIR::Compact { + files: vec![], + target_level: CompactionLevel::Final, + }, + partition, + ) .await .unwrap(); diff --git a/compactor2/src/components/df_planner/planner_v1.rs b/compactor2/src/components/df_planner/planner_v1.rs index 642346357e..37755904a8 100644 --- a/compactor2/src/components/df_planner/planner_v1.rs +++ b/compactor2/src/components/df_planner/planner_v1.rs @@ -49,7 +49,7 @@ impl DataFusionPlanner for V1DataFusionPlanner { let ctx = self.exec.new_context(ExecutorType::Reorg); let plan = match ir { - PlanIR::Compact { files } => { + PlanIR::Compact { files, .. } => { let query_chunks = to_query_chunks(files, &partition, self.store.clone()); let merged_schema = QueryableParquetChunk::merge_schemas(&query_chunks); let sort_key = partition @@ -72,7 +72,9 @@ impl DataFusionPlanner for V1DataFusionPlanner { ) })? } - PlanIR::Split { files, split_times } => { + PlanIR::Split { + files, split_times, .. + } => { let query_chunks = to_query_chunks(files, &partition, self.store.clone()); let merged_schema = QueryableParquetChunk::merge_schemas(&query_chunks); let sort_key = partition diff --git a/compactor2/src/components/ir_planner/planner_v1.rs b/compactor2/src/components/ir_planner/planner_v1.rs index a4420d21d1..6ccb715633 100644 --- a/compactor2/src/components/ir_planner/planner_v1.rs +++ b/compactor2/src/components/ir_planner/planner_v1.rs @@ -122,7 +122,7 @@ impl IRPlanner for V1IRPlanner { &self, files: Vec<ParquetFile>, _partition: Arc<PartitionInfo>, - compaction_level: CompactionLevel, + target_level: CompactionLevel, ) -> PlanIR { // gather data // total file size is the sum of the file sizes of the files to compact @@ -150,18 +150,17 @@ impl IRPlanner for V1IRPlanner { let files = files .into_iter() .map(|file| { - let order = order( - file.compaction_level, - compaction_level, - file.max_l0_created_at, - ); + let order = order(file.compaction_level, target_level, file.max_l0_created_at); FileIR { file, order } }) .collect::<Vec<_>>(); // Build logical compact plan if total_size <= small_cutoff_bytes { - PlanIR::Compact { files } + PlanIR::Compact { + files, + target_level, + } } else { let split_times = if small_cutoff_bytes < total_size && total_size <= large_cutoff_bytes { @@ -182,10 +181,17 @@ impl IRPlanner for V1IRPlanner { if split_times.is_empty() || (split_times.len() == 1 && split_times[0] == max_time) { // The split times might not have actually split anything, so in this case, compact // everything into one file - PlanIR::Compact { files } + PlanIR::Compact { + files, + target_level, + } } else { // split compact query plan to split the result into multiple files - PlanIR::Split { files, split_times } + PlanIR::Split { + files, + split_times, + target_level, + } } } } @@ -196,19 +202,16 @@ impl IRPlanner for V1IRPlanner { file: ParquetFile, split_times: Vec<i64>, 
_partition: Arc<PartitionInfo>, - compaction_level: CompactionLevel, + target_level: CompactionLevel, ) -> PlanIR { - let order = order( - file.compaction_level, - compaction_level, - file.max_l0_created_at, - ); + let order = order(file.compaction_level, target_level, file.max_l0_created_at); let file = FileIR { file, order }; PlanIR::Split { files: vec![file], split_times, + target_level, } } } diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs index 19020a1975..f92a27e4ec 100644 --- a/compactor2/src/driver.rs +++ b/compactor2/src/driver.rs @@ -326,14 +326,11 @@ async fn run_plans( }; let mut created_file_params = Vec::with_capacity(capacity); - let plans_and_target_levels = match split_or_compact { + let plans = match split_or_compact { FilesToSplitOrCompact::Compact(_files) => { - vec![( - components.ir_planner.compact_plan( - files_inpad, - Arc::clone(partition_info), - target_level, - ), + vec![components.ir_planner.compact_plan( + files_inpad, + Arc::clone(partition_info), target_level, )] } @@ -345,13 +342,10 @@ async fn run_plans( // target level of a split file is the same as its level let target_level = file_to_split.file.compaction_level; - ( - components.ir_planner.split_plan( - file_inpad, - file_to_split.split_times.clone(), - Arc::clone(partition_info), - target_level, - ), + components.ir_planner.split_plan( + file_inpad, + file_to_split.split_times.clone(), + Arc::clone(partition_info), target_level, ) }) @@ -360,13 +354,12 @@ async fn run_plans( FilesToSplitOrCompact::None => vec![], // Nothing to do }; - for (plan_ir, target_level) in plans_and_target_levels { + for plan_ir in plans { created_file_params.extend( execute_plan( plan_ir, partition_info, components, - target_level, Arc::clone(&job_semaphore), ) .await?, @@ -380,7 +373,6 @@ async fn execute_plan( plan_ir: PlanIR, partition_info: &Arc<PartitionInfo>, components: &Arc<Components>, - target_level: CompactionLevel, job_semaphore: Arc<InstrumentedAsyncSemaphore>, ) -> Result<Vec<ParquetFileParams>, DynError> { let create = { @@ -407,7 +399,7 @@ async fn execute_plan( let job = components.parquet_files_sink.stream_into_file_sink( streams, Arc::clone(partition_info), - target_level, + plan_ir.target_level(), &plan_ir, ); diff --git a/compactor2/src/plan_ir.rs b/compactor2/src/plan_ir.rs index a43e717082..08206d603b 100644 --- a/compactor2/src/plan_ir.rs +++ b/compactor2/src/plan_ir.rs @@ -1,6 +1,6 @@ use std::fmt::Display; -use data_types::{ChunkOrder, ParquetFile}; +use data_types::{ChunkOrder, CompactionLevel, ParquetFile}; #[derive(Debug)] /// Describes a specific compactor plan to create. @@ -9,6 +9,8 @@ pub enum PlanIR { Compact { /// The files to be compacted files: Vec<FileIR>, + /// The level the compacted file will be + target_level: CompactionLevel, }, /// Compact `files` into multiple files, for each entry in /// `split_times` @@ -23,10 +25,20 @@ pub enum PlanIR { /// The distribution of times is described on /// [`iox_query::frontend::reorg::ReorgPlanner::split_plan`] split_times: Vec<i64>, + /// The level the split files will be + target_level: CompactionLevel, }, } impl PlanIR { + /// Return the target level for this plan + pub fn target_level(&self) -> CompactionLevel { + match *self { + Self::Compact { target_level, .. } => target_level, + Self::Split { target_level, .. 
} => target_level, + } + } + /// Return the number of output files produced pub fn n_output_files(&self) -> usize { match self { @@ -38,7 +50,7 @@ impl PlanIR { /// return the input files that will be compacted together pub fn input_files(&self) -> &[FileIR] { match self { - Self::Compact { files } => files, + Self::Compact { files, .. } => files, Self::Split { files, .. } => files, } } diff --git a/compactor2_test_utils/src/simulator.rs b/compactor2_test_utils/src/simulator.rs index 39efb08dbc..e7f14dd1c5 100644 --- a/compactor2_test_utils/src/simulator.rs +++ b/compactor2_test_utils/src/simulator.rs @@ -95,10 +95,11 @@ impl ParquetFilesSink for ParquetFileSimulator { info!("Simulating {plan_ir}"); let (plan_type, split_times): (String, &[i64]) = match plan_ir { // pretend it is an empty split - PlanIR::Compact { files: _ } => (plan_ir.to_string(), &[]), + PlanIR::Compact { files: _, .. } => (plan_ir.to_string(), &[]), PlanIR::Split { files: _, split_times, + .. } => { let plan_type = format!("{plan_ir}(split_times={split_times:?})"); (plan_type, split_times)
79d24fa350a3d83ec7ee311e5de04858ad330a6d
Dom Dwyer
2022-10-21 15:59:59
granular per-partition locking
This commit pushes the existing table-level mutex down to the partition. This allows the ingester to gather data from multiple partitions within a single table in parallel, and reduces contention between ingest/query workloads.
null
perf(ingester): granular per-partition locking This commit pushes the existing table-level mutex down to the partition. This allows the ingester to gather data from multiple partitions within a single table in parallel, and reduces contention between ingest/query workloads.
diff --git a/ingester/src/data.rs b/ingester/src/data.rs index 931fb9d175..a5fcbce18c 100644 --- a/ingester/src/data.rs +++ b/ingester/src/data.rs @@ -265,45 +265,46 @@ impl Persister for IngesterData { let table_data = namespace.table_id(table_id).unwrap_or_else(|| { panic!("table {table_id} in namespace {namespace_id} not in shard {shard_id} state") }); + // Assert various properties of the table to ensure the index is + // correct, out of an abundance of caution. + assert_eq!(table_data.shard_id(), shard_id); + assert_eq!(table_data.namespace_id(), namespace_id); + assert_eq!(table_data.table_id(), table_id); + let table_name = table_data.table_name().clone(); + + let partition = table_data.get_partition(partition_id).unwrap_or_else(|| { + panic!( + "partition {partition_id} in table {table_id} in namespace {namespace_id} not in shard {shard_id} state" + ) + }); - let table_name; let partition_key; let sort_key; let last_persisted_sequence_number; let batch; let batch_sequence_number_range; { - let mut guard = table_data.write().await; - // Assert various properties of the table to ensure the index is + // Acquire a write lock over the partition and extract all the + // necessary data. + let mut guard = partition.lock(); + + // Assert various properties of the partition to ensure the index is // correct, out of an abundance of caution. + assert_eq!(guard.partition_id(), partition_id); assert_eq!(guard.shard_id(), shard_id); assert_eq!(guard.namespace_id(), namespace_id); assert_eq!(guard.table_id(), table_id); - table_name = guard.table_name().clone(); - - let partition = guard.get_partition(partition_id).unwrap_or_else(|| { - panic!( - "partition {partition_id} in table {table_id} in namespace {namespace_id} not in shard {shard_id} state" - ) - }); - - // Assert various properties of the partition to ensure the index is - // correct, out of an abundance of caution. - assert_eq!(partition.partition_id(), partition_id); - assert_eq!(partition.shard_id(), shard_id); - assert_eq!(partition.namespace_id(), namespace_id); - assert_eq!(partition.table_id(), table_id); - assert_eq!(*partition.table_name(), table_name); + assert_eq!(*guard.table_name(), table_name); - partition_key = partition.partition_key().clone(); - sort_key = partition.sort_key().clone(); - last_persisted_sequence_number = partition.max_persisted_sequence_number(); + partition_key = guard.partition_key().clone(); + sort_key = guard.sort_key().clone(); + last_persisted_sequence_number = guard.max_persisted_sequence_number(); // The sequence number MUST be read without releasing the write lock // to ensure a consistent snapshot of batch contents and batch // sequence number range. - batch = partition.mark_persisting(); - batch_sequence_number_range = partition.sequence_number_range(); + batch = guard.mark_persisting(); + batch_sequence_number_range = guard.sequence_number_range(); }; // From this point on, the code MUST be infallible. @@ -423,12 +424,7 @@ impl Persister for IngesterData { .expect("retry forever"); // Update the sort key in the partition cache. - table_data - .write() - .await - .get_partition(partition_id) - .unwrap() - .update_sort_key(Some(new_sort_key.clone())); + partition.lock().update_sort_key(Some(new_sort_key.clone())); debug!( %object_store_id, @@ -545,18 +541,15 @@ impl Persister for IngesterData { // This SHOULD cause the data to be dropped, but there MAY be ongoing // queries that currently hold a reference to the data. In either case, // the persisted data will be dropped "shortly". 
- table_data - .write() - .await - .get_partition(partition_id) - .unwrap() + partition + .lock() .mark_persisted(iox_metadata.max_sequence_number); // BUG: ongoing queries retain references to the persisting data, // preventing it from being dropped, but memory is released back to // lifecycle memory tracker when this fn returns. // - // https://github.com/influxdata/influxdb_iox/issues/5872 + // https://github.com/influxdata/influxdb_iox/issues/5805 // info!( %object_store_id, @@ -813,11 +806,12 @@ mod tests { let n = sd.namespace(&"foo".into()).unwrap(); let mem_table = n.table_data(&"mem".into()).unwrap(); assert!(n.table_data(&"mem".into()).is_some()); - let mem_table = mem_table.write().await; let p = mem_table .get_partition_by_key(&"1970-01-01".into()) - .unwrap(); - (mem_table.table_id(), p.partition_id()) + .unwrap() + .lock() + .partition_id(); + (mem_table.table_id(), p) }; data.persist(shard1.id, namespace.id, table_id, partition_id) @@ -968,13 +962,12 @@ mod tests { let mem_table = n.table_data(&"mem".into()).unwrap(); assert!(n.table_data(&"cpu".into()).is_some()); - let mem_table = mem_table.write().await; table_id = mem_table.table_id(); let p = mem_table .get_partition_by_key(&"1970-01-01".into()) .unwrap(); - partition_id = p.partition_id(); + partition_id = p.lock().partition_id(); } { // verify the partition doesn't have a sort key before any data has been persisted @@ -1044,13 +1037,12 @@ mod tests { .unwrap() .table_id(table_id) .unwrap() - .write() - .await .get_partition(partition_id) .unwrap() + .lock() .sort_key() - .get() - .await; + .clone(); + let cached_sort_key = cached_sort_key.get().await; assert_eq!( cached_sort_key, Some(SortKey::from_columns(partition.sort_key)) @@ -1101,10 +1093,9 @@ mod tests { // verify that the parquet_max_sequence_number got updated assert_eq!( mem_table - .write() - .await .get_partition(partition_id) .unwrap() + .lock() .max_persisted_sequence_number(), Some(SequenceNumber::new(2)) ); @@ -1406,15 +1397,16 @@ mod tests { .await .unwrap(); { - let table_data = data.table_data(&"mem".into()).unwrap(); - let mut table = table_data.write().await; + let table = data.table_data(&"mem".into()).unwrap(); assert!(table - .partition_iter_mut() - .all(|p| p.get_query_data().is_none())); + .partitions() + .into_iter() + .all(|p| p.lock().get_query_data().is_none())); assert_eq!( table .get_partition_by_key(&"1970-01-01".into()) .unwrap() + .lock() .max_persisted_sequence_number(), Some(SequenceNumber::new(1)) ); @@ -1426,11 +1418,10 @@ mod tests { .await .unwrap(); - let table_data = data.table_data(&"mem".into()).unwrap(); - let table = table_data.read().await; + let table = data.table_data(&"mem".into()).unwrap(); let partition = table.get_partition_by_key(&"1970-01-01".into()).unwrap(); assert_eq!( - partition.sequence_number_range().inclusive_min(), + partition.lock().sequence_number_range().inclusive_min(), Some(SequenceNumber::new(2)) ); diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs index a028ee1b22..464e2e087d 100644 --- a/ingester/src/data/namespace.rs +++ b/ingester/src/data/namespace.rs @@ -23,26 +23,26 @@ use crate::{data::DmlApplyAction, lifecycle::LifecycleHandle}; #[derive(Debug, Default)] struct DoubleRef { // TODO(4880): this can be removed when IDs are sent over the wire. 
- by_name: HashMap<TableName, Arc<tokio::sync::RwLock<TableData>>>, - by_id: HashMap<TableId, Arc<tokio::sync::RwLock<TableData>>>, + by_name: HashMap<TableName, Arc<TableData>>, + by_id: HashMap<TableId, Arc<TableData>>, } impl DoubleRef { - fn insert(&mut self, t: TableData) -> Arc<tokio::sync::RwLock<TableData>> { + fn insert(&mut self, t: TableData) -> Arc<TableData> { let name = t.table_name().clone(); let id = t.table_id(); - let t = Arc::new(tokio::sync::RwLock::new(t)); + let t = Arc::new(t); self.by_name.insert(name, Arc::clone(&t)); self.by_id.insert(id, Arc::clone(&t)); t } - fn by_name(&self, name: &TableName) -> Option<Arc<tokio::sync::RwLock<TableData>>> { + fn by_name(&self, name: &TableName) -> Option<Arc<TableData>> { self.by_name.get(name).map(Arc::clone) } - fn by_id(&self, id: TableId) -> Option<Arc<tokio::sync::RwLock<TableData>>> { + fn by_id(&self, id: TableId) -> Option<Arc<TableData>> { self.by_id.get(&id).map(Arc::clone) } } @@ -206,22 +206,19 @@ impl NamespaceData { None => self.insert_table(&t, catalog).await?, }; - { - // lock scope - let mut table_data = table_data.write().await; - let action = table_data - .buffer_table_write( - sequence_number, - b, - partition_key.clone(), - lifecycle_handle, - ) - .await?; - if let DmlApplyAction::Applied(should_pause) = action { - pause_writes = pause_writes || should_pause; - all_skipped = false; - } + let action = table_data + .buffer_table_write( + sequence_number, + b, + partition_key.clone(), + lifecycle_handle, + ) + .await?; + if let DmlApplyAction::Applied(should_pause) = action { + pause_writes = pause_writes || should_pause; + all_skipped = false; } + #[cfg(test)] self.test_triggers.on_write().await; } @@ -251,19 +248,13 @@ impl NamespaceData { } /// Return the specified [`TableData`] if it exists. - pub(crate) fn table_data( - &self, - table_name: &TableName, - ) -> Option<Arc<tokio::sync::RwLock<TableData>>> { + pub(crate) fn table_data(&self, table_name: &TableName) -> Option<Arc<TableData>> { let t = self.tables.read(); t.by_name(table_name) } /// Return the table data by ID. - pub(crate) fn table_id( - &self, - table_id: TableId, - ) -> Option<Arc<tokio::sync::RwLock<TableData>>> { + pub(crate) fn table_id(&self, table_id: TableId) -> Option<Arc<TableData>> { let t = self.tables.read(); t.by_id(table_id) } @@ -273,7 +264,7 @@ impl NamespaceData { &self, table_name: &TableName, catalog: &Arc<dyn Catalog>, - ) -> Result<Arc<tokio::sync::RwLock<TableData>>, super::Error> { + ) -> Result<Arc<TableData>, super::Error> { let mut repos = catalog.repositories().await; let table_id = repos @@ -317,7 +308,7 @@ impl NamespaceData { .actively_buffering(*self.buffering_sequence_number.read()); for table_data in tables { - progress = progress.combine(table_data.read().await.progress()) + progress = progress.combine(table_data.progress()) } progress } diff --git a/ingester/src/data/table.rs b/ingester/src/data/table.rs index 3e8556b128..b5be919f92 100644 --- a/ingester/src/data/table.rs +++ b/ingester/src/data/table.rs @@ -1,47 +1,48 @@ //! Table level data buffer structures. 
-use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use data_types::{NamespaceId, PartitionId, PartitionKey, SequenceNumber, ShardId, TableId}; use mutable_batch::MutableBatch; +use parking_lot::{Mutex, RwLock}; use write_summary::ShardProgress; use super::{ partition::{resolver::PartitionProvider, BufferError, PartitionData}, DmlApplyAction, }; -use crate::lifecycle::LifecycleHandle; +use crate::{arcmap::ArcMap, lifecycle::LifecycleHandle}; /// A double-referenced map where [`PartitionData`] can be looked up by /// [`PartitionKey`], or ID. #[derive(Debug, Default)] struct DoubleRef { // TODO(4880): this can be removed when IDs are sent over the wire. - by_key: HashMap<PartitionKey, PartitionData>, - by_id: HashMap<PartitionId, PartitionKey>, + by_key: ArcMap<PartitionKey, Mutex<PartitionData>>, + by_id: ArcMap<PartitionId, Mutex<PartitionData>>, } impl DoubleRef { - fn insert(&mut self, ns: PartitionData) { + /// Try to insert the provided [`PartitionData`]. + /// + /// Note that the partition MAY have been inserted concurrently, and the + /// returned [`PartitionData`] MAY be a different instance for the same + /// underlying partition. + fn try_insert(&mut self, ns: PartitionData) -> Arc<Mutex<PartitionData>> { let id = ns.partition_id(); let key = ns.partition_key().clone(); - assert!(self.by_key.insert(key.clone(), ns).is_none()); - assert!(self.by_id.insert(id, key).is_none()); + let ns = Arc::new(Mutex::new(ns)); + self.by_key.get_or_insert_with(&key, || Arc::clone(&ns)); + self.by_id.get_or_insert_with(&id, || ns) } - #[cfg(test)] - fn by_key(&self, key: &PartitionKey) -> Option<&PartitionData> { + fn by_key(&self, key: &PartitionKey) -> Option<Arc<Mutex<PartitionData>>> { self.by_key.get(key) } - fn by_key_mut(&mut self, key: &PartitionKey) -> Option<&mut PartitionData> { - self.by_key.get_mut(key) - } - - fn by_id_mut(&mut self, id: PartitionId) -> Option<&mut PartitionData> { - let key = self.by_id.get(&id)?.clone(); - self.by_key_mut(&key) + fn by_id(&self, id: PartitionId) -> Option<Arc<Mutex<PartitionData>>> { + self.by_id.get(&id) } } @@ -102,7 +103,7 @@ pub(crate) struct TableData { partition_provider: Arc<dyn PartitionProvider>, // Map of partition key to its data - partition_data: DoubleRef, + partition_data: RwLock<DoubleRef>, } impl TableData { @@ -136,13 +137,14 @@ impl TableData { // buffers the table write and returns true if the lifecycle manager indicates that // ingest should be paused. pub(super) async fn buffer_table_write( - &mut self, + &self, sequence_number: SequenceNumber, batch: MutableBatch, partition_key: PartitionKey, lifecycle_handle: &dyn LifecycleHandle, ) -> Result<DmlApplyAction, super::Error> { - let partition_data = match self.partition_data.by_key.get_mut(&partition_key) { + let p = self.partition_data.read().by_key(&partition_key); + let partition_data = match p { Some(p) => p, None => { let p = self @@ -156,20 +158,25 @@ impl TableData { ) .await; // Add the double-referenced partition to the map. - self.partition_data.insert(p); - self.partition_data.by_key_mut(&partition_key).unwrap() + // + // This MAY return a different instance than `p` if another + // thread has already initialised the partition. 
+ self.partition_data.write().try_insert(p) } }; let size = batch.size(); let rows = batch.rows(); - match partition_data.buffer_write(batch, sequence_number) { - Ok(_) => { /* continue below */ } - Err(BufferError::SkipPersisted) => return Ok(DmlApplyAction::Skipped), - Err(BufferError::BufferError(e)) => { - return Err(super::Error::BufferWrite { source: e }) + let partition_id = { + let mut p = partition_data.lock(); + match p.buffer_write(batch, sequence_number) { + Ok(_) => p.partition_id(), + Err(BufferError::SkipPersisted) => return Ok(DmlApplyAction::Skipped), + Err(BufferError::BufferError(e)) => { + return Err(super::Error::BufferWrite { source: e }) + } } - } + }; // Record the write as having been buffered. // @@ -177,7 +184,7 @@ impl TableData { // op may fail which would lead to a write being recorded, but not // applied. let should_pause = lifecycle_handle.log_write( - partition_data.partition_id(), + partition_id, self.shard_id, self.namespace_id, self.table_id, @@ -195,19 +202,17 @@ impl TableData { /// /// The order of [`PartitionData`] in the iterator is arbitrary and should /// not be relied upon. - pub(crate) fn partition_iter_mut( - &mut self, - ) -> impl Iterator<Item = &mut PartitionData> + ExactSizeIterator { - self.partition_data.by_key.values_mut() + pub(crate) fn partitions(&self) -> Vec<Arc<Mutex<PartitionData>>> { + self.partition_data.read().by_key.values() } /// Return the [`PartitionData`] for the specified ID. #[allow(unused)] pub(crate) fn get_partition( - &mut self, + &self, partition_id: PartitionId, - ) -> Option<&mut PartitionData> { - self.partition_data.by_id_mut(partition_id) + ) -> Option<Arc<Mutex<PartitionData>>> { + self.partition_data.read().by_id(partition_id) } /// Return the [`PartitionData`] for the specified partition key. 
@@ -215,17 +220,19 @@ impl TableData { pub(crate) fn get_partition_by_key( &self, partition_key: &PartitionKey, - ) -> Option<&PartitionData> { - self.partition_data.by_key(partition_key) + ) -> Option<Arc<Mutex<PartitionData>>> { + self.partition_data.read().by_key(partition_key) } /// Return progress from this Table pub(super) fn progress(&self) -> ShardProgress { self.partition_data + .read() .by_key .values() - .fold(Default::default(), |progress, partition_data| { - progress.combine(partition_data.progress()) + .into_iter() + .fold(Default::default(), |progress, p| { + progress.combine(p.lock().progress()) }) } @@ -303,7 +310,7 @@ mod tests { ), )); - let mut table = TableData::new( + let table = TableData::new( table_id, TABLE_NAME.into(), shard_id, @@ -317,8 +324,12 @@ mod tests { .unwrap(); // Assert the table does not contain the test partition - assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_none()); - assert!(table.partition_data.by_id_mut(PARTITION_ID).is_none()); + assert!(table + .partition_data + .read() + .by_key(&PARTITION_KEY.into()) + .is_none()); + assert!(table.partition_data.read().by_id(PARTITION_ID).is_none()); // Write some test data let action = table @@ -333,8 +344,12 @@ mod tests { assert_matches!(action, DmlApplyAction::Applied(false)); // Referencing the partition should succeed - assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_some()); - assert!(table.partition_data.by_id_mut(PARTITION_ID).is_some()); + assert!(table + .partition_data + .read() + .by_key(&PARTITION_KEY.into()) + .is_some()); + assert!(table.partition_data.read().by_id(PARTITION_ID).is_some()); } #[tokio::test] @@ -362,7 +377,7 @@ mod tests { ), )); - let mut table = TableData::new( + let table = TableData::new( table_id, TABLE_NAME.into(), shard_id, @@ -380,7 +395,11 @@ mod tests { let handle = MockLifecycleHandle::default(); // Assert the table does not contain the test partition - assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_none()); + assert!(table + .partition_data + .read() + .by_key(&PARTITION_KEY.into()) + .is_none()); // Write some test data let action = table @@ -395,7 +414,11 @@ mod tests { assert_matches!(action, DmlApplyAction::Applied(false)); // Referencing the partition should succeed - assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_some()); + assert!(table + .partition_data + .read() + .by_key(&PARTITION_KEY.into()) + .is_some()); // And the lifecycle handle was called with the expected values assert_eq!( diff --git a/ingester/src/querier_handler.rs b/ingester/src/querier_handler.rs index 9492a64acc..7b5a3bd5dc 100644 --- a/ingester/src/querier_handler.rs +++ b/ingester/src/querier_handler.rs @@ -311,10 +311,11 @@ pub async fn prepare_data_to_querier( // acquire locks and read table data in parallel let unpersisted_partitions: Vec<_> = futures::stream::iter(table_refs) .map(|table_data| async move { - let mut table_data = table_data.write().await; table_data - .partition_iter_mut() + .partitions() + .into_iter() .map(|p| { + let mut p = p.lock(); ( p.partition_id(), p.get_query_data(),
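A minimal, self-contained sketch of the per-partition locking pattern the commit above describes. This is not the IOx code: `PartitionData` is a hypothetical stand-in reduced to a counter, the diff's `ArcMap` is replaced with a plain `HashMap` behind an `RwLock`, and lock poisoning is simply unwrapped.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

/// Hypothetical stand-in for the ingester's per-partition buffer.
#[derive(Debug, Default)]
struct PartitionData {
    rows_buffered: usize,
}

/// Table-level state: each partition sits behind its own Mutex, so writes and
/// queries touching different partitions never contend on a single table lock.
#[derive(Debug, Default)]
struct TableData {
    partitions: RwLock<HashMap<String, Arc<Mutex<PartitionData>>>>,
}

impl TableData {
    /// Get the partition for `key`, creating it if it does not exist yet.
    fn get_or_create(&self, key: &str) -> Arc<Mutex<PartitionData>> {
        // Fast path: shared read lock on the map only.
        if let Some(p) = self.partitions.read().unwrap().get(key) {
            return Arc::clone(p);
        }
        // Slow path: take the write lock and insert if still absent
        // (another thread may have raced us here, as the diff's `try_insert` notes).
        let mut map = self.partitions.write().unwrap();
        Arc::clone(
            map.entry(key.to_string())
                .or_insert_with(|| Arc::new(Mutex::new(PartitionData::default()))),
        )
    }

    /// Buffer a write into one partition; only that partition's Mutex is held.
    fn buffer_write(&self, key: &str, rows: usize) {
        let partition = self.get_or_create(key);
        partition.lock().unwrap().rows_buffered += rows;
    }
}

fn main() {
    let table = TableData::default();
    table.buffer_write("1970-01-01", 10);
    table.buffer_write("1970-01-02", 5);
    // The map lock is held only while looking up or inserting entries;
    // mutating partition contents contends only on that partition's Mutex.
    assert_eq!(
        table.get_or_create("1970-01-01").lock().unwrap().rows_buffered,
        10
    );
}
```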
90b90712971b59f4392f991fc728c063a6b3baa1
Michael Gattozzi
2024-12-16 11:24:35
Change references of Edge into OSS (#25661)
This changes the code to reference InfluxDB 3 OSS rather than Edge, which had been its original name when we first started the project. With this, the code now reflects what we are actually calling it. On top of this, the long help text has been changed to give advice about how to actually run the code with the bare minimum set of flags needed, as `influxdb serve` is no longer a viable command on its own. Closes #25649
null
fix: Change references of Edge into OSS (#25661) This changes the code to reference InfluxDB 3 OSS rather than Edge, which had been its original name when we first started the project. With this, the code now reflects what we are actually calling it. On top of this, the long help text has been changed to give advice about how to actually run the code with the bare minimum set of flags needed, as `influxdb serve` is no longer a viable command on its own. Closes #25649
diff --git a/Cargo.toml b/Cargo.toml index d41a2929be..6ef82b45fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ exclude = [ [workspace.package] version = "0.1.0" -authors = ["influxdata Edge Developers"] +authors = ["InfluxData OSS Developers"] edition = "2021" license = "MIT OR Apache-2.0" diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index f296028b78..0d709accc3 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -1,4 +1,4 @@ -//! Entrypoint for InfluxDB 3.0 Edge Server +//! Entrypoint for InfluxDB 3.0 OSS Server use anyhow::{bail, Context}; use clap_blocks::{ @@ -363,7 +363,7 @@ pub async fn command(config: Config) -> Result<()> { uuid = %PROCESS_UUID.as_ref() as &str, num_cpus, %build_malloc_conf, - "InfluxDB3 Edge server starting", + "InfluxDB3 OSS server starting", ); let metrics = setup_metric_registry(); diff --git a/influxdb3/src/main.rs b/influxdb3/src/main.rs index d08f19280a..97d5f3663c 100644 --- a/influxdb3/src/main.rs +++ b/influxdb3/src/main.rs @@ -51,21 +51,21 @@ clap::Arg::new("help") .action(clap::ArgAction::Help) .global(true) ), -about = "InfluxDB 3.0 Edge server and command line tools", -long_about = r#"InfluxDB 3.0 Edge server and command line tools +about = "InfluxDB 3.0 OSS server and command line tools", +long_about = r#"InfluxDB 3.0 OSS server and command line tools Examples: - # Run the InfluxDB 3.0 Edge server - influxdb3 serve + # Run the InfluxDB 3.0 OSS server + influxdb3 serve --object-store file --data-dir ~/.influxdb3 --host_id my_host_name # Display all commands influxdb3 --help - # Run the InfluxDB 3.0 Edge server in all-in-one mode with extra verbose logging - influxdb3 serve -v + # Run the InfluxDB 3.0 OSS server with extra verbose logging + influxdb3 serve -v --object-store file --data-dir ~/.influxdb3 --host_id my_host_name - # Run InfluxDB 3.0 Edge with full debug logging specified with LOG_FILTER - LOG_FILTER=debug influxdb3 serve + # Run InfluxDB 3.0 OSS with full debug logging specified with LOG_FILTER + LOG_FILTER=debug influxdb3 serve --object-store file --data-dir ~/.influxdb3 --host_id my_host_name "# )] struct Config { diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs index 11505e838a..73026f7751 100644 --- a/influxdb3_catalog/src/catalog.rs +++ b/influxdb3_catalog/src/catalog.rs @@ -155,11 +155,11 @@ impl Serialize for Catalog { } impl Catalog { - /// Limit for the number of Databases that InfluxDB Edge can have + /// Limit for the number of Databases that InfluxDB 3.0 OSS can have pub(crate) const NUM_DBS_LIMIT: usize = 5; - /// Limit for the number of columns per table that InfluxDB Edge can have + /// Limit for the number of columns per table that InfluxDB 3.0 OSS can have pub(crate) const NUM_COLUMNS_PER_TABLE_LIMIT: usize = 500; - /// Limit for the number of tables across all DBs that InfluxDB Edge can have + /// Limit for the number of tables across all DBs that InfluxDB 3.0 OSS can have pub(crate) const NUM_TABLES_LIMIT: usize = 2000; pub fn new(host_id: Arc<str>, instance_id: Arc<str>) -> Self { diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index 48abbbad7c..8f8dc498bb 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -1,4 +1,4 @@ -//! InfluxDB 3.0 Edge server implementation +//! InfluxDB 3.0 OSS server implementation //! //! 
The server is responsible for handling the HTTP API #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index b75c7579ab..615517f16f 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -436,7 +436,7 @@ impl QueryNamespace for Database { query_params: StatementParams, ) -> QueryCompletedToken<StateReceived> { let trace_id = span_ctx.map(|ctx| ctx.trace_id); - let namespace_name: Arc<str> = Arc::from("influxdb3 edge"); + let namespace_name: Arc<str> = Arc::from("influxdb3 oss"); self.query_log.push( NamespaceId::new(0), namespace_name,
46bbee5423aedf1813cc1017c2aa8c6e2c82d2ee
Dom Dwyer
2022-10-14 14:45:48
reduce default column limit
Reduces the default number of columns allowed per-table, from 1,000 to 200.
null
refactor: reduce default column limit Reduces the default number of columns allowed per-table, from 1,000 to 200.
diff --git a/iox_catalog/migrations/20221014122742_lower-default-per-table-column-limit.sql b/iox_catalog/migrations/20221014122742_lower-default-per-table-column-limit.sql new file mode 100644 index 0000000000..69ed514488 --- /dev/null +++ b/iox_catalog/migrations/20221014122742_lower-default-per-table-column-limit.sql @@ -0,0 +1,7 @@ +-- Lower the defualt per-table column limit. +-- +-- https://github.com/influxdata/influxdb_iox/issues/5858 +ALTER TABLE + namespace ALTER max_columns_per_table +SET + DEFAULT 200;
896a03fdbcbea8c62aaa8a34c94fd8b52ea8f8fa
Marco Neumann
2022-11-29 10:00:13
update `rskafka` (#6258)
Useful because it updates `zstd` to 0.12. With the upcoming `parquet` update, we can then drop `zstd` 0.11.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: update `rskafka` (#6258) Useful because it updates `zstd` to 0.12. With the upcoming `parquet` update, we can then drop `zstd` 0.11. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 0c5621024e..61557a96c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4541,7 +4541,7 @@ dependencies = [ [[package]] name = "rskafka" version = "0.3.0" -source = "git+https://github.com/influxdata/rskafka.git?rev=8678dfe049de05415929ffec7c1be8921bb057f7#8678dfe049de05415929ffec7c1be8921bb057f7" +source = "git+https://github.com/influxdata/rskafka.git?rev=abb2a28cff5ce39d186e814a0c5012267b9690a4#abb2a28cff5ce39d186e814a0c5012267b9690a4" dependencies = [ "async-socks5", "async-trait", @@ -4556,7 +4556,7 @@ dependencies = [ "thiserror", "tokio", "tracing", - "zstd 0.11.2+zstd.1.5.2", + "zstd 0.12.0+zstd.1.5.2", ] [[package]] @@ -6367,8 +6367,6 @@ dependencies = [ "uuid", "winapi", "windows-sys", - "zstd 0.11.2+zstd.1.5.2", - "zstd-safe 5.0.2+zstd.1.5.2", "zstd-sys", ] diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index e08575a8db..fe710253dc 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -82,8 +82,6 @@ tracing-log = { version = "0.1", features = ["log-tracer", "std", "trace-logger" tracing-subscriber = { version = "0.3", features = ["alloc", "ansi", "env-filter", "fmt", "json", "matchers", "nu-ansi-term", "once_cell", "parking_lot", "regex", "registry", "serde", "serde_json", "sharded-slab", "smallvec", "std", "thread_local", "tracing", "tracing-log", "tracing-serde"] } url = { version = "2" } uuid = { version = "1", features = ["getrandom", "rng", "std", "v4"] } -zstd = { version = "0.11", features = ["arrays", "legacy", "zdict_builder"] } -zstd-safe = { version = "5", default-features = false, features = ["arrays", "legacy", "std", "zdict_builder"] } zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] } [build-dependencies] diff --git a/write_buffer/Cargo.toml b/write_buffer/Cargo.toml index 8b1913595b..0d1d6268c3 100644 --- a/write_buffer/Cargo.toml +++ b/write_buffer/Cargo.toml @@ -24,7 +24,7 @@ observability_deps = { path = "../observability_deps" } parking_lot = "0.12" pin-project = "1.0" prost = "0.11" -rskafka = { git = "https://github.com/influxdata/rskafka.git", rev="8678dfe049de05415929ffec7c1be8921bb057f7", default-features = false, features = ["compression-zstd", "transport-socks5"] } +rskafka = { git = "https://github.com/influxdata/rskafka.git", rev="abb2a28cff5ce39d186e814a0c5012267b9690a4", default-features = false, features = ["compression-zstd", "transport-socks5"] } schema = { path = "../schema" } tokio = { version = "1.22", features = ["fs", "macros", "parking_lot", "rt", "sync", "time"] } tokio-util = "0.7.4"
22fe629f549e720c733459cfa2dbdd10b89a11a0
Nga Tran
2023-02-27 14:09:37
rename files and function to remove target level (#7073)
* refactor: rename files and function to remove target level * chore: update a comment ----------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: rename files and function to remove target level (#7073) * refactor: rename files and function to remove target level * chore: update a comment --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/compactor2/src/components/files_split/mod.rs b/compactor2/src/components/files_split/mod.rs index 819154ff64..1344354703 100644 --- a/compactor2/src/components/files_split/mod.rs +++ b/compactor2/src/components/files_split/mod.rs @@ -2,9 +2,9 @@ use std::fmt::{Debug, Display}; use data_types::{CompactionLevel, ParquetFile}; -pub mod target_level_non_overlap_split; -pub mod target_level_target_level_split; -pub mod target_level_upgrade_split; +pub mod non_overlap_split; +pub mod target_level_split; +pub mod upgrade_split; pub trait FilesSplit: Debug + Display + Send + Sync { /// Split provided files into 2 groups of files: diff --git a/compactor2/src/components/files_split/target_level_non_overlap_split.rs b/compactor2/src/components/files_split/non_overlap_split.rs similarity index 95% rename from compactor2/src/components/files_split/target_level_non_overlap_split.rs rename to compactor2/src/components/files_split/non_overlap_split.rs index 218fe34858..720e2730e7 100644 --- a/compactor2/src/components/files_split/target_level_non_overlap_split.rs +++ b/compactor2/src/components/files_split/non_overlap_split.rs @@ -11,21 +11,21 @@ use super::FilesSplit; /// Split files into `[overlapping_files]` and `[non_overlapping_files]` /// To have better and efficient compaction performance, eligible non-overlapped files /// should not be compacted. -pub struct TargetLevelNonOverlapSplit {} +pub struct NonOverlapSplit {} -impl TargetLevelNonOverlapSplit { +impl NonOverlapSplit { pub fn new() -> Self { Self {} } } -impl Display for TargetLevelNonOverlapSplit { +impl Display for NonOverlapSplit { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Non-overlapping split for TargetLevel version") } } -impl FilesSplit for TargetLevelNonOverlapSplit { +impl FilesSplit for NonOverlapSplit { /// Return (`[overlapping_files]`, `[non_overlapping_files]`) of given files /// such that after combining all `overlapping_files` into a new file, the new file will /// have no overlap with any file in `non_overlapping_files`. 
@@ -139,7 +139,7 @@ mod tests { #[test] fn test_display() { assert_eq!( - TargetLevelNonOverlapSplit::new().to_string(), + NonOverlapSplit::new().to_string(), "Non-overlapping split for TargetLevel version" ); } @@ -148,7 +148,7 @@ mod tests { #[should_panic] fn test_wrong_target_level() { let files = create_overlapped_files(); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); split.apply(files, CompactionLevel::Initial); } @@ -158,7 +158,7 @@ mod tests { )] fn test_unexpected_compaction_level_2() { let files = create_overlapped_files(); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); // There are L2 files and will panic split.apply(files, CompactionLevel::FileNonOverlapped); } @@ -169,7 +169,7 @@ mod tests { )] fn test_unexpected_compaction_level_0() { let files = create_overlapped_files(); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); // There are L0 files and will panic split.apply(files, CompactionLevel::Final); } @@ -177,7 +177,7 @@ mod tests { #[test] fn test_apply_empty_files() { let files = vec![]; - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); let (overlap, non_overlap) = split.apply(files, CompactionLevel::FileNonOverlapped); assert_eq!(overlap.len(), 0); @@ -199,7 +199,7 @@ mod tests { "### ); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); // Lower level is empty -> all files will be in non_overlapping_files let (overlap, non_overlap) = split.apply(files.clone(), CompactionLevel::FileNonOverlapped); @@ -231,7 +231,7 @@ mod tests { "### ); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); let (overlap, non_overlap) = split.apply(files, CompactionLevel::FileNonOverlapped); insta::assert_yaml_snapshot!( format_files_split("overlap", &overlap, "non_overlap", &non_overlap), @@ -274,7 +274,7 @@ mod tests { "### ); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); let (overlap, non_overlap) = split.apply(files, CompactionLevel::Final); insta::assert_yaml_snapshot!( format_files_split("overlap", &overlap, "non_overlap", &non_overlap), @@ -319,7 +319,7 @@ mod tests { "### ); - let split = TargetLevelNonOverlapSplit::new(); + let split = NonOverlapSplit::new(); let (overlap, non_overlap) = split.apply(files, CompactionLevel::FileNonOverlapped); insta::assert_yaml_snapshot!( format_files_split("overlap", &overlap, "non_overlap", &non_overlap), diff --git a/compactor2/src/components/files_split/target_level_target_level_split.rs b/compactor2/src/components/files_split/target_level_split.rs similarity index 95% rename from compactor2/src/components/files_split/target_level_target_level_split.rs rename to compactor2/src/components/files_split/target_level_split.rs index 6f9ff3b640..633712bc87 100644 --- a/compactor2/src/components/files_split/target_level_target_level_split.rs +++ b/compactor2/src/components/files_split/target_level_split.rs @@ -6,21 +6,21 @@ use super::FilesSplit; /// Split given files into 2 groups of files: `[<= target_level]` and `[> target_level]` #[derive(Debug)] -pub struct TargetLevelTargetLevelSplit {} +pub struct TargetLevelSplit {} -impl TargetLevelTargetLevelSplit { +impl TargetLevelSplit { pub fn new() -> Self { Self {} } } -impl Display for TargetLevelTargetLevelSplit { +impl Display for TargetLevelSplit { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Target 
level split for TargetLevel version") } } -impl FilesSplit for TargetLevelTargetLevelSplit { +impl FilesSplit for TargetLevelSplit { fn apply( &self, files: Vec<data_types::ParquetFile>, @@ -45,7 +45,7 @@ mod tests { #[test] fn test_display() { assert_eq!( - TargetLevelTargetLevelSplit::new().to_string(), + TargetLevelSplit::new().to_string(), "Target level split for TargetLevel version" ); } @@ -53,7 +53,7 @@ mod tests { #[test] fn test_apply_empty_files() { let files = vec![]; - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files, CompactionLevel::FileNonOverlapped); assert_eq!(lower.len(), 0); @@ -75,7 +75,7 @@ mod tests { "### ); - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files.clone(), CompactionLevel::Initial); assert_eq!(lower.len(), 3); assert_eq!(higher.len(), 0); @@ -104,7 +104,7 @@ mod tests { "### ); - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files.clone(), CompactionLevel::Initial); assert_eq!(lower.len(), 0); assert_eq!(higher.len(), 3); @@ -132,7 +132,7 @@ mod tests { "### ); - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files.clone(), CompactionLevel::Initial); assert_eq!(lower.len(), 0); assert_eq!(higher.len(), 2); @@ -169,7 +169,7 @@ mod tests { "### ); - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files, CompactionLevel::Initial); insta::assert_yaml_snapshot!( @@ -228,7 +228,7 @@ mod tests { "### ); - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files, CompactionLevel::FileNonOverlapped); insta::assert_yaml_snapshot!( @@ -287,7 +287,7 @@ mod tests { "### ); - let split = TargetLevelTargetLevelSplit::new(); + let split = TargetLevelSplit::new(); let (lower, higher) = split.apply(files, CompactionLevel::Final); // verify number of files (nothing in higher) diff --git a/compactor2/src/components/files_split/target_level_upgrade_split.rs b/compactor2/src/components/files_split/upgrade_split.rs similarity index 96% rename from compactor2/src/components/files_split/target_level_upgrade_split.rs rename to compactor2/src/components/files_split/upgrade_split.rs index 6a3ea2bb59..5777e6d6d0 100644 --- a/compactor2/src/components/files_split/target_level_upgrade_split.rs +++ b/compactor2/src/components/files_split/upgrade_split.rs @@ -9,12 +9,12 @@ use crate::file_group::{overlaps_in_time, split_by_level, FilesTimeRange}; /// Split files into `[files_to_compact]` and `[files_to_upgrade]` /// To have better and efficient compaction performance, eligible upgradable files /// should not be compacted but only need to update its compaction_level to the target_level -pub struct TargetLevelUpgradeSplit { +pub struct UpgradeSplit { // Maximum desired file size (try and avoid compacting files above this size) max_desired_file_size_bytes: u64, } -impl TargetLevelUpgradeSplit { +impl UpgradeSplit { pub fn new(size: u64) -> Self { Self { max_desired_file_size_bytes: size, @@ -22,7 +22,7 @@ impl TargetLevelUpgradeSplit { } } -impl Display for TargetLevelUpgradeSplit { +impl Display for UpgradeSplit { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, @@ -32,7 +32,7 @@ impl Display for TargetLevelUpgradeSplit 
{ } } -impl FilesSplit for TargetLevelUpgradeSplit { +impl FilesSplit for UpgradeSplit { /// Return (`[files_to_compact]`, `[files_to_upgrade]`) of the given files /// so that `files_to_upgrade` does not overlap with any files in previous level /// @@ -156,7 +156,7 @@ mod tests { #[test] fn test_display() { assert_eq!( - TargetLevelUpgradeSplit::new(MAX_SIZE).to_string(), + UpgradeSplit::new(MAX_SIZE).to_string(), "Upgrade split for TargetLevel version - Size: 100" ); } @@ -164,7 +164,7 @@ mod tests { #[test] #[should_panic] fn test_wrong_target_level() { - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (_files_to_compact, _files_to_upgrade) = split.apply(vec![], CompactionLevel::Initial); } @@ -174,7 +174,7 @@ mod tests { )] fn test_unexpected_compaction_level_2() { let files = create_overlapped_files(); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); // There are L2 files and will panic split.apply(files, CompactionLevel::FileNonOverlapped); } @@ -185,14 +185,14 @@ mod tests { )] fn test_unexpected_compaction_level_0() { let files = create_overlapped_files(); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); // There are L0 files and will panic split.apply(files, CompactionLevel::Final); } #[test] fn test_apply_empty_files() { - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(vec![], CompactionLevel::FileNonOverlapped); assert_eq!((files_to_compact, files_to_upgrade), (vec![], vec![])); @@ -216,7 +216,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -250,7 +250,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -286,7 +286,7 @@ mod tests { - "L0.3[800,900] |-----L0.3------| " "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -319,7 +319,7 @@ mod tests { - "L0.3[800,900] |-----L0.3------| " "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -352,7 +352,7 @@ mod tests { - "L1.11[250,350] |-----L1.11-----| " "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -374,7 +374,7 @@ mod tests { #[test] fn test_apply_one_level_large_l1() { let files = create_l1_files((MAX_SIZE + 1) as i64); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::Final); // All files are large and eligible for upgrade @@ -415,7 +415,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::Final); // Some files 
are large and eligible for upgrade @@ -455,7 +455,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -497,7 +497,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -524,7 +524,7 @@ mod tests { #[test] fn test_apply_all_small_target_l2() { let files = create_overlapped_l1_l2_files((MAX_SIZE - 1) as i64); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::Final); // All files are small --> nothing to upgrade @@ -563,7 +563,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::Final); // All files are large --> L1.2 and L1.3 are eligible for upgrade @@ -606,7 +606,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::Final); insta::assert_yaml_snapshot!( @@ -647,7 +647,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::Final); insta::assert_yaml_snapshot!( format_files_split("files_to_compact", &files_to_compact, "files_to_upgrade", &files_to_upgrade), @@ -687,7 +687,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -731,7 +731,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -779,7 +779,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); @@ -828,7 +828,7 @@ mod tests { "### ); - let split = TargetLevelUpgradeSplit::new(MAX_SIZE); + let split = UpgradeSplit::new(MAX_SIZE); let (files_to_compact, files_to_upgrade) = split.apply(files, CompactionLevel::FileNonOverlapped); diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs index 32424fcb9e..19be5858f9 100644 --- a/compactor2/src/components/hardcoded.rs +++ b/compactor2/src/components/hardcoded.rs @@ -34,9 +34,8 @@ use super::{ }, file_filter::level_range::LevelRangeFileFilter, files_split::{ - target_level_non_overlap_split::TargetLevelNonOverlapSplit, - target_level_target_level_split::TargetLevelTargetLevelSplit, - target_level_upgrade_split::TargetLevelUpgradeSplit, + non_overlap_split::NonOverlapSplit, target_level_split::TargetLevelSplit, + upgrade_split::UpgradeSplit, }, id_only_partition_filter::{ and::AndIdOnlyPartitionFilter, shard::ShardPartitionFilter, IdOnlyPartitionFilter, @@ -343,8 +342,8 @@ fn make_partition_filters(config: &Config) -> Vec<Arc<dyn PartitionFilter>> { fn 
make_file_classifier(config: &Config) -> Arc<dyn FileClassifier> { Arc::new(SplitBasedFileClassifier::new( - TargetLevelTargetLevelSplit::new(), - TargetLevelNonOverlapSplit::new(), - TargetLevelUpgradeSplit::new(config.max_desired_file_size_bytes), + TargetLevelSplit::new(), + NonOverlapSplit::new(), + UpgradeSplit::new(config.max_desired_file_size_bytes), )) } diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs index c64da1819b..59a5dfdf1b 100644 --- a/compactor2/src/driver.rs +++ b/compactor2/src/driver.rs @@ -116,23 +116,18 @@ async fn compact_partition( /// /// The high level flow is: /// -/// . Mutiple rounds, each round has 1 branch -/// . Each branch will compact files lowest level (aka initial level) into its next level (aka target level): -/// - hot: (L0s & L1s) to L1s if there are L0s -/// - cold: (L1s & L2s) to L2s if no L0s +/// . Mutiple rounds, each round process mutltiple branches. Each branch inlcudes at most 200 files +/// . Each branch will compact files lowest level (aka start-level) into its next level (aka target-level): +/// - Many L0s into fewer and larger L0s. Start-level = target-level = 0 +/// - Many L1s into fewer and larger L1s. Start-level = target-level = 1 +/// - (L0s & L1s) to L1s if there are L0s. Start-level = 0, target-level = 1 +/// - (L1s & L2s) to L2s if no L0s. Start-level = 1, target-level = 2 /// . Each branch does find non-overlaps and upgragde files to avoid unecessary recompacting. /// The actually split files: -/// 1. files_higher_level: do not compact these files because they are already higher than target level -/// . Value: nothing for hot and L2s for cold -/// 2. files_non_overlap: do not compact these target-level files because they are not overlapped -/// with the initial-level files -/// . Value: non-overlapped L1s for hot and non-overlapped L2s for cold -/// . Definition of overlaps is defined in the split non-overlapped files function -/// 3. files_upgrade: upgrade this initial-level files to target level because they are not overlap with +/// 1. files_to _keep: do not compact these files because they are already higher than target level +/// 2. files_to_upgrade: upgrade this initial-level files to target level because they are not overlap with /// any target-level and initial-level files and large enough (> desired max size) -/// . value: non-overlapped L0s for hot and non-overlapped L1s for cold -/// 4. files_compact: the rest of the files that must be compacted -/// . Value: (L0s & L1s) for hot and (L1s & L2s) for cold +/// 3. files_to_compact: the rest of the files that must be compacted /// /// Example: 4 files: two L0s, two L1s and one L2 /// Input: @@ -141,8 +136,7 @@ async fn compact_partition( /// |----L2.1-----| /// /// - Round 1: There are L0s, let compact L0s with L1s. But let split them first: -/// . files_higher_level: L2.1 -/// . files_non_overlap: L1.1 +/// . files_higher_keep: L2.1 (higher leelve than targetlevel) and L1.1 (not overlapped wot any L0s) /// . files_upgrade: L0.2 /// . files_compact: L0.1, L1.2 /// Output: 4 files
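To make the renamed split components' contract concrete, here is a minimal sketch of what `TargetLevelSplit` from the diff above does: partition the input files into those at or below the target compaction level and those above it. The `ParquetFile` struct is a hypothetical stand-in carrying only the field the split needs; the real component operates on `data_types::ParquetFile` and the `CompactionLevel` enum.

```rust
/// Hypothetical stand-in for `data_types::ParquetFile`.
#[derive(Debug, Clone)]
struct ParquetFile {
    id: u64,
    compaction_level: u8,
}

/// Split `files` into (<= target_level, > target_level), mirroring the
/// behaviour of the renamed `TargetLevelSplit` component.
fn target_level_split(
    files: Vec<ParquetFile>,
    target_level: u8,
) -> (Vec<ParquetFile>, Vec<ParquetFile>) {
    files
        .into_iter()
        .partition(|f| f.compaction_level <= target_level)
}

fn main() {
    let files = vec![
        ParquetFile { id: 1, compaction_level: 0 },
        ParquetFile { id: 2, compaction_level: 1 },
        ParquetFile { id: 3, compaction_level: 2 },
    ];
    let (lower, higher) = target_level_split(files, 1);
    // Files 1 and 2 are eligible for compaction into the target level;
    // file 3 already sits above it and is kept aside.
    println!("lower: {lower:?}");
    println!("higher: {higher:?}");
    assert_eq!(lower.len(), 2);
    assert_eq!(higher.len(), 1);
}
```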
b9024582b0957620e8225c9e722457841edfbe7d
Stuart Carnie
2023-04-28 09:35:56
Ensure InfluxQL internal errors have a distinct message
Closes #7606
null
fix: Ensure InfluxQL internal errors have a distinct message Closes #7606
diff --git a/iox_query_influxql/src/plan/error.rs b/iox_query_influxql/src/plan/error.rs index c6797ce5c9..6b12535ec9 100644 --- a/iox_query_influxql/src/plan/error.rs +++ b/iox_query_influxql/src/plan/error.rs @@ -24,7 +24,11 @@ pub(crate) mod map { #[derive(Debug, Error)] enum PlannerError { /// An unexpected error that represents a bug in IOx. - #[error("internal: {0}")] + /// + /// The message is prefixed with `InfluxQL internal error: `, + /// which may be used by clients to identify internal InfluxQL + /// errors. + #[error("InfluxQL internal error: {0}")] Internal(String), } @@ -42,4 +46,17 @@ pub(crate) mod map { pub(crate) fn not_implemented(feature: impl Into<String>) -> DataFusionError { DataFusionError::NotImplemented(feature.into()) } + + #[cfg(test)] + mod test { + use crate::plan::error::map::PlannerError; + + #[test] + fn test_planner_error_display() { + // The InfluxQL internal error: + assert!(PlannerError::Internal("****".to_owned()) + .to_string() + .starts_with("InfluxQL internal error: ")) + } + } } diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs index 08a05f9bc0..da8f0f7f7c 100644 --- a/iox_query_influxql/src/plan/rewriter.rs +++ b/iox_query_influxql/src/plan/rewriter.rs @@ -1530,15 +1530,15 @@ mod test { let sel = parse_select("SELECT count(distinct('foo')) FROM cpu"); assert_error!(select_statement_info(&sel), DataFusionError::Plan(ref s) if s == "expected field argument in distinct()"); let sel = parse_select("SELECT count(distinct foo) FROM cpu"); - assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "internal: unexpected distinct clause in count"); + assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "InfluxQL internal error: unexpected distinct clause in count"); // Test rules for math functions let sel = parse_select("SELECT abs(usage_idle) FROM cpu"); select_statement_info(&sel).unwrap(); let sel = parse_select("SELECT abs(*) + ceil(foo) FROM cpu"); - assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "internal: unexpected wildcard"); + assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "InfluxQL internal error: unexpected wildcard"); let sel = parse_select("SELECT abs(/f/) + ceil(foo) FROM cpu"); - assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "internal: unexpected regex"); + assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "InfluxQL internal error: unexpected regex"); // Fallible @@ -1560,11 +1560,11 @@ mod test { // wildcard expansion is not supported in binary expressions for aggregates let sel = parse_select("SELECT count(*) + count(foo) FROM cpu"); - assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "internal: unexpected wildcard or regex"); + assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "InfluxQL internal error: unexpected wildcard or regex"); // regex expansion is not supported in binary expressions let sel = parse_select("SELECT sum(/foo/) + count(foo) FROM cpu"); - assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "internal: unexpected wildcard or regex"); + assert_error!(select_statement_info(&sel), DataFusionError::External(ref s) if s.to_string() == "InfluxQL internal error: unexpected 
wildcard or regex"); // aggregate functions require a field reference let sel = parse_select("SELECT sum(1) FROM cpu");
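As a compact illustration of the convention this commit establishes, the sketch below mirrors the `PlannerError` type from the diff: the `Display` output carries a stable `InfluxQL internal error: ` prefix that clients can match on. It assumes the `thiserror` crate, and the `internal` helper is a simplified stand-in for the module's real constructor (which additionally wraps the error in a `DataFusionError`).

```rust
use thiserror::Error;

/// Simplified mirror of the planner's internal error type.
#[derive(Debug, Error)]
enum PlannerError {
    /// The message is prefixed so clients can identify internal InfluxQL errors.
    #[error("InfluxQL internal error: {0}")]
    Internal(String),
}

/// Hypothetical helper mirroring the module's `internal` constructor,
/// minus the DataFusion wrapping.
fn internal(msg: impl Into<String>) -> PlannerError {
    PlannerError::Internal(msg.into())
}

fn main() {
    let err = internal("unexpected wildcard");
    let text = err.to_string();
    // Detection relies purely on the stable message prefix.
    assert!(text.starts_with("InfluxQL internal error: "));
    println!("{text}");
}
```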
254be59856fc1adcb3f2b1441d1757321d37cb7c
Marco Neumann
2022-11-01 14:22:33
enable ZSTD compression for write buffer payload
Closes #5981.
null
feat: enable ZSTD compression for write buffer payload Closes #5981.
diff --git a/Cargo.lock b/Cargo.lock index 907383279a..4e3652a148 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4283,10 +4283,10 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "rand", - "snap", "thiserror", "tokio", "tracing", + "zstd", ] [[package]] diff --git a/write_buffer/Cargo.toml b/write_buffer/Cargo.toml index 04aa760252..0b7c97f2ff 100644 --- a/write_buffer/Cargo.toml +++ b/write_buffer/Cargo.toml @@ -24,7 +24,7 @@ observability_deps = { path = "../observability_deps" } parking_lot = "0.12" pin-project = "1.0" prost = "0.11" -rskafka = { git = "https://github.com/influxdata/rskafka.git", rev="8678dfe049de05415929ffec7c1be8921bb057f7", default-features = false, features = ["compression-snappy", "transport-socks5"] } +rskafka = { git = "https://github.com/influxdata/rskafka.git", rev="8678dfe049de05415929ffec7c1be8921bb057f7", default-features = false, features = ["compression-zstd", "transport-socks5"] } schema = { path = "../schema" } tokio = { version = "1.21", features = ["fs", "macros", "parking_lot", "rt", "sync", "time"] } tokio-util = "0.7.4" diff --git a/write_buffer/src/kafka/instrumentation.rs b/write_buffer/src/kafka/instrumentation.rs index 283dc281fd..73c44785bc 100644 --- a/write_buffer/src/kafka/instrumentation.rs +++ b/write_buffer/src/kafka/instrumentation.rs @@ -211,7 +211,7 @@ mod tests { }; wrapper - .produce(vec![record.clone()], Compression::Snappy) + .produce(vec![record.clone()], Compression::Zstd) .await .expect("produce call should succeed"); @@ -261,7 +261,7 @@ mod tests { .with_time_provider(Arc::clone(&clock)); wrapper - .produce(Vec::new(), Compression::Snappy) + .produce(Vec::new(), Compression::Zstd) .await .expect_err("produce call should fail"); diff --git a/write_buffer/src/kafka/mod.rs b/write_buffer/src/kafka/mod.rs index 21b5500df8..1d470f2221 100644 --- a/write_buffer/src/kafka/mod.rs +++ b/write_buffer/src/kafka/mod.rs @@ -691,7 +691,7 @@ mod tests { headers: Default::default(), timestamp: rskafka::chrono::Utc::now(), }], - Compression::NoCompression, + Compression::Zstd, ) .await .unwrap();
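The commit itself only switches rskafka's compression feature flag and the `Compression::Zstd` variant; as a rough, standalone illustration of ZSTD-framing a write-buffer payload, here is a round trip using the `zstd` crate directly. This sketch is not part of the change above and assumes the `zstd` crate is available as a dependency.

```rust
fn main() -> std::io::Result<()> {
    // A repetitive line-protocol-like payload compresses well.
    let payload = b"m,host=a f=1i 1000\n".repeat(100);

    // Level 0 selects zstd's default compression level.
    let compressed = zstd::encode_all(&payload[..], 0)?;
    let restored = zstd::decode_all(&compressed[..])?;

    assert_eq!(restored, payload);
    println!(
        "payload: {} bytes, compressed: {} bytes",
        payload.len(),
        compressed.len()
    );
    Ok(())
}
```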
0556fdae536c03b9b7e9f90830907176f93dc937
Marco Neumann
2023-04-27 12:54:41
remove `QueryChunk::partition_sort_key` (#7680)
As of #7250 / #7449 the partition sort key is no longer required for query planning. Instead we use a combination of `QueryChunk::partition_id` and `QueryChunk::sort_key` which is more robust and easier to reason about. Removing it simplifies the querier code a lot since we no longer need to have a sort key for the ingester chunks and also don't need to "sync" the sort key between chunks for consistency.
null
refactor: remove `QueryChunk::partition_sort_key` (#7680) As of #7250 / #7449 the partition sort key is no longer required for query planning. Instead we use a combination of `QueryChunk::partition_id` and `QueryChunk::sort_key` which is more robust and easier to reason about. Removing it simplifies the querier code a lot since we no longer need to have a sort key for the ingester chunks and also don't need to "sync" the sort key between chunks for consistency.
diff --git a/compactor2/src/components/df_planner/query_chunk.rs b/compactor2/src/components/df_planner/query_chunk.rs index 4209acce8d..dadeda0b5b 100644 --- a/compactor2/src/components/df_planner/query_chunk.rs +++ b/compactor2/src/components/df_planner/query_chunk.rs @@ -25,19 +25,16 @@ pub struct QueryableParquetChunk { delete_predicates: Vec<Arc<DeletePredicate>>, partition_id: PartitionId, sort_key: Option<SortKey>, - partition_sort_key: Option<SortKey>, order: ChunkOrder, summary: Arc<TableSummary>, } impl QueryableParquetChunk { /// Initialize a QueryableParquetChunk - #[allow(clippy::too_many_arguments)] pub fn new( partition_id: PartitionId, data: Arc<ParquetChunk>, sort_key: Option<SortKey>, - partition_sort_key: Option<SortKey>, order: ChunkOrder, ) -> Self { let summary = Arc::new(create_basic_summary( @@ -50,7 +47,6 @@ impl QueryableParquetChunk { delete_predicates: vec![], partition_id, sort_key, - partition_sort_key, order, summary, } @@ -80,10 +76,6 @@ impl QueryChunkMeta for QueryableParquetChunk { self.data.schema() } - fn partition_sort_key(&self) -> Option<&SortKey> { - self.partition_sort_key.as_ref() - } - fn partition_id(&self) -> PartitionId { self.partition_id } @@ -218,11 +210,5 @@ fn to_queryable_parquet_chunk( ); let parquet_chunk = ParquetChunk::new(Arc::new(file.file.clone()), schema, store); - QueryableParquetChunk::new( - partition_id, - Arc::new(parquet_chunk), - sort_key, - partition_info.sort_key.clone(), - file.order, - ) + QueryableParquetChunk::new(partition_id, Arc::new(parquet_chunk), sort_key, file.order) } diff --git a/ingester2/src/query_adaptor.rs b/ingester2/src/query_adaptor.rs index 44b32d43ca..8e93855243 100644 --- a/ingester2/src/query_adaptor.rs +++ b/ingester2/src/query_adaptor.rs @@ -127,10 +127,6 @@ impl QueryChunkMeta for QueryAdaptor { &self.schema } - fn partition_sort_key(&self) -> Option<&SortKey> { - None // Ingester data has not persisted yet and should not be attached to any partition - } - fn partition_id(&self) -> PartitionId { self.partition_id } diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index 132c148e02..c3a952d2fd 100644 --- a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -63,10 +63,6 @@ pub trait QueryChunkMeta { /// return a reference to the summary of the data held in this chunk fn schema(&self) -> &Schema; - /// Return a reference to the chunk's partition sort key if any. 
- /// Only persisted chunk has its partition sort key - fn partition_sort_key(&self) -> Option<&SortKey>; - /// Return partition id for this chunk fn partition_id(&self) -> PartitionId; @@ -317,10 +313,6 @@ where self.as_ref().sort_key() } - fn partition_sort_key(&self) -> Option<&SortKey> { - self.as_ref().partition_sort_key() - } - fn delete_predicates(&self) -> &[Arc<DeletePredicate>] { let pred = self.as_ref().delete_predicates(); debug!(?pred, "Delete predicate in QueryChunkMeta"); @@ -346,10 +338,6 @@ impl QueryChunkMeta for Arc<dyn QueryChunk> { self.as_ref().sort_key() } - fn partition_sort_key(&self) -> Option<&SortKey> { - self.as_ref().partition_sort_key() - } - fn delete_predicates(&self) -> &[Arc<DeletePredicate>] { let pred = self.as_ref().delete_predicates(); debug!(?pred, "Delete predicate in QueryChunkMeta"); diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index abe8a81bf6..82c7a9aea0 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -343,9 +343,6 @@ pub struct TestChunk { /// The sort key of this chunk sort_key: Option<SortKey>, - /// The partition sort key of this chunk - partition_sort_key: Option<SortKey>, - /// Suppress output quiet: bool, } @@ -427,7 +424,6 @@ impl TestChunk { delete_predicates: Default::default(), order: ChunkOrder::MIN, sort_key: None, - partition_sort_key: None, partition_id: PartitionId::new(0), quiet: false, } @@ -1133,14 +1129,6 @@ impl TestChunk { } } - /// Set the partition sort key for this chunk - pub fn with_partition_sort_key(self, sort_key: SortKey) -> Self { - Self { - partition_sort_key: Some(sort_key), - ..self - } - } - /// Returns all columns of the table pub fn all_column_names(&self) -> StringSet { self.schema @@ -1252,10 +1240,6 @@ impl QueryChunkMeta for TestChunk { &self.schema } - fn partition_sort_key(&self) -> Option<&SortKey> { - self.partition_sort_key.as_ref() - } - fn partition_id(&self) -> PartitionId { self.partition_id } diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index 50dd83e30d..4344d8d84c 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -236,7 +236,6 @@ struct IngesterResponseOk { struct ObserveIngesterRequest<'a> { res: Option<Result<IngesterResponseOk, ()>>, t_start: Time, - time_provider: Arc<dyn TimeProvider>, metrics: Arc<IngesterConnectionMetrics>, request: GetPartitionForIngester<'a>, span_recorder: SpanRecorder, @@ -248,14 +247,12 @@ impl<'a> ObserveIngesterRequest<'a> { metrics: Arc<IngesterConnectionMetrics>, span_recorder: &SpanRecorder, ) -> Self { - let time_provider = request.catalog_cache.time_provider(); - let t_start = time_provider.now(); + let t_start = request.time_provider.now(); let span_recorder = span_recorder.child("flight request"); Self { res: None, t_start, - time_provider, metrics, request, span_recorder, @@ -279,7 +276,7 @@ impl<'a> ObserveIngesterRequest<'a> { impl<'a> Drop for ObserveIngesterRequest<'a> { fn drop(&mut self) { - let t_end = self.time_provider.now(); + let t_end = self.request.time_provider.now(); if let Some(ingester_duration) = t_end.checked_duration_since(self.t_start) { let (metric, status, ok_status) = match self.res { @@ -314,7 +311,7 @@ impl<'a> Drop for ObserveIngesterRequest<'a> { pub struct IngesterConnectionImpl { unique_ingester_addresses: HashSet<Arc<str>>, flight_client: Arc<dyn IngesterFlightClient>, - catalog_cache: Arc<CatalogCache>, + time_provider: Arc<dyn TimeProvider>, metrics: Arc<IngesterConnectionMetrics>, backoff_config: BackoffConfig, } @@ -362,7 +359,7 @@ impl 
IngesterConnectionImpl { Self { unique_ingester_addresses: ingester_addresses.into_iter().collect(), flight_client, - catalog_cache, + time_provider: catalog_cache.time_provider(), metrics, backoff_config, } @@ -373,7 +370,7 @@ impl IngesterConnectionImpl { #[derive(Debug, Clone)] struct GetPartitionForIngester<'a> { flight_client: Arc<dyn IngesterFlightClient>, - catalog_cache: Arc<CatalogCache>, + time_provider: Arc<dyn TimeProvider>, ingester_address: Arc<str>, namespace_id: NamespaceId, columns: Vec<String>, @@ -388,7 +385,7 @@ async fn execute( ) -> Result<Vec<IngesterPartition>> { let GetPartitionForIngester { flight_client, - catalog_cache, + time_provider: _, ingester_address, namespace_id, columns, @@ -477,15 +474,14 @@ async fn execute( // reconstruct partitions let mut decoder = IngesterStreamDecoder::new( ingester_address, - catalog_cache, cached_table, span_recorder.child_span("IngesterStreamDecoder"), ); for (msg, md) in messages { - decoder.register(msg, md).await?; + decoder.register(msg, md)?; } - decoder.finalize().await + decoder.finalize() } /// Helper to disassemble the data from the ingester Apache Flight arrow stream. @@ -497,25 +493,18 @@ struct IngesterStreamDecoder { current_partition: Option<IngesterPartition>, current_chunk: Option<(Schema, Vec<RecordBatch>)>, ingester_address: Arc<str>, - catalog_cache: Arc<CatalogCache>, cached_table: Arc<CachedTable>, span_recorder: SpanRecorder, } impl IngesterStreamDecoder { /// Create empty decoder. - fn new( - ingester_address: Arc<str>, - catalog_cache: Arc<CatalogCache>, - cached_table: Arc<CachedTable>, - span: Option<Span>, - ) -> Self { + fn new(ingester_address: Arc<str>, cached_table: Arc<CachedTable>, span: Option<Span>) -> Self { Self { finished_partitions: HashMap::new(), current_partition: None, current_chunk: None, ingester_address, - catalog_cache, cached_table, span_recorder: SpanRecorder::new(span), } @@ -538,40 +527,10 @@ impl IngesterStreamDecoder { /// Flush current partition, if any. /// /// This will also flush the current chunk. - async fn flush_partition(&mut self) -> Result<()> { + fn flush_partition(&mut self) -> Result<()> { self.flush_chunk()?; if let Some(current_partition) = self.current_partition.take() { - let schemas: Vec<_> = current_partition - .chunks() - .iter() - .map(|c| c.schema()) - .collect(); - let primary_keys: Vec<_> = schemas.iter().map(|s| s.primary_key()).collect(); - let primary_key: Vec<_> = primary_keys - .iter() - .flat_map(|pk| pk.iter()) - // cache may be older then the ingester response status, so some entries might be missing - .filter_map(|name| { - self.cached_table - .column_id_map_rev - .get(&Arc::from(name.to_owned())) - }) - .copied() - .collect(); - let partition_sort_key = self - .catalog_cache - .partition() - .sort_key( - Arc::clone(&self.cached_table), - current_partition.partition_id(), - &primary_key, - self.span_recorder - .child_span("cache GET partition sort key"), - ) - .await - .map(|sort_key| Arc::clone(&sort_key.sort_key)); - let current_partition = current_partition.with_partition_sort_key(partition_sort_key); self.finished_partitions .insert(current_partition.partition_id, current_partition); } @@ -580,7 +539,7 @@ impl IngesterStreamDecoder { } /// Register a new message and its metadata from the Flight stream. 
- async fn register( + fn register( &mut self, msg: DecodedPayload, md: IngesterQueryResponseMetadata, @@ -588,7 +547,7 @@ impl IngesterStreamDecoder { match msg { DecodedPayload::None => { // new partition announced - self.flush_partition().await?; + self.flush_partition()?; let partition_id = PartitionId::new(md.partition_id); let status = md.status.context(PartitionStatusMissingSnafu { @@ -664,8 +623,8 @@ impl IngesterStreamDecoder { } /// Flush internal state and return sorted set of partitions. - async fn finalize(mut self) -> Result<Vec<IngesterPartition>> { - self.flush_partition().await?; + fn finalize(mut self) -> Result<Vec<IngesterPartition>> { + self.flush_partition()?; let mut ids: Vec<_> = self.finished_partitions.keys().copied().collect(); ids.sort(); @@ -718,7 +677,7 @@ impl IngesterConnection for IngesterConnectionImpl { let metrics = Arc::clone(&metrics); let request = GetPartitionForIngester { flight_client: Arc::clone(&self.flight_client), - catalog_cache: Arc::clone(&self.catalog_cache), + time_provider: Arc::clone(&self.time_provider), ingester_address: Arc::clone(&ingester_address), namespace_id, cached_table: Arc::clone(&cached_table), @@ -901,19 +860,6 @@ impl IngesterPartition { Ok(self) } - /// Update partition sort key - pub(crate) fn with_partition_sort_key(self, partition_sort_key: Option<Arc<SortKey>>) -> Self { - Self { - partition_sort_key: partition_sort_key.clone(), - chunks: self - .chunks - .into_iter() - .map(|c| c.with_partition_sort_key(partition_sort_key.clone())) - .collect(), - ..self - } - } - pub(crate) fn ingester_uuid(&self) -> Option<Uuid> { self.ingester_uuid } @@ -978,21 +924,6 @@ pub struct IngesterChunk { } impl IngesterChunk { - /// [`Arc`]ed version of the partition sort key. - /// - /// Note that this might NOT be the up-to-date sort key of the partition but the one that existed when the chunk was - /// created. You must sync the keys to use the chunks. - pub(crate) fn partition_sort_key_arc(&self) -> Option<Arc<SortKey>> { - self.partition_sort_key.clone() - } - - pub(crate) fn with_partition_sort_key(self, partition_sort_key: Option<Arc<SortKey>>) -> Self { - Self { - partition_sort_key, - ..self - } - } - pub(crate) fn estimate_size(&self) -> usize { self.batches .iter() @@ -1024,10 +955,6 @@ impl QueryChunkMeta for IngesterChunk { &self.schema } - fn partition_sort_key(&self) -> Option<&SortKey> { - self.partition_sort_key.as_ref().map(|sk| sk.as_ref()) - } - fn partition_id(&self) -> PartitionId { self.partition_id } diff --git a/querier/src/parquet/creation.rs b/querier/src/parquet/creation.rs index 9987a98bdd..a49cb53c67 100644 --- a/querier/src/parquet/creation.rs +++ b/querier/src/parquet/creation.rs @@ -236,10 +236,6 @@ impl ChunkAdapter { self.catalog_cache.parquet_store(), )); - Some(QuerierParquetChunk::new( - parquet_chunk, - meta, - Some(Arc::clone(&partition_sort_key.sort_key)), - )) + Some(QuerierParquetChunk::new(parquet_chunk, meta)) } } diff --git a/querier/src/parquet/mod.rs b/querier/src/parquet/mod.rs index 37b89eacde..cf0823d669 100644 --- a/querier/src/parquet/mod.rs +++ b/querier/src/parquet/mod.rs @@ -71,9 +71,6 @@ pub struct QuerierParquetChunk { /// Delete predicates to be combined with the chunk delete_predicates: Vec<Arc<DeletePredicate>>, - /// Partition sort key (how does the read buffer use this?) 
- partition_sort_key: Option<Arc<SortKey>>, - /// Chunk of the Parquet file parquet_chunk: Arc<ParquetChunk>, @@ -83,11 +80,7 @@ pub struct QuerierParquetChunk { impl QuerierParquetChunk { /// Create new parquet-backed chunk (object store data). - pub fn new( - parquet_chunk: Arc<ParquetChunk>, - meta: Arc<QuerierParquetChunkMeta>, - partition_sort_key: Option<Arc<SortKey>>, - ) -> Self { + pub fn new(parquet_chunk: Arc<ParquetChunk>, meta: Arc<QuerierParquetChunkMeta>) -> Self { let table_summary = Arc::new(create_basic_summary( parquet_chunk.rows() as u64, parquet_chunk.schema(), @@ -97,7 +90,6 @@ impl QuerierParquetChunk { Self { meta, delete_predicates: Vec::new(), - partition_sort_key, parquet_chunk, table_summary, } @@ -116,22 +108,6 @@ impl QuerierParquetChunk { self.meta.as_ref() } - /// [`Arc`]ed version of the partition sort key. - /// - /// Note that this might NOT be the up-to-date sort key of the partition but the one that existed when the chunk was - /// created. You must sync the keys to use the chunks. - pub fn partition_sort_key_arc(&self) -> Option<Arc<SortKey>> { - self.partition_sort_key.clone() - } - - /// Set partition sort key - pub fn with_partition_sort_key(self, partition_sort_key: Option<Arc<SortKey>>) -> Self { - Self { - partition_sort_key, - ..self - } - } - pub fn estimate_size(&self) -> usize { self.parquet_chunk.parquet_file().file_size_bytes as usize } diff --git a/querier/src/parquet/query_access.rs b/querier/src/parquet/query_access.rs index 18923b9ffc..1e12d2b82b 100644 --- a/querier/src/parquet/query_access.rs +++ b/querier/src/parquet/query_access.rs @@ -18,10 +18,6 @@ impl QueryChunkMeta for QuerierParquetChunk { self.parquet_chunk.schema() } - fn partition_sort_key(&self) -> Option<&SortKey> { - self.partition_sort_key.as_ref().map(|sk| sk.as_ref()) - } - fn partition_id(&self) -> PartitionId { self.meta().partition_id() } diff --git a/querier/src/table/state_reconciler.rs b/querier/src/table/state_reconciler.rs index 3b22495cc7..221b5a9ae2 100644 --- a/querier/src/table/state_reconciler.rs +++ b/querier/src/table/state_reconciler.rs @@ -2,18 +2,14 @@ mod interface; -use data_types::{DeletePredicate, PartitionId}; +use data_types::DeletePredicate; use iox_query::QueryChunk; use observability_deps::tracing::debug; -use schema::sort::SortKey; use snafu::Snafu; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; +use std::sync::Arc; use trace::span::{Span, SpanRecorder}; -use crate::{ingester::IngesterChunk, parquet::QuerierParquetChunk, IngesterPartition}; +use crate::{parquet::QuerierParquetChunk, IngesterPartition}; #[derive(Snafu, Debug)] #[allow(missing_copy_implementations)] @@ -58,13 +54,6 @@ impl Reconciler { chunks.extend(self.build_ingester_chunks(ingester_partitions, retention_delete_pred)); debug!(num_chunks=%chunks.len(), "Final chunk count after reconcilation"); - let chunks = self.sync_partition_sort_keys(chunks); - - let chunks: Vec<Arc<dyn QueryChunk>> = chunks - .into_iter() - .map(|c| c.upcast_to_querier_chunk().into()) - .collect(); - Ok(chunks) } @@ -74,7 +63,7 @@ impl Reconciler { retention_delete_pred: Option<Arc<DeletePredicate>>, parquet_files: Vec<QuerierParquetChunk>, _span: Option<Span>, - ) -> Result<Vec<Box<dyn UpdatableQuerierChunk>>, ReconcileError> { + ) -> Result<Vec<Arc<dyn QueryChunk>>, ReconcileError> { debug!( namespace=%self.namespace_name(), table_name=%self.table_name(), @@ -84,7 +73,7 @@ impl Reconciler { debug!(num_chunks=%parquet_files.len(), "Created chunks from parquet files"); - let mut 
chunks: Vec<Box<dyn UpdatableQuerierChunk>> = + let mut chunks: Vec<Arc<dyn QueryChunk>> = Vec::with_capacity(parquet_files.len() + ingester_partitions.len()); let retention_expr_len = usize::from(retention_delete_pred.is_some()); @@ -97,7 +86,7 @@ impl Reconciler { let chunk = chunk.with_delete_predicates(delete_predicates); - chunks.push(Box::new(chunk) as Box<dyn UpdatableQuerierChunk>); + chunks.push(Arc::new(chunk)); } Ok(chunks) @@ -107,7 +96,7 @@ impl Reconciler { &self, ingester_partitions: Vec<IngesterPartition>, retention_delete_pred: Option<Arc<DeletePredicate>>, - ) -> impl Iterator<Item = Box<dyn UpdatableQuerierChunk>> { + ) -> impl Iterator<Item = Arc<dyn QueryChunk>> { // Add ingester chunks to the overall chunk list. // - filter out chunks that don't have any record batches ingester_partitions @@ -119,43 +108,7 @@ impl Reconciler { }; c.into_chunks().into_iter() }) - .map(|c| Box::new(c) as Box<dyn UpdatableQuerierChunk>) - } - - fn sync_partition_sort_keys( - &self, - chunks: Vec<Box<dyn UpdatableQuerierChunk>>, - ) -> Vec<Box<dyn UpdatableQuerierChunk>> { - // collect latest (= longest) sort key - // Note that the partition sort key may stale (only a subset of the most recent partition - // sort key) because newer chunks have new columns. - // However, since the querier doesn't (yet) know about these chunks in the `chunks` list above - // using the most up to date sort key from the chunks it does know about is sufficient. - let mut sort_keys = HashMap::<PartitionId, Arc<SortKey>>::new(); - for c in &chunks { - if let Some(sort_key) = c.partition_sort_key_arc() { - match sort_keys.entry(c.partition_id()) { - Entry::Occupied(mut o) => { - if sort_key.len() > o.get().len() { - *o.get_mut() = sort_key; - } - } - Entry::Vacant(v) => { - v.insert(sort_key); - } - } - } - } - - // write partition sort keys to chunks - chunks - .into_iter() - .map(|chunk| { - let partition_id = chunk.partition_id(); - let sort_key = sort_keys.get(&partition_id); - chunk.update_partition_sort_key(sort_key.cloned()) - }) - .collect() + .map(|c| Arc::new(c) as Arc<dyn QueryChunk>) } #[must_use] @@ -168,94 +121,3 @@ impl Reconciler { self.namespace_name.as_ref() } } - -trait UpdatableQuerierChunk: QueryChunk { - fn partition_sort_key_arc(&self) -> Option<Arc<SortKey>>; - - fn update_partition_sort_key( - self: Box<Self>, - sort_key: Option<Arc<SortKey>>, - ) -> Box<dyn UpdatableQuerierChunk>; - - fn upcast_to_querier_chunk(self: Box<Self>) -> Box<dyn QueryChunk>; -} - -impl UpdatableQuerierChunk for QuerierParquetChunk { - fn partition_sort_key_arc(&self) -> Option<Arc<SortKey>> { - self.partition_sort_key_arc() - } - - fn update_partition_sort_key( - self: Box<Self>, - sort_key: Option<Arc<SortKey>>, - ) -> Box<dyn UpdatableQuerierChunk> { - Box::new(self.with_partition_sort_key(sort_key)) - } - - fn upcast_to_querier_chunk(self: Box<Self>) -> Box<dyn QueryChunk> { - self as _ - } -} - -impl UpdatableQuerierChunk for IngesterChunk { - fn partition_sort_key_arc(&self) -> Option<Arc<SortKey>> { - self.partition_sort_key_arc() - } - - fn update_partition_sort_key( - self: Box<Self>, - sort_key: Option<Arc<SortKey>>, - ) -> Box<dyn UpdatableQuerierChunk> { - Box::new(self.with_partition_sort_key(sort_key)) - } - - fn upcast_to_querier_chunk(self: Box<Self>) -> Box<dyn QueryChunk> { - self as _ - } -} - -#[cfg(test)] -mod tests { - use super::{ - interface::{IngesterPartitionInfo, ParquetFileInfo}, - *, - }; - use data_types::{CompactionLevel, SequenceNumber}; - - #[derive(Debug)] - struct 
MockIngesterPartitionInfo { - partition_id: PartitionId, - parquet_max_sequence_number: Option<SequenceNumber>, - } - - impl IngesterPartitionInfo for MockIngesterPartitionInfo { - fn partition_id(&self) -> PartitionId { - self.partition_id - } - - fn parquet_max_sequence_number(&self) -> Option<SequenceNumber> { - self.parquet_max_sequence_number - } - } - - #[derive(Debug, Clone, PartialEq, Eq)] - struct MockParquetFileInfo { - partition_id: PartitionId, - max_sequence_number: SequenceNumber, - compaction_level: CompactionLevel, - } - - impl ParquetFileInfo for MockParquetFileInfo { - fn partition_id(&self) -> PartitionId { - self.partition_id - } - - fn max_sequence_number(&self) -> SequenceNumber { - self.max_sequence_number - } - - fn compaction_level(&self) -> CompactionLevel { - self.compaction_level - } - } -}
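For reference, the reconciler step removed in the diff above kept, for each partition, the longest sort key seen across that partition's chunks and wrote it back to every chunk. Below is a minimal, standalone sketch of that "longest key wins" merge; the `PartitionId` and `SortKey` aliases are simplified stand-ins, not the real `data_types`/`schema` types.

// Illustrative sketch of the removed `sync_partition_sort_keys` merge logic.
// `PartitionId` and `SortKey` are simplified stand-ins for the real types.
use std::collections::{hash_map::Entry, HashMap};
use std::sync::Arc;

type PartitionId = i64;
type SortKey = Vec<String>; // ordered list of sort columns

/// Keep the longest sort key seen per partition. A longer key is assumed to
/// be the more recent one, because partition sort keys only grow as new
/// columns appear in newer chunks.
fn merge_sort_keys(
    chunk_keys: impl IntoIterator<Item = (PartitionId, Arc<SortKey>)>,
) -> HashMap<PartitionId, Arc<SortKey>> {
    let mut merged: HashMap<PartitionId, Arc<SortKey>> = HashMap::new();
    for (partition_id, key) in chunk_keys {
        match merged.entry(partition_id) {
            Entry::Occupied(mut o) => {
                if key.len() > o.get().len() {
                    *o.get_mut() = key;
                }
            }
            Entry::Vacant(v) => {
                v.insert(key);
            }
        }
    }
    merged
}

fn main() {
    let keys = vec![
        (1, Arc::new(vec!["host".to_string(), "time".to_string()])),
        (
            1,
            Arc::new(vec![
                "host".to_string(),
                "region".to_string(),
                "time".to_string(),
            ]),
        ),
    ];
    // Partition 1 ends up with the longer, three-column key.
    assert_eq!(merge_sort_keys(keys)[&1].len(), 3);
}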
bf1681f4fe99f70edb3b1e98a09396c64f7ccbd7
Luke Bond
2022-11-16 11:21:28
compactor ignores max file count for first file (#6144)
* feat: compactor ignores max file count for first file * chore: typo in comment in compactor
null
feat: compactor ignores max file count for first file (#6144) * feat: compactor ignores max file count for first file * chore: typo in comment in compactor
diff --git a/compactor/src/lib.rs b/compactor/src/lib.rs index 66a44556e8..b95148eca2 100644 --- a/compactor/src/lib.rs +++ b/compactor/src/lib.rs @@ -156,33 +156,6 @@ async fn compact_candidates_with_memory_budget<C, Fut>( FilterResult::NothingToCompact => { debug!(?partition_id, compaction_type, "nothing to compact"); } - FilterResult::OverLimitFileNum { - num_files, - budget_bytes, - } => { - // We cannot compact this partition because its first set of overlapped files - // are over the limit of file num - warn!( - ?partition_id, - ?table_id, - compaction_type, - num_files, - budget_bytes, - file_num_limit = compactor.config.max_num_compacting_files, - memory_budget_bytes = compactor.config.memory_budget_bytes, - "skipped; over limit of number of files" - ); - record_skipped_compaction( - partition_id, - Arc::clone(&compactor), - "over limit of num_files", - num_files, - compactor.config.max_num_compacting_files, - budget_bytes, - compactor.config.memory_budget_bytes, - ) - .await; - } FilterResult::OverBudget { budget_bytes: needed_bytes, num_files, diff --git a/compactor/src/parquet_file_filtering.rs b/compactor/src/parquet_file_filtering.rs index 354a3c5181..1a1d96ff30 100644 --- a/compactor/src/parquet_file_filtering.rs +++ b/compactor/src/parquet_file_filtering.rs @@ -19,10 +19,6 @@ pub(crate) struct FilteredFiles { #[derive(Debug, PartialEq)] pub(crate) enum FilterResult { NothingToCompact, - OverLimitFileNum { - num_files: usize, - budget_bytes: u64, - }, OverBudget { budget_bytes: u64, num_files: usize, @@ -187,20 +183,17 @@ fn filter_parquet_files_inner( // Over limit of num files // At this stage files_to_return only includes LN+1 files. To get both returning LN+1 and // LN files, we need to consider both files_to_return and level_n_to_return + // The file limit only applies after the first file, because we want to avoid the situation + // where a file (and its overlapping files) will never get processed. In this scenario, + // however, we do not proceed to pick up any more files other than this first one (and its + // overlaps) if files_to_return.len() + level_n_to_return.len() + 1 /* LN file */ + overlaps.len() > max_num_files + && !(files_to_return.is_empty() && level_n_to_return.is_empty()) { - if files_to_return.is_empty() && level_n_to_return.is_empty() { - // Cannot compact this partition because its first set of overlapped files - // exceed the file limit - return FilterResult::OverLimitFileNum { - num_files: 1 + overlaps.len(), - budget_bytes: budget_for_current_ln_and_ln_plus, - }; - } else { - // Only compact files that are under limit number of files - break; - } + // Only compact files that are under limit number of files if this is not the first + // file in the partition + break; } else if total_estimated_budget + budget_for_current_ln_and_ln_plus > max_bytes { // Over budget if total_estimated_budget == 0 { @@ -222,8 +215,10 @@ fn filter_parquet_files_inner( // Only compact the level 1 files under the desired file size break; } else { - // Still under budget and under limit of number of files. - // We do not add memory budget for output stream yet becasue it will change when we add + // Still under budget and under limit of number of files, OR + // it's the first LN file and it is over the max file limit but we have to do it anyway. 
+ // + // We do not add memory budget for output stream yet because it will change when we add // more files total_estimated_budget += estimated_processing_file_bytes; ln_estimated_budget.push(ln_estimated_file_bytes); @@ -646,7 +641,7 @@ mod tests { } #[test] - fn file_num_0_returns_over_file_limit() { + fn file_num_0_over_file_limit_but_proceeds() { let level_0 = vec![ParquetFileBuilder::level_0().id(1).build()]; let level_1 = vec![]; let (files_metric, bytes_metric) = metrics(); @@ -659,45 +654,209 @@ mod tests { level_0, level_1, MEMORY_BUDGET, - 0, + 0, // note max file count is zero DEFAULT_MAX_OUTPUT_SIZE, &files_metric, &bytes_metric, ); - assert_eq!( - filter_result, - FilterResult::OverLimitFileNum { - num_files: 1, - budget_bytes: expected_estimated_budget // one input stream, one output stream - } + // we will process this despite > max file count because it's the first + assert!( + matches!( + &filter_result, + FilterResult::Proceed { files, budget_bytes } + if files.len() == 1 + && files.clone().pop().unwrap().id().get() == 1 + && *budget_bytes == expected_estimated_budget + ), + "Match failed, got: {filter_result:?}" + ); + } + + #[test] + fn file_num_0_over_file_limit_but_proceeds_but_no_more() { + let level_0 = vec![ + ParquetFileBuilder::level_0() + .id(1) + .min_time(200) + .max_time(300) + .build(), + // second file that shouldn't get picked up for processing + ParquetFileBuilder::level_0().id(2).build(), + ]; + let level_1 = vec![ + // overlaps with L0 id 1 + ParquetFileBuilder::level_1() + .id(102) + .min_time(250) + .max_time(350) + .build(), + ]; + let (files_metric, bytes_metric) = metrics(); + + let bytes_stored_2_files_in_memory = 2 * (2 * level_0[0].file_size_bytes()) as u64; + let expected_estimated_budget = bytes_stored_2_files_in_memory + 3 * ESTIMATED_STREAM_BYTES; // two input streams, one output stream + + let filter_result = filter_parquet_files_inner( + level_0, + level_1, + MEMORY_BUDGET, + 0, // note max file count is zero + DEFAULT_MAX_OUTPUT_SIZE, + &files_metric, + &bytes_metric, + ); + + // L0 file and its overlapping files get picked despite > max file count because it's the + // first file + assert!( + matches!( + &filter_result, + FilterResult::Proceed { files, budget_bytes } + if files.len() == 2 + // L0 file + its overlapping L1 file but not the next L0 file even though we + // have the memory for it + && files.iter().map(|f| f.id().get()).collect::<Vec<_>>() == [102, 1] + && *budget_bytes == expected_estimated_budget + ), + "Match failed, got: {filter_result:?}" + ); + } + + #[test] + fn file_num_limit_1_over_file_limit_but_proceeds_but_no_more() { + // in this test it's the L1 file that pushes us over max file count, but it still proceeds + // because it overlaps the first L0 file + let level_0 = vec![ + ParquetFileBuilder::level_0() + .id(1) + .min_time(200) + .max_time(300) + .build(), + // second file that shouldn't get picked up for processing + ParquetFileBuilder::level_0().id(2).build(), + ]; + let level_1 = vec![ + // Completely contains the level 0 times + ParquetFileBuilder::level_1() + .id(102) + .min_time(150) + .max_time(350) + .build(), + ]; + let (files_metric, bytes_metric) = metrics(); + + let bytes_stored_2_files_in_memory = 2 * (2 * level_0[0].file_size_bytes()) as u64; + let expected_estimated_budget = bytes_stored_2_files_in_memory + 3 * ESTIMATED_STREAM_BYTES; // two input streams, one output stream + + let filter_result = filter_parquet_files_inner( + level_0, + level_1, + MEMORY_BUDGET, + 1, + DEFAULT_MAX_OUTPUT_SIZE, 
+ &files_metric, + &bytes_metric, + ); + + assert!( + matches!( + &filter_result, + FilterResult::Proceed { files, budget_bytes } + if files.len() == 2 + && files.iter().map(|f| f.id().get()).collect::<Vec<_>>() == [102, 1] + && *budget_bytes == expected_estimated_budget + ), + "Match failed, got: {filter_result:?}" ); } #[test] - fn file_num_limit_1_return_over_file_limit() { + fn file_num_limit_second_l1_over_file_limit_but_proceeds_but_no_more() { + // in this test it's a second L1 file that pushes us over max file count, but it still + // proceeds because it overlaps the first L0 file + let level_0 = vec![ + ParquetFileBuilder::level_0() + .id(1) + .min_time(200) + .max_time(300) + .build(), + // second file that shouldn't get picked up for processing + ParquetFileBuilder::level_0().id(2).build(), + ]; + let level_1 = vec![ + // both overlap the L0 file + ParquetFileBuilder::level_1() + .id(102) + .min_time(150) + .max_time(250) + .build(), + ParquetFileBuilder::level_1() + .id(103) + .min_time(250) + .max_time(350) + .build(), + ]; + let (files_metric, bytes_metric) = metrics(); + + let bytes_stored_3_files_in_memory = 3 * (2 * level_0[0].file_size_bytes()) as u64; + let expected_estimated_budget = bytes_stored_3_files_in_memory + 4 * ESTIMATED_STREAM_BYTES; // three input streams, one output stream + + let filter_result = filter_parquet_files_inner( + level_0, + level_1, + MEMORY_BUDGET, + 2, + DEFAULT_MAX_OUTPUT_SIZE, + &files_metric, + &bytes_metric, + ); + + assert!( + matches!( + &filter_result, + FilterResult::Proceed { files, budget_bytes } + if files.len() == 3 + && files.iter().map(|f| f.id().get()).collect::<Vec<_>>() == [102, 103, 1] + && *budget_bytes == expected_estimated_budget + ), + "Match failed, got: {filter_result:?}" + ); + } + + #[test] + fn file_num_limit_first_over_file_limit_but_insufficient_budget() { + // in this test the first file and its overlaps push us over the file limit, which usually + // means they're included anyway but in this case we don't have the memory budget let level_0 = vec![ParquetFileBuilder::level_0() .id(1) .min_time(200) .max_time(300) .build()]; let level_1 = vec![ - // Completely contains the level 0 times + // both overlap the L0 file ParquetFileBuilder::level_1() .id(102) .min_time(150) + .max_time(250) + .build(), + ParquetFileBuilder::level_1() + .id(103) + .min_time(250) .max_time(350) .build(), ]; let (files_metric, bytes_metric) = metrics(); - let bytes_stored_2_files_in_memory = 2 * (2 * level_0[0].file_size_bytes()) as u64; - let expected_estimated_budget = bytes_stored_2_files_in_memory + 3 * ESTIMATED_STREAM_BYTES; // two input streams, one output stream + let bytes_stored_3_files_in_memory = 3 * (2 * level_0[0].file_size_bytes()) as u64; + let expected_estimated_budget = bytes_stored_3_files_in_memory + 4 * ESTIMATED_STREAM_BYTES; // three input streams, one output stream let filter_result = filter_parquet_files_inner( level_0, level_1, - MEMORY_BUDGET, + // enough for 2 files and one output stream + (ESTIMATED_STREAM_BYTES * 3) + (FILE_SIZE_BYTES as u64 * 2 * 2) + 1, + // max file count not a factor here 1, DEFAULT_MAX_OUTPUT_SIZE, &files_metric, @@ -706,9 +865,9 @@ mod tests { assert_eq!( filter_result, - FilterResult::OverLimitFileNum { - num_files: 2, - budget_bytes: expected_estimated_budget + FilterResult::OverBudget { + budget_bytes: expected_estimated_budget, + num_files: 3 } ); } @@ -1126,7 +1285,7 @@ mod tests { } ); - // Test 2: increase udget to fit all files + // Test 2: increase budget to fit all files let 
(files_metric, bytes_metric) = metrics(); let bytes_stored_6_files_in_memory = 6 * (2 * level_0[0].file_size_bytes()) as u64;
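As an aside, the selection rule this commit describes (the max-file-count limit is ignored for the first file and its overlaps, but no further files are picked up once the limit is exceeded) can be sketched in isolation as below. The `FileSet` type and function names are hypothetical, and the memory-budget check is omitted for brevity.

// Minimal sketch (not the real compactor code) of the rule described above:
// the file-count limit does not apply to the first file and its overlapping
// files, so an oversized first overlap set is still compacted, but nothing
// beyond it is selected in that case.
#[derive(Debug, Clone, PartialEq)]
struct FileSet {
    ids: Vec<i64>, // one L0 file plus the L1 files it overlaps
}

fn select_file_sets(candidates: &[FileSet], max_num_files: usize) -> Vec<FileSet> {
    let mut selected: Vec<FileSet> = Vec::new();
    let mut total_files = 0;

    for set in candidates {
        let over_limit = total_files + set.ids.len() > max_num_files;
        // The limit only applies after the first set; otherwise a large
        // overlap set would never get processed.
        if over_limit && !selected.is_empty() {
            break;
        }
        total_files += set.ids.len();
        selected.push(set.clone());
        if over_limit {
            // The first set exceeded the limit on its own: take it, but stop here.
            break;
        }
    }
    selected
}

fn main() {
    let candidates = vec![
        FileSet { ids: vec![1, 102, 103] }, // first L0 file and its two overlaps
        FileSet { ids: vec![2] },
    ];
    // With a limit of 2, the oversized first set is still selected, but nothing after it.
    assert_eq!(select_file_sets(&candidates, 2).len(), 1);
    // With a generous limit, both sets fit.
    assert_eq!(select_file_sets(&candidates, 4).len(), 2);
}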
870d2830da48cbd84533fdad7b6a43c4bdd681a8
Nga Tran
2023-03-10 11:08:42
upgrade upgradable files when there is nothing else to compact or split (#7174)
* feat: upgrade upgradable files when there is nothing else to compact or split * chore: fix a typo in a comment
null
feat: upgrade upgradable files when there is nothing else to compact or split (#7174) * feat: upgrade upgradable files when there is nothing else to compact or split * chore: fix a typo in a comment
diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs index 6243744934..a3635df7e4 100644 --- a/compactor2/src/driver.rs +++ b/compactor2/src/driver.rs @@ -206,13 +206,15 @@ async fn try_compact_partition( .file_classifier .classify(&partition_info, &round_info, branch); - if !components - .partition_resource_limit_filter - .apply( - &partition_info, - &file_classification.files_to_compact_or_split.files(), - ) - .await? + // Skip partition if it has neither files to upgrade nor files to compact or split + if !file_classification.has_upgrade_files() + && !components + .partition_resource_limit_filter + .apply( + &partition_info, + &file_classification.files_to_compact_or_split.files(), + ) + .await? { return Ok(()); } diff --git a/compactor2/src/file_classification.rs b/compactor2/src/file_classification.rs index 3623fa9413..2d7e6c7dc1 100644 --- a/compactor2/src/file_classification.rs +++ b/compactor2/src/file_classification.rs @@ -39,6 +39,10 @@ impl FileClassification { FilesToCompactOrSplit::FilesToSplit(files) => files.len(), } } + + pub fn has_upgrade_files(&self) -> bool { + !self.files_to_upgrade.is_empty() + } } /// Files to compact or to split diff --git a/compactor2/tests/layouts/large_files.rs b/compactor2/tests/layouts/large_files.rs index dcf6340063..cb2d412b7e 100644 --- a/compactor2/tests/layouts/large_files.rs +++ b/compactor2/tests/layouts/large_files.rs @@ -34,6 +34,7 @@ async fn one_larger_max_file_size() { ) .await; + // Large L1 is upgraded to L2 insta::assert_yaml_snapshot!( run_layout_scenario(&setup).await, @r###" @@ -41,10 +42,11 @@ async fn one_larger_max_file_size() { - "**** Input Files " - "L1, all files 100mb " - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|" - - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L2: L1.1" - "**** Final Output Files " - - "L1, all files 100mb " - - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|" + - "L2, all files 100mb " + - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|" "### ); } @@ -73,6 +75,8 @@ async fn one_l0_larger_max_file_size() { ) .await; + // Large L0 is upgraded to L1 and then L2 + // Note: a parquet file including level 0 does not include duplicated data and no need to go over compaction insta::assert_yaml_snapshot!( run_layout_scenario(&setup).await, @r###" @@ -80,10 +84,13 @@ async fn one_l0_larger_max_file_size() { - "**** Input Files " - "L0, all files 100mb " - "L0.1[1,1000] 1ns |------------------------------------------L0.1------------------------------------------|" - - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L1: L0.1" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L2: L1.1" - "**** Final Output Files " - - "L0, all files 100mb " - - "L0.1[1,1000] 1ns |------------------------------------------L0.1------------------------------------------|" + - "L2, all files 100mb " + - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|" "### ); } @@ -113,6 +120,7 @@ async fn one_larger_max_compact_size() { ) .await; + // Large L1 is upgraded to L2 insta::assert_yaml_snapshot!( run_layout_scenario(&setup).await, @r###" @@ -121,11 +129,12 @@ async fn one_larger_max_compact_size() { - "L1, all files 300mb " - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|" - "WARNING: file L1.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%" - - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L2: L1.1" - "**** Final Output Files " - - "L1, all files 300mb " - - "L1.1[1,1000] 1ns |------------------------------------------L1.1------------------------------------------|" - - "WARNING: file L1.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%" + - "L2, all files 300mb " + - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|" + - "WARNING: file L2.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%" "### ); } @@ -155,6 +164,7 @@ async fn one_l0_larger_max_compact_size() { ) .await; + // Large L0 will be upgraded to L1 and then L2 insta::assert_yaml_snapshot!( run_layout_scenario(&setup).await, @r###" @@ -163,11 +173,14 @@ async fn one_l0_larger_max_compact_size() { - "L0, all files 300mb " - "L0.1[1,1000] 1ns |------------------------------------------L0.1------------------------------------------|" - "WARNING: file L0.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%" - - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L1: L0.1" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L2: L1.1" - "**** Final Output Files " - - "L0, all files 300mb " - - "L0.1[1,1000] 1ns |------------------------------------------L0.1------------------------------------------|" - - "WARNING: file L0.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%" + - "L2, all files 300mb " + - "L2.1[1,1000] 1ns |------------------------------------------L2.1------------------------------------------|" + - "WARNING: file L2.1[1,1000] 1ns 300mb exceeds soft limit 100mb by more than 50%" "### ); } diff --git a/compactor2/tests/layouts/single_timestamp.rs b/compactor2/tests/layouts/single_timestamp.rs index fbb2c8d57f..1b9e7e38a5 100644 --- a/compactor2/tests/layouts/single_timestamp.rs +++ b/compactor2/tests/layouts/single_timestamp.rs @@ -28,6 +28,7 @@ async fn single_giant_file() { ) .await; + // L0 file is upgraded to L1 and then L2 insta::assert_yaml_snapshot!( run_layout_scenario(&setup).await, @r###" @@ -36,11 +37,14 @@ async fn single_giant_file() { - "L0, all files 4.88gb " - "L0.1[100,100] 1ns |------------------------------------------L0.1------------------------------------------|" - "WARNING: file L0.1[100,100] 1ns 4.88gb exceeds soft limit 100mb by more than 50%" - - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 268435456. The may happen if a large amount of data has the same timestamp" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L1: L0.1" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L2: L1.1" - "**** Final Output Files " - - "L0, all files 4.88gb " - - "L0.1[100,100] 1ns |------------------------------------------L0.1------------------------------------------|" - - "WARNING: file L0.1[100,100] 1ns 4.88gb exceeds soft limit 100mb by more than 50%" + - "L2, all files 4.88gb " + - "L2.1[100,100] 1ns |------------------------------------------L2.1------------------------------------------|" + - "WARNING: file L2.1[100,100] 1ns 4.88gb exceeds soft limit 100mb by more than 50%" "### ); } @@ -322,6 +326,8 @@ async fn many_small_files() { .await; } + // L0s are compacted into a single L1 file. It can't be split becasue of single timestamp + // Then the L1 is large enough to get upgraded to L2 insta::assert_yaml_snapshot!( run_layout_scenario(&setup).await, @r###" @@ -376,11 +382,12 @@ async fn many_small_files() { - "Committing partition 1:" - " Soft Deleting 20 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20" - " Creating 1 files at level CompactionLevel::L1" - - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 268435456. 
The may happen if a large amount of data has the same timestamp" + - "Committing partition 1:" + - " Upgrading 1 files level to CompactionLevel::L2: L1.21" - "**** Final Output Files " - - "L1, all files 200mb " - - "L1.21[100,100] 1ns |-----------------------------------------L1.21------------------------------------------|" - - "WARNING: file L1.21[100,100] 1ns 200mb exceeds soft limit 100mb by more than 50%" + - "L2, all files 200mb " + - "L2.21[100,100] 1ns |-----------------------------------------L2.21------------------------------------------|" + - "WARNING: file L2.21[100,100] 1ns 200mb exceeds soft limit 100mb by more than 50%" "### ); }
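A minimal sketch of the new skip condition, assuming simplified stand-in types (this is not the real compactor2 code): a partition is only skipped when it has neither files to upgrade nor an accepted set of files to compact or split, so a lone oversized file can still be upgraded level by level instead of being reported as a skipped compaction.

// Hedged sketch of the classification check added above; names are simplified.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CompactionLevel {
    L0,
    L1,
    L2,
}

#[derive(Debug, Default)]
struct FileClassification {
    files_to_compact_or_split: Vec<i64>,
    files_to_upgrade: Vec<i64>,
}

impl FileClassification {
    fn has_upgrade_files(&self) -> bool {
        !self.files_to_upgrade.is_empty()
    }
}

/// Returns true when the partition should be skipped for this round.
fn should_skip(classification: &FileClassification, passes_resource_limits: bool) -> bool {
    // Only skip when there is nothing to upgrade AND the compact/split set is
    // rejected (or empty); upgrades alone are cheap metadata-only changes.
    !classification.has_upgrade_files() && !passes_resource_limits
}

fn next_level(level: CompactionLevel) -> CompactionLevel {
    match level {
        CompactionLevel::L0 => CompactionLevel::L1,
        CompactionLevel::L1 | CompactionLevel::L2 => CompactionLevel::L2,
    }
}

fn main() {
    let only_upgrade = FileClassification {
        files_to_compact_or_split: vec![],
        files_to_upgrade: vec![1],
    };
    // Even though the resource-limit filter rejects the (empty) compact set,
    // the partition is not skipped because an upgrade is pending.
    assert!(!should_skip(&only_upgrade, false));
    assert_eq!(next_level(CompactionLevel::L0), CompactionLevel::L1);
}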
5ac80bf4a58f47c35ed69ad00bd6cf64b7cc90ad
Stuart Carnie
2022-12-02 16:37:41
Add mutable visitor to InfluxQL parser crate (#6292)
* feat: Add mutable visitor to InfluxQL parser crate * chore: Add missing snapshots * chore: Fix docs * chore: Fix test visitor
null
feat: Add mutable visitor to InfluxQL parser crate (#6292) * feat: Add mutable visitor to InfluxQL parser crate * chore: Add missing snapshots * chore: Fix docs * chore: Fix test visitor
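Before the diff itself, a hedged, self-contained sketch of the general mutable-visitor idea this commit introduces (the types and method names below are simplified illustrations, not the actual influxdb_influxql_parser::visit_mut API): paired pre/post hooks receive `&mut` access to each node, so a pass can rewrite the tree in place while the walk drives traversal order.

// Generic mutable-visitor sketch over a tiny stand-in expression tree.
#[derive(Debug, PartialEq)]
enum Expr {
    VarRef { name: String },
    Binary { lhs: Box<Expr>, rhs: Box<Expr> },
}

trait VisitorMut {
    fn pre_visit_expr(&mut self, _expr: &mut Expr) {}
    fn post_visit_expr(&mut self, _expr: &mut Expr) {}
}

// Depth-first walk: pre-visit the node, recurse into children, post-visit.
fn walk_expr_mut<V: VisitorMut>(expr: &mut Expr, visitor: &mut V) {
    visitor.pre_visit_expr(expr);
    if let Expr::Binary { lhs, rhs } = expr {
        walk_expr_mut(lhs, visitor);
        walk_expr_mut(rhs, visitor);
    }
    visitor.post_visit_expr(expr);
}

/// Example pass: lower-case every variable reference name in place.
struct LowercaseNames;

impl VisitorMut for LowercaseNames {
    fn pre_visit_expr(&mut self, expr: &mut Expr) {
        if let Expr::VarRef { name } = expr {
            *name = name.to_lowercase();
        }
    }
}

fn main() {
    let mut expr = Expr::Binary {
        lhs: Box::new(Expr::VarRef { name: "Usage_Idle".into() }),
        rhs: Box::new(Expr::VarRef { name: "HOST".into() }),
    };
    walk_expr_mut(&mut expr, &mut LowercaseNames);
    if let Expr::Binary { lhs, .. } = &expr {
        assert_eq!(**lhs, Expr::VarRef { name: "usage_idle".into() });
    }
}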
diff --git a/influxdb_influxql_parser/src/lib.rs b/influxdb_influxql_parser/src/lib.rs index a02d0214d9..f3d95ca9e2 100644 --- a/influxdb_influxql_parser/src/lib.rs +++ b/influxdb_influxql_parser/src/lib.rs @@ -46,6 +46,7 @@ pub mod simple_from_clause; pub mod statement; pub mod string; pub mod visit; +pub mod visit_mut; /// A error returned when parsing an InfluxQL query using /// [`parse_statements`] fails. diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-2.snap new file mode 100644 index 0000000000..dbe8e5c59b --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-2.snap @@ -0,0 +1,21 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"DELETE WHERE 'foo bar' =~ /foo/\")" +--- +- "pre_visit_statement: Delete(Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })))" +- "pre_visit_delete_statement: Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }))" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }" +- "pre_visit_conditional_expression: Expr(Literal(String(\"foo bar\")))" +- "pre_visit_expr: Literal(String(\"foo bar\"))" +- "post_visit_expr: Literal(String(\"foo bar\"))" +- "post_visit_conditional_expression: Expr(Literal(String(\"foo bar\")))" +- "pre_visit_conditional_expression: Expr(Literal(Regex(Regex(\"foo\"))))" +- "pre_visit_expr: Literal(Regex(Regex(\"foo\")))" +- "post_visit_expr: Literal(Regex(Regex(\"foo\")))" +- "post_visit_conditional_expression: Expr(Literal(Regex(Regex(\"foo\"))))" +- "post_visit_conditional_expression: Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })" +- "post_visit_delete_statement: Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) }))" +- "post_visit_statement: Delete(Where(WhereClause(Binary { lhs: Expr(Literal(String(\"foo bar\"))), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"foo\")))) })))" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-3.snap new file mode 100644 index 0000000000..bce7e56959 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-3.snap @@ -0,0 +1,13 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"DELETE FROM cpu\")" +--- +- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None })" +- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None }" +- "pre_visit_delete_from: OneOrMore { contents: 
[Name(Identifier(\"cpu\"))] }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }" +- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None }" +- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"cpu\"))] }, condition: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-4.snap new file mode 100644 index 0000000000..a938f7e58a --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement-4.snap @@ -0,0 +1,13 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"DELETE FROM /^cpu/\")" +--- +- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None })" +- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None }" +- "pre_visit_delete_from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }" +- "pre_visit_measurement_name: Regex(Regex(\"^cpu\"))" +- "post_visit_measurement_name: Regex(Regex(\"^cpu\"))" +- "post_visit_delete_from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }" +- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None }" +- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Regex(Regex(\"^cpu\"))] }, condition: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement.snap new file mode 100644 index 0000000000..c49d971b0a --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__delete_statement.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"DELETE FROM a WHERE b = \\\"c\\\"\")" +--- +- "pre_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) })" +- "pre_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) }" +- "pre_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"a\"))] }" +- "pre_visit_measurement_name: Name(Identifier(\"a\"))" +- "post_visit_measurement_name: Name(Identifier(\"a\"))" +- "post_visit_delete_from: OneOrMore { contents: [Name(Identifier(\"a\"))] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) }" +- 
"pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"b\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"b\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"b\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"b\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"c\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"c\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"c\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"c\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })" +- "post_visit_delete_statement: FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) }" +- "post_visit_statement: Delete(FromWhere { from: OneOrMore { contents: [Name(Identifier(\"a\"))] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"b\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"c\"), data_type: None }) })) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__drop_measurement_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__drop_measurement_statement.snap new file mode 100644 index 0000000000..9ae2ef6f40 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__drop_measurement_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"DROP MEASUREMENT cpu\")" +--- +- "pre_visit_statement: DropMeasurement(DropMeasurementStatement { name: Identifier(\"cpu\") })" +- "pre_visit_drop_measurement_statement: DropMeasurementStatement { name: Identifier(\"cpu\") }" +- "post_visit_drop_measurement_statement: DropMeasurementStatement { name: Identifier(\"cpu\") }" +- "post_visit_statement: DropMeasurement(DropMeasurementStatement { name: Identifier(\"cpu\") })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__explain_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__explain_statement.snap new file mode 100644 index 0000000000..4c55ec0817 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__explain_statement.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"EXPLAIN SELECT * FROM cpu\")" +--- +- "pre_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: 
None, timezone: None } })" +- "pre_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "pre_visit_expr: Wildcard(None)" +- "post_visit_expr: Wildcard(None)" +- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_explain_statement: ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } }" +- "post_visit_statement: Explain(ExplainStatement { options: None, select: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: 
Name(Identifier(\"cpu\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None } })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-2.snap new file mode 100644 index 0000000000..81c240850e --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-2.snap @@ -0,0 +1,23 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(r#\"SELECT DISTINCT value FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }" +- "pre_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }" +- "pre_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_select_field: Field { expr: Distinct(Identifier(\"value\")), alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: 
None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Distinct(Identifier(\"value\")), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-3.snap new file mode 100644 index 0000000000..d75ead33e6 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-3.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(r#\"SELECT COUNT(value) FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }" +- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }" +- "pre_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }" +- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" 
+- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [VarRef { name: Identifier(\"value\"), data_type: None }] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-4.snap new file mode 100644 index 0000000000..c64f0963ec --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-4.snap @@ -0,0 +1,25 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(r#\"SELECT COUNT(DISTINCT value) FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }" +- "pre_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }" +- "pre_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }" +- "pre_visit_expr: Distinct(Identifier(\"value\"))" +- "post_visit_expr: Distinct(Identifier(\"value\"))" +- 
"post_visit_expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }" +- "post_visit_select_field: Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Call { name: \"COUNT\", args: [Distinct(Identifier(\"value\"))] }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-5.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-5.snap new file mode 100644 index 0000000000..9e471c7171 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-5.snap @@ -0,0 +1,29 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(r#\"SELECT * FROM /cpu/, memory\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), 
alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "pre_visit_expr: Wildcard(None)" +- "post_visit_expr: Wildcard(None)" +- "post_visit_select_field: Field { expr: Wildcard(None), alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) })" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"memory\"))" +- "post_visit_measurement_name: Name(Identifier(\"memory\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: Wildcard(None), alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, 
retention_policy: None, name: Regex(Regex(\"cpu\")) }), Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"memory\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap new file mode 100644 index 0000000000..0fa2ad88e0 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap @@ -0,0 +1,93 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: 
Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }" +- "pre_visit_select_measurement_selection: Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }" +- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }" +- "pre_visit_expr: VarRef { name: Identifier(\"usage\"), data_type: None }" +- "post_visit_expr: VarRef { name: 
Identifier(\"usage\"), data_type: None }" +- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"node1\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"node1\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"node1\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_select_measurement_selection: Subquery(SelectStatement { fields: 
OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "post_visit_select_from_clause: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })" +- "pre_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"region\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"region\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"region\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"region\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(Regex(Regex(\"west\"))))" +- "pre_visit_expr: Literal(Regex(Regex(\"west\")))" +- "post_visit_expr: Literal(Regex(Regex(\"west\")))" +- "post_visit_conditional_expression: Expr(Literal(Regex(Regex(\"west\"))))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"value\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"value\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(Unsigned(5)))" +- "pre_visit_expr: Literal(Unsigned(5))" +- "post_visit_expr: Literal(Unsigned(5))" +- 
"post_visit_conditional_expression: Expr(Literal(Unsigned(5)))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) }" +- "post_visit_conditional_expression: Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })" +- "pre_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" +- "pre_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "pre_visit_expr: Literal(Duration(Duration(300000000000)))" +- "post_visit_expr: Literal(Duration(Duration(300000000000)))" +- "post_visit_select_dimension: Time { interval: Literal(Duration(Duration(300000000000))), offset: None }" +- "pre_visit_select_dimension: Tag(Identifier(\"host\"))" +- "post_visit_select_dimension: Tag(Identifier(\"host\"))" +- "post_visit_group_by_clause: OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }" +- "pre_visit_fill_clause: Previous" +- "post_visit_fill_clause: Previous" +- "pre_visit_order_by_clause: Descending" +- "post_visit_order_by_clause: Descending" +- "pre_visit_limit_clause: LimitClause(1)" +- "post_visit_limit_clause: LimitClause(1)" +- "pre_visit_offset_clause: OffsetClause(2)" +- "post_visit_offset_clause: OffsetClause(2)" +- "pre_visit_slimit_clause: SLimitClause(3)" +- "post_visit_slimit_clause: SLimitClause(3)" +- "pre_visit_soffset_clause: SOffsetClause(4)" +- "post_visit_soffset_clause: SOffsetClause(4)" +- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" +- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), 
offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Subquery(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(OneOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement.snap new file mode 100644 index 0000000000..561de5db42 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement.snap @@ -0,0 +1,23 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(r#\"SELECT value FROM temp\"#)" +--- +- "pre_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" +- "pre_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "pre_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "pre_visit_expr: VarRef { name: 
Identifier(\"value\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" +- "post_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" +- "post_visit_select_field_list: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" +- "pre_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "pre_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_measurement_name: Name(Identifier(\"temp\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) }" +- "post_visit_select_measurement_selection: Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })" +- "post_visit_select_from_clause: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }" +- "post_visit_select_statement: SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None }" +- "post_visit_statement: Select(SelectStatement { fields: OneOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: OneOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"temp\")) })] }, condition: None, group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_databases_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_databases_statement.snap new file mode 100644 index 0000000000..268d0efd98 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_databases_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW DATABASES\")" +--- +- "pre_visit_statement: ShowDatabases(ShowDatabasesStatement)" +- "pre_visit_show_databases_statement: ShowDatabasesStatement" +- "post_visit_show_databases_statement: ShowDatabasesStatement" +- "post_visit_statement: ShowDatabases(ShowDatabasesStatement)" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-2.snap new file mode 100644 index 0000000000..c2986d698a --- /dev/null +++ 
b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW FIELD KEYS ON telegraf\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-3.snap new file mode 100644 index 0000000000..06c4b3e334 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-3.snap @@ -0,0 +1,15 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW FIELD KEYS FROM cpu\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None }" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-4.snap 
b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-4.snap new file mode 100644 index 0000000000..37e68b2ecb --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement-4.snap @@ -0,0 +1,17 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW FIELD KEYS ON telegraf FROM /cpu/\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "pre_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_measurement_name: Regex(Regex(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"cpu\")) }] }), limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement.snap new file mode 100644 index 0000000000..a928d262fc --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_field_keys_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW FIELD KEYS\")" +--- +- "pre_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None })" +- "pre_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None }" +- "post_visit_show_field_keys_statement: ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None }" +- "post_visit_statement: ShowFieldKeys(ShowFieldKeysStatement { database: None, from: None, limit: None, offset: None })" + diff --git 
a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-2.snap new file mode 100644 index 0000000000..07ae5aa155 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS ON db.rp\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None }" +- "pre_visit_extended_on_clause: DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))" +- "post_visit_extended_on_clause: DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(DatabaseRetentionPolicy(Identifier(\"db\"), Identifier(\"rp\"))), with_measurement: None, condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-3.snap new file mode 100644 index 0000000000..6bba991287 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-3.snap @@ -0,0 +1,15 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS WITH MEASUREMENT = \\\"cpu\\\"\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None }" +- "pre_visit_with_measurement_clause: Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_with_measurement_clause: Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, 
with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: Some(Equals(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-4.snap new file mode 100644 index 0000000000..10414c4c93 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-4.snap @@ -0,0 +1,21 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS WHERE host = 'west'\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "pre_visit_expr: Literal(String(\"west\"))" +- "post_visit_expr: Literal(String(\"west\"))" +- "post_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: None, offset: None })" + diff --git 
a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-5.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-5.snap new file mode 100644 index 0000000000..d187802250 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-5.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS LIMIT 5\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None }" +- "pre_visit_limit_clause: LimitClause(5)" +- "post_visit_limit_clause: LimitClause(5)" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: Some(LimitClause(5)), offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-6.snap new file mode 100644 index 0000000000..bd41e93d52 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-6.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS OFFSET 10\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) }" +- "pre_visit_offset_clause: OffsetClause(10)" +- "post_visit_offset_clause: OffsetClause(10)" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: Some(OffsetClause(10)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-7.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-7.snap new file mode 100644 index 0000000000..79a7c70148 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement-7.snap @@ -0,0 +1,33 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS ON * WITH MEASUREMENT =~ /foo/ WHERE host = 'west' LIMIT 10 OFFSET 20\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: 
Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) }" +- "pre_visit_extended_on_clause: AllDatabases" +- "post_visit_extended_on_clause: AllDatabases" +- "pre_visit_with_measurement_clause: Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) }" +- "pre_visit_measurement_name: Regex(Regex(\"foo\"))" +- "post_visit_measurement_name: Regex(Regex(\"foo\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) }" +- "post_visit_with_measurement_clause: Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "pre_visit_expr: Literal(String(\"west\"))" +- "post_visit_expr: Literal(String(\"west\"))" +- "post_visit_conditional_expression: Expr(Literal(String(\"west\")))" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })" +- "pre_visit_limit_clause: LimitClause(10)" +- "post_visit_limit_clause: LimitClause(10)" +- "pre_visit_offset_clause: OffsetClause(20)" +- "post_visit_offset_clause: OffsetClause(20)" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: Some(AllDatabases), with_measurement: Some(Regex(QualifiedMeasurementName { database: None, retention_policy: None, name: Regex(Regex(\"foo\")) })), condition: 
Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(Literal(String(\"west\"))) })), limit: Some(LimitClause(10)), offset: Some(OffsetClause(20)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement.snap new file mode 100644 index 0000000000..4ae2dbf8eb --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_measurements_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW MEASUREMENTS\")" +--- +- "pre_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None })" +- "pre_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None }" +- "post_visit_show_measurements_statement: ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowMeasurements(ShowMeasurementsStatement { on: None, with_measurement: None, condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_retention_policies_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_retention_policies_statement-2.snap new file mode 100644 index 0000000000..4ca3219b46 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_retention_policies_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW RETENTION POLICIES ON telegraf\")" +--- +- "pre_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) })" +- "pre_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "post_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) }" +- "post_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: Some(OnClause(Identifier(\"telegraf\"))) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_retention_policies_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_retention_policies_statement.snap new file mode 100644 index 0000000000..d6b3cf9028 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_retention_policies_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW RETENTION POLICIES\")" +--- +- "pre_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: None })" +- "pre_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: None }" +- "post_visit_show_retention_policies_statement: ShowRetentionPoliciesStatement { database: None }" +- 
"post_visit_statement: ShowRetentionPolicies(ShowRetentionPoliciesStatement { database: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_keys_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_keys_statement-2.snap new file mode 100644 index 0000000000..ef9096dbc0 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_keys_statement-2.snap @@ -0,0 +1,33 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW TAG KEYS ON telegraf FROM cpu WHERE host = \\\"west\\\" LIMIT 5 OFFSET 10\")" +--- +- "pre_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })" +- "pre_visit_show_tag_keys_statement: ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"west\"), data_type: 
None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_limit_clause: LimitClause(5)" +- "post_visit_limit_clause: LimitClause(5)" +- "pre_visit_offset_clause: OffsetClause(10)" +- "post_visit_offset_clause: OffsetClause(10)" +- "post_visit_show_tag_keys_statement: ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "post_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_keys_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_keys_statement.snap new file mode 100644 index 0000000000..52182ef54a --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_keys_statement.snap @@ -0,0 +1,9 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW TAG KEYS\")" +--- +- "pre_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_keys_statement: ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None }" +- "post_visit_show_tag_keys_statement: ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagKeys(ShowTagKeysStatement { database: None, from: None, condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-2.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-2.snap new file mode 100644 index 0000000000..93529ee774 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-2.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY =~ /host|region/\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None })" +- 
"pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None }" +- "pre_visit_with_key_clause: EqRegex(Regex(\"host|region\"))" +- "post_visit_with_key_clause: EqRegex(Regex(\"host|region\"))" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: EqRegex(Regex(\"host|region\")), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-3.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-3.snap new file mode 100644 index 0000000000..8e5b0e20db --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-3.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY IN (host, region)\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None }" +- "pre_visit_with_key_clause: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] })" +- "post_visit_with_key_clause: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] })" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: In(OneOrMore { contents: [Identifier(\"host\"), Identifier(\"region\")] }), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-4.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-4.snap new file mode 100644 index 0000000000..2f270f47d3 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement-4.snap @@ -0,0 +1,35 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW TAG VALUES ON telegraf FROM cpu WITH KEY = host WHERE host = \\\"west\\\" LIMIT 5 OFFSET 10\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: 
Some(OffsetClause(10)) })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_on_clause: OnClause(Identifier(\"telegraf\"))" +- "pre_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "pre_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_measurement_name: Name(Identifier(\"cpu\"))" +- "post_visit_qualified_measurement_name: QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }" +- "post_visit_show_from_clause: OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }" +- "pre_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "post_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "pre_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"host\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"host\"), data_type: None })" +- "pre_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "pre_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }" +- "post_visit_expr: VarRef { name: Identifier(\"west\"), data_type: None }" +- "post_visit_conditional_expression: Expr(VarRef { name: Identifier(\"west\"), data_type: None })" +- "post_visit_conditional_expression: Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) }" +- "post_visit_where_clause: WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })" +- "pre_visit_limit_clause: LimitClause(5)" +- "post_visit_limit_clause: LimitClause(5)" +- "pre_visit_offset_clause: OffsetClause(10)" +- "post_visit_offset_clause: OffsetClause(10)" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: 
Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: Some(OnClause(Identifier(\"telegraf\"))), from: Some(OneOrMore { contents: [QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) }] }), with_key: Eq(Identifier(\"host\")), condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"west\"), data_type: None }) })), limit: Some(LimitClause(5)), offset: Some(OffsetClause(10)) })" + diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement.snap new file mode 100644 index 0000000000..2167fc4ee8 --- /dev/null +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__show_tag_values_statement.snap @@ -0,0 +1,11 @@ +--- +source: influxdb_influxql_parser/src/visit_mut.rs +expression: "visit_statement!(\"SHOW TAG VALUES WITH KEY = host\")" +--- +- "pre_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None })" +- "pre_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None }" +- "pre_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "post_visit_with_key_clause: Eq(Identifier(\"host\"))" +- "post_visit_show_tag_values_statement: ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None }" +- "post_visit_statement: ShowTagValues(ShowTagValuesStatement { database: None, from: None, with_key: Eq(Identifier(\"host\")), condition: None, limit: None, offset: None })" + diff --git a/influxdb_influxql_parser/src/visit.rs b/influxdb_influxql_parser/src/visit.rs index 6870a47eed..2e44984bae 100644 --- a/influxdb_influxql_parser/src/visit.rs +++ b/influxdb_influxql_parser/src/visit.rs @@ -21,6 +21,7 @@ //! let vis = MyVisitor; //! statement.accept(vis); //! ``` +use self::Recursion::*; use crate::common::{ LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, WhereClause, @@ -45,7 +46,6 @@ use crate::show_tag_keys::ShowTagKeysStatement; use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; use crate::statement::Statement; -use crate::visit::Recursion::*; /// The result type for a [`Visitor`]. 
pub type VisitorResult<T, E = &'static str> = Result<T, E>; @@ -1168,6 +1168,8 @@ impl Visitable for OnClause { #[cfg(test)] mod test { + use super::Recursion::Continue; + use super::{Recursion, Visitable, Visitor, VisitorResult}; use crate::common::{ LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, WhereClause, @@ -1191,8 +1193,6 @@ mod test { use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; use crate::statement::{statement, Statement}; - use crate::visit::Recursion::Continue; - use crate::visit::{Recursion, Visitable, Visitor, VisitorResult}; use std::fmt::Debug; struct TestVisitor(Vec<String>); diff --git a/influxdb_influxql_parser/src/visit_mut.rs b/influxdb_influxql_parser/src/visit_mut.rs new file mode 100644 index 0000000000..728a182c25 --- /dev/null +++ b/influxdb_influxql_parser/src/visit_mut.rs @@ -0,0 +1,1725 @@ +//! The visit module provides API for walking the AST. +//! +//! # Example +//! +//! ``` +//! use influxdb_influxql_parser::visit_mut::{VisitableMut, VisitorMut, VisitorResult}; +//! use influxdb_influxql_parser::parse_statements; +//! use influxdb_influxql_parser::common::WhereClause; +//! +//! struct MyVisitor; +//! +//! impl VisitorMut for MyVisitor { +//! fn post_visit_where_clause(&mut self, n: &mut WhereClause) -> VisitorResult<()> { +//! println!("{}", n); +//! Ok(()) +//! } +//! } +//! +//! let statements = parse_statements("SELECT value FROM cpu WHERE host = 'west'").unwrap(); +//! let mut statement = statements.first().unwrap().clone(); +//! let mut vis = MyVisitor; +//! statement.accept(&mut vis).unwrap(); +//! ``` +use self::Recursion::*; +use crate::common::{ + LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, + WhereClause, +}; +use crate::create::CreateDatabaseStatement; +use crate::delete::DeleteStatement; +use crate::drop::DropMeasurementStatement; +use crate::explain::ExplainStatement; +use crate::expression::arithmetic::Expr; +use crate::expression::conditional::ConditionalExpression; +use crate::select::{ + Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, +}; +use crate::show::{OnClause, ShowDatabasesStatement}; +use crate::show_field_keys::ShowFieldKeysStatement; +use crate::show_measurements::{ + ExtendedOnClause, ShowMeasurementsStatement, WithMeasurementClause, +}; +use crate::show_retention_policies::ShowRetentionPoliciesStatement; +use crate::show_tag_keys::ShowTagKeysStatement; +use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; +use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; +use crate::statement::Statement; + +/// The result type for a [`VisitorMut`]. +pub type VisitorResult<T, E = &'static str> = Result<T, E>; + +/// Controls how the visitor recursion should proceed. +#[derive(Clone, Copy)] +pub enum Recursion { + /// Attempt to visit all the children, recursively, of this expression. + Continue, + /// Do not visit the children of this expression, though the walk + /// of parents of this expression will not be affected + Stop, +} + +/// Encode the depth-first traversal of an InfluxQL statement. When passed to +/// any [`VisitableMut::accept`], `pre_visit` functions are invoked repeatedly +/// until a leaf node is reached or a `pre_visit` function returns [`Recursion::Stop`]. 
+pub trait VisitorMut: Sized { + /// Invoked before any children of the InfluxQL statement are visited. + fn pre_visit_statement(&mut self, _n: &mut Statement) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the InfluxQL statement are visited. + fn post_visit_statement(&mut self, _n: &mut Statement) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of `n` are visited. + fn pre_visit_create_database_statement( + &mut self, + _n: &mut CreateDatabaseStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of `n` are visited. Default + /// implementation does nothing. + fn post_visit_create_database_statement( + &mut self, + _n: &mut CreateDatabaseStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `DELETE` statement are visited. + fn pre_visit_delete_statement(&mut self, _n: &mut DeleteStatement) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `DELETE` statement are visited. + fn post_visit_delete_statement(&mut self, _n: &mut DeleteStatement) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `FROM` clause of a `DELETE` statement are visited. + fn pre_visit_delete_from_clause( + &mut self, + _n: &mut DeleteFromClause, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `FROM` clause of a `DELETE` statement are visited. + fn post_visit_delete_from_clause(&mut self, _n: &mut DeleteFromClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the measurement name are visited. + fn pre_visit_measurement_name(&mut self, _n: &mut MeasurementName) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the measurement name are visited. + fn post_visit_measurement_name(&mut self, _n: &mut MeasurementName) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `DROP MEASUREMENT` statement are visited. + fn pre_visit_drop_measurement_statement( + &mut self, + _n: &mut DropMeasurementStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `DROP MEASUREMENT` statement are visited. + fn post_visit_drop_measurement_statement( + &mut self, + _n: &mut DropMeasurementStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `EXPLAIN` statement are visited. + fn pre_visit_explain_statement( + &mut self, + _n: &mut ExplainStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `EXPLAIN` statement are visited. + fn post_visit_explain_statement(&mut self, _n: &mut ExplainStatement) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SELECT` statement are visited. + fn pre_visit_select_statement(&mut self, _n: &mut SelectStatement) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SELECT` statement are visited. + fn post_visit_select_statement(&mut self, _n: &mut SelectStatement) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SHOW DATABASES` statement are visited. + fn pre_visit_show_databases_statement( + &mut self, + _n: &mut ShowDatabasesStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SHOW DATABASES` statement are visited. 
+ fn post_visit_show_databases_statement( + &mut self, + _n: &mut ShowDatabasesStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SHOW MEASUREMENTS` statement are visited. + fn pre_visit_show_measurements_statement( + &mut self, + _n: &mut ShowMeasurementsStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SHOW MEASUREMENTS` statement are visited. + fn post_visit_show_measurements_statement( + &mut self, + _n: &mut ShowMeasurementsStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SHOW RETENTION POLICIES` statement are visited. + fn pre_visit_show_retention_policies_statement( + &mut self, + _n: &mut ShowRetentionPoliciesStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SHOW RETENTION POLICIES` statement are visited. + fn post_visit_show_retention_policies_statement( + &mut self, + _n: &mut ShowRetentionPoliciesStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SHOW TAG KEYS` statement are visited. + fn pre_visit_show_tag_keys_statement( + &mut self, + _n: &mut ShowTagKeysStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SHOW TAG KEYS` statement are visited. + fn post_visit_show_tag_keys_statement( + &mut self, + _n: &mut ShowTagKeysStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SHOW TAG VALUES` statement are visited. + fn pre_visit_show_tag_values_statement( + &mut self, + _n: &mut ShowTagValuesStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SHOW TAG VALUES` statement are visited. + fn post_visit_show_tag_values_statement( + &mut self, + _n: &mut ShowTagValuesStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SHOW FIELD KEYS` statement are visited. + fn pre_visit_show_field_keys_statement( + &mut self, + _n: &mut ShowFieldKeysStatement, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SHOW FIELD KEYS` statement are visited. + fn post_visit_show_field_keys_statement( + &mut self, + _n: &mut ShowFieldKeysStatement, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the conditional expression are visited. + fn pre_visit_conditional_expression( + &mut self, + _n: &mut ConditionalExpression, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the conditional expression are visited. + fn post_visit_conditional_expression( + &mut self, + _n: &mut ConditionalExpression, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the arithmetic expression are visited. + fn pre_visit_expr(&mut self, _n: &mut Expr) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the arithmetic expression are visited. + fn post_visit_expr(&mut self, _n: &mut Expr) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any fields of the `SELECT` projection are visited. + fn pre_visit_select_field_list(&mut self, _n: &mut FieldList) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all fields of the `SELECT` projection are visited. 
+ fn post_visit_select_field_list(&mut self, _n: &mut FieldList) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the field of a `SELECT` statement are visited. + fn pre_visit_select_field(&mut self, _n: &mut Field) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the field of a `SELECT` statement are visited. + fn post_visit_select_field(&mut self, _n: &mut Field) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `FROM` clause of a `SELECT` statement are visited. + fn pre_visit_select_from_clause( + &mut self, + _n: &mut FromMeasurementClause, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `FROM` clause of a `SELECT` statement are visited. + fn post_visit_select_from_clause( + &mut self, + _n: &mut FromMeasurementClause, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the measurement selection of a `FROM` clause for a `SELECT` statement are visited. + fn pre_visit_select_measurement_selection( + &mut self, + _n: &mut MeasurementSelection, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the measurement selection of a `FROM` clause for a `SELECT` statement are visited. + fn post_visit_select_measurement_selection( + &mut self, + _n: &mut MeasurementSelection, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `GROUP BY` clause are visited. + fn pre_visit_group_by_clause(&mut self, _n: &mut GroupByClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `GROUP BY` clause are visited. + fn post_visit_group_by_clause(&mut self, _n: &mut GroupByClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `GROUP BY` dimension expression are visited. + fn pre_visit_select_dimension(&mut self, _n: &mut Dimension) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `GROUP BY` dimension expression are visited. + fn post_visit_select_dimension(&mut self, _n: &mut Dimension) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `WHERE` clause are visited. + fn pre_visit_where_clause(&mut self, _n: &mut WhereClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `WHERE` clause are visited. + fn post_visit_where_clause(&mut self, _n: &mut WhereClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `FROM` clause for any `SHOW` statement are visited. + fn pre_visit_show_from_clause(&mut self, _n: &mut ShowFromClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `FROM` clause for any `SHOW` statement are visited. + fn post_visit_show_from_clause(&mut self, _n: &mut ShowFromClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the qualified measurement name are visited. + fn pre_visit_qualified_measurement_name( + &mut self, + _n: &mut QualifiedMeasurementName, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the qualified measurement name are visited. + fn post_visit_qualified_measurement_name( + &mut self, + _n: &mut QualifiedMeasurementName, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `FILL` clause are visited. 
+ fn pre_visit_fill_clause(&mut self, _n: &mut FillClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `FILL` clause are visited. + fn post_visit_fill_clause(&mut self, _n: &mut FillClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `ORDER BY` clause are visited. + fn pre_visit_order_by_clause(&mut self, _n: &mut OrderByClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `ORDER BY` clause are visited. + fn post_visit_order_by_clause(&mut self, _n: &mut OrderByClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `LIMIT` clause are visited. + fn pre_visit_limit_clause(&mut self, _n: &mut LimitClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `LIMIT` clause are visited. + fn post_visit_limit_clause(&mut self, _n: &mut LimitClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `OFFSET` clause are visited. + fn pre_visit_offset_clause(&mut self, _n: &mut OffsetClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `OFFSET` clause are visited. + fn post_visit_offset_clause(&mut self, _n: &mut OffsetClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SLIMIT` clause are visited. + fn pre_visit_slimit_clause(&mut self, _n: &mut SLimitClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SLIMIT` clause are visited. + fn post_visit_slimit_clause(&mut self, _n: &mut SLimitClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of the `SOFFSET` clause are visited. + fn pre_visit_soffset_clause(&mut self, _n: &mut SOffsetClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of the `SOFFSET` clause are visited. + fn post_visit_soffset_clause(&mut self, _n: &mut SOffsetClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of a `TZ` clause are visited. + fn pre_visit_timezone_clause(&mut self, _n: &mut TimeZoneClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of a `TZ` clause are visited. + fn post_visit_timezone_clause(&mut self, _n: &mut TimeZoneClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of an extended `ON` clause are visited. + fn pre_visit_extended_on_clause( + &mut self, + _n: &mut ExtendedOnClause, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of an extended `ON` clause are visited. + fn post_visit_extended_on_clause(&mut self, _n: &mut ExtendedOnClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of an `ON` clause are visited. + fn pre_visit_on_clause(&mut self, _n: &mut OnClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of an `ON` clause are visited. + fn post_visit_on_clause(&mut self, _n: &mut OnClause) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of a `WITH MEASUREMENT` clause are visited. + fn pre_visit_with_measurement_clause( + &mut self, + _n: &mut WithMeasurementClause, + ) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of a `WITH MEASUREMENT` clause are visited. 
+ fn post_visit_with_measurement_clause( + &mut self, + _n: &mut WithMeasurementClause, + ) -> VisitorResult<()> { + Ok(()) + } + + /// Invoked before any children of a `WITH KEY` clause are visited. + fn pre_visit_with_key_clause(&mut self, _n: &mut WithKeyClause) -> VisitorResult<Recursion> { + Ok(Continue) + } + + /// Invoked after all children of a `WITH KEY` clause are visited. + fn post_visit_with_key_clause(&mut self, _n: &mut WithKeyClause) -> VisitorResult<()> { + Ok(()) + } +} + +/// Trait for types that can be visited by [`VisitorMut`] +pub trait VisitableMut: Sized { + /// accept a visitor, calling `visit` on all children of this + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()>; +} + +impl VisitableMut for Statement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_statement(self)? { + return Ok(()); + }; + + match self { + Self::CreateDatabase(s) => s.accept(visitor), + Self::Delete(s) => s.accept(visitor), + Self::DropMeasurement(s) => s.accept(visitor), + Self::Explain(s) => s.accept(visitor), + Self::Select(s) => s.accept(visitor), + Self::ShowDatabases(s) => s.accept(visitor), + Self::ShowMeasurements(s) => s.accept(visitor), + Self::ShowRetentionPolicies(s) => s.accept(visitor), + Self::ShowTagKeys(s) => s.accept(visitor), + Self::ShowTagValues(s) => s.accept(visitor), + Self::ShowFieldKeys(s) => s.accept(visitor), + }?; + + visitor.post_visit_statement(self) + } +} + +impl VisitableMut for CreateDatabaseStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_create_database_statement(self)? { + return Ok(()); + }; + + visitor.post_visit_create_database_statement(self) + } +} + +impl VisitableMut for DeleteStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_delete_statement(self)? { + return Ok(()); + }; + + match self { + Self::FromWhere { from, condition } => { + from.accept(visitor)?; + + if let Some(condition) = condition { + condition.accept(visitor)?; + } + } + Self::Where(condition) => condition.accept(visitor)?, + }; + + visitor.post_visit_delete_statement(self) + } +} + +impl VisitableMut for WhereClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_where_clause(self)? { + return Ok(()); + }; + + self.0.accept(visitor)?; + + visitor.post_visit_where_clause(self) + } +} + +impl VisitableMut for DeleteFromClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_delete_from_clause(self)? { + return Ok(()); + }; + + self.contents + .iter_mut() + .try_for_each(|n| n.accept(visitor))?; + + visitor.post_visit_delete_from_clause(self) + } +} + +impl VisitableMut for MeasurementName { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_measurement_name(self)? { + return Ok(()); + }; + + visitor.post_visit_measurement_name(self) + } +} + +impl VisitableMut for DropMeasurementStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_drop_measurement_statement(self)? 
{ + return Ok(()); + }; + + visitor.post_visit_drop_measurement_statement(self) + } +} + +impl VisitableMut for ExplainStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_explain_statement(self)? { + return Ok(()); + }; + + self.select.accept(visitor)?; + + visitor.post_visit_explain_statement(self) + } +} + +impl VisitableMut for SelectStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_select_statement(self)? { + return Ok(()); + }; + + self.fields.accept(visitor)?; + + self.from.accept(visitor)?; + + if let Some(condition) = &mut self.condition { + condition.accept(visitor)?; + } + + if let Some(group_by) = &mut self.group_by { + group_by.accept(visitor)?; + } + + if let Some(fill_clause) = &mut self.fill { + fill_clause.accept(visitor)?; + } + + if let Some(order_by) = &mut self.order_by { + order_by.accept(visitor)?; + } + + if let Some(limit) = &mut self.limit { + limit.accept(visitor)?; + } + + if let Some(offset) = &mut self.offset { + offset.accept(visitor)?; + } + + if let Some(limit) = &mut self.series_limit { + limit.accept(visitor)?; + } + + if let Some(offset) = &mut self.series_offset { + offset.accept(visitor)?; + } + + if let Some(tz_clause) = &mut self.timezone { + tz_clause.accept(visitor)?; + } + + visitor.post_visit_select_statement(self) + } +} + +impl VisitableMut for TimeZoneClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_timezone_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_timezone_clause(self) + } +} + +impl VisitableMut for LimitClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_limit_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_limit_clause(self) + } +} + +impl VisitableMut for OffsetClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_offset_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_offset_clause(self) + } +} + +impl VisitableMut for SLimitClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_slimit_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_slimit_clause(self) + } +} + +impl VisitableMut for SOffsetClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_soffset_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_soffset_clause(self) + } +} + +impl VisitableMut for FillClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_fill_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_fill_clause(self) + } +} + +impl VisitableMut for OrderByClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_order_by_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_order_by_clause(self) + } +} + +impl VisitableMut for GroupByClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_group_by_clause(self)? 
{ + return Ok(()); + }; + + self.contents + .iter_mut() + .try_for_each(|d| d.accept(visitor))?; + + visitor.post_visit_group_by_clause(self) + } +} + +impl VisitableMut for ShowMeasurementsStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_measurements_statement(self)? { + return Ok(()); + }; + + if let Some(on_clause) = &mut self.on { + on_clause.accept(visitor)?; + } + + if let Some(with_clause) = &mut self.with_measurement { + with_clause.accept(visitor)?; + } + + if let Some(condition) = &mut self.condition { + condition.accept(visitor)?; + } + + if let Some(limit) = &mut self.limit { + limit.accept(visitor)?; + } + + if let Some(offset) = &mut self.offset { + offset.accept(visitor)?; + } + + visitor.post_visit_show_measurements_statement(self) + } +} + +impl VisitableMut for ExtendedOnClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_extended_on_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_extended_on_clause(self) + } +} + +impl VisitableMut for WithMeasurementClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_with_measurement_clause(self)? { + return Ok(()); + }; + + match self { + Self::Equals(n) => n.accept(visitor), + Self::Regex(n) => n.accept(visitor), + }?; + + visitor.post_visit_with_measurement_clause(self) + } +} + +impl VisitableMut for ShowRetentionPoliciesStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_retention_policies_statement(self)? { + return Ok(()); + }; + + if let Some(on_clause) = &mut self.database { + on_clause.accept(visitor)?; + } + + visitor.post_visit_show_retention_policies_statement(self) + } +} + +impl VisitableMut for ShowFromClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_from_clause(self)? { + return Ok(()); + }; + + self.contents + .iter_mut() + .try_for_each(|f| f.accept(visitor))?; + + visitor.post_visit_show_from_clause(self) + } +} + +impl VisitableMut for QualifiedMeasurementName { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_qualified_measurement_name(self)? { + return Ok(()); + }; + + self.name.accept(visitor)?; + + visitor.post_visit_qualified_measurement_name(self) + } +} + +impl VisitableMut for ShowTagKeysStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_tag_keys_statement(self)? { + return Ok(()); + }; + + if let Some(on_clause) = &mut self.database { + on_clause.accept(visitor)?; + } + + if let Some(from) = &mut self.from { + from.accept(visitor)?; + } + + if let Some(condition) = &mut self.condition { + condition.accept(visitor)?; + } + + if let Some(limit) = &mut self.limit { + limit.accept(visitor)?; + } + + if let Some(offset) = &mut self.offset { + offset.accept(visitor)?; + } + + visitor.post_visit_show_tag_keys_statement(self) + } +} + +impl VisitableMut for ShowTagValuesStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_tag_values_statement(self)? 
{ + return Ok(()); + }; + + if let Some(on_clause) = &mut self.database { + on_clause.accept(visitor)?; + } + + if let Some(from) = &mut self.from { + from.accept(visitor)?; + } + + self.with_key.accept(visitor)?; + + if let Some(condition) = &mut self.condition { + condition.accept(visitor)?; + } + + if let Some(limit) = &mut self.limit { + limit.accept(visitor)?; + } + + if let Some(offset) = &mut self.offset { + offset.accept(visitor)?; + } + + visitor.post_visit_show_tag_values_statement(self) + } +} + +impl VisitableMut for ShowFieldKeysStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_field_keys_statement(self)? { + return Ok(()); + }; + + if let Some(on_clause) = &mut self.database { + on_clause.accept(visitor)?; + } + + if let Some(from) = &mut self.from { + from.accept(visitor)?; + } + + if let Some(limit) = &mut self.limit { + limit.accept(visitor)?; + } + + if let Some(offset) = &mut self.offset { + offset.accept(visitor)?; + } + + visitor.post_visit_show_field_keys_statement(self) + } +} + +impl VisitableMut for FieldList { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_select_field_list(self)? { + return Ok(()); + }; + + self.contents + .iter_mut() + .try_for_each(|f| f.accept(visitor))?; + + visitor.post_visit_select_field_list(self) + } +} + +impl VisitableMut for Field { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_select_field(self)? { + return Ok(()); + }; + + self.expr.accept(visitor)?; + + visitor.post_visit_select_field(self) + } +} + +impl VisitableMut for FromMeasurementClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_select_from_clause(self)? { + return Ok(()); + }; + + self.contents + .iter_mut() + .try_for_each(|f| f.accept(visitor))?; + + visitor.post_visit_select_from_clause(self) + } +} + +impl VisitableMut for MeasurementSelection { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_select_measurement_selection(self)? { + return Ok(()); + }; + + match self { + Self::Name(name) => name.accept(visitor), + Self::Subquery(select) => select.accept(visitor), + }?; + + visitor.post_visit_select_measurement_selection(self) + } +} + +impl VisitableMut for Dimension { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_select_dimension(self)? { + return Ok(()); + }; + + match self { + Self::Time { interval, offset } => { + interval.accept(visitor)?; + if let Some(offset) = offset { + offset.accept(visitor)?; + } + } + Self::Tag(_) | Self::Regex(_) | Self::Wildcard => {} + }; + + visitor.post_visit_select_dimension(self) + } +} + +impl VisitableMut for WithKeyClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_with_key_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_with_key_clause(self) + } +} + +impl VisitableMut for ShowDatabasesStatement { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_show_databases_statement(self)? 
{ + return Ok(()); + }; + visitor.post_visit_show_databases_statement(self) + } +} + +impl VisitableMut for ConditionalExpression { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_conditional_expression(self)? { + return Ok(()); + }; + + match self { + Self::Expr(expr) => expr.accept(visitor), + Self::Binary { lhs, rhs, .. } => { + lhs.accept(visitor)?; + rhs.accept(visitor) + } + Self::Grouped(expr) => expr.accept(visitor), + }?; + + visitor.post_visit_conditional_expression(self) + } +} + +impl VisitableMut for Expr { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_expr(self)? { + return Ok(()); + }; + + match self { + Self::UnaryOp(_, expr) => expr.accept(visitor)?, + Self::Call { args, .. } => args.iter_mut().try_for_each(|e| e.accept(visitor))?, + Self::Binary { lhs, op: _, rhs } => { + lhs.accept(visitor)?; + rhs.accept(visitor)?; + } + Self::Nested(expr) => expr.accept(visitor)?, + + // We explicitly list out each enumeration, to ensure + // we revisit if new items are added to the Expr enumeration. + Self::VarRef { .. } + | Self::BindParameter(_) + | Self::Literal(_) + | Self::Wildcard(_) + | Self::Distinct(_) => {} + }; + + visitor.post_visit_expr(self) + } +} + +impl VisitableMut for OnClause { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + if let Stop = visitor.pre_visit_on_clause(self)? { + return Ok(()); + }; + + visitor.post_visit_on_clause(self) + } +} + +#[cfg(test)] +mod test { + use super::Recursion::Continue; + use super::{Recursion, VisitableMut, VisitorMut, VisitorResult}; + use crate::common::{ + LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, + WhereClause, + }; + use crate::delete::DeleteStatement; + use crate::drop::DropMeasurementStatement; + use crate::explain::ExplainStatement; + use crate::expression::arithmetic::Expr; + use crate::expression::conditional::ConditionalExpression; + use crate::parse_statements; + use crate::select::{ + Dimension, Field, FieldList, FillClause, FromMeasurementClause, GroupByClause, + MeasurementSelection, SLimitClause, SOffsetClause, SelectStatement, TimeZoneClause, + }; + use crate::show::{OnClause, ShowDatabasesStatement}; + use crate::show_field_keys::ShowFieldKeysStatement; + use crate::show_measurements::{ + ExtendedOnClause, ShowMeasurementsStatement, WithMeasurementClause, + }; + use crate::show_retention_policies::ShowRetentionPoliciesStatement; + use crate::show_tag_keys::ShowTagKeysStatement; + use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; + use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; + use crate::statement::{statement, Statement}; + use std::fmt::Debug; + + struct TestVisitor(Vec<String>); + + impl TestVisitor { + fn new() -> Self { + Self(Vec::new()) + } + + fn push_pre(&mut self, name: &str, n: impl Debug) { + self.0.push(format!("pre_visit_{}: {:?}", name, n)); + } + + fn push_post(&mut self, name: &str, n: impl Debug) { + self.0.push(format!("post_visit_{}: {:?}", name, n)); + } + } + + impl VisitorMut for TestVisitor { + fn pre_visit_statement(&mut self, n: &mut Statement) -> VisitorResult<Recursion> { + self.push_pre("statement", n); + Ok(Continue) + } + + fn post_visit_statement(&mut self, n: &mut Statement) -> VisitorResult<()> { + self.push_post("statement", n); + Ok(()) + } + + fn pre_visit_delete_statement( + &mut self, + n: &mut DeleteStatement, + ) -> 
VisitorResult<Recursion> { + self.push_pre("delete_statement", n); + Ok(Continue) + } + + fn post_visit_delete_statement(&mut self, n: &mut DeleteStatement) -> VisitorResult<()> { + self.push_post("delete_statement", n); + Ok(()) + } + + fn pre_visit_delete_from_clause( + &mut self, + n: &mut DeleteFromClause, + ) -> VisitorResult<Recursion> { + self.push_pre("delete_from", n); + Ok(Continue) + } + + fn post_visit_delete_from_clause(&mut self, n: &mut DeleteFromClause) -> VisitorResult<()> { + self.push_post("delete_from", n); + Ok(()) + } + + fn pre_visit_measurement_name( + &mut self, + n: &mut MeasurementName, + ) -> VisitorResult<Recursion> { + self.push_pre("measurement_name", n); + Ok(Continue) + } + + fn post_visit_measurement_name(&mut self, n: &mut MeasurementName) -> VisitorResult<()> { + self.push_post("measurement_name", n); + Ok(()) + } + + fn pre_visit_drop_measurement_statement( + &mut self, + n: &mut DropMeasurementStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("drop_measurement_statement", n); + Ok(Continue) + } + + fn post_visit_drop_measurement_statement( + &mut self, + n: &mut DropMeasurementStatement, + ) -> VisitorResult<()> { + self.push_post("drop_measurement_statement", n); + Ok(()) + } + + fn pre_visit_explain_statement( + &mut self, + n: &mut ExplainStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("explain_statement", n); + Ok(Continue) + } + + fn post_visit_explain_statement(&mut self, n: &mut ExplainStatement) -> VisitorResult<()> { + self.push_post("explain_statement", n); + Ok(()) + } + + fn pre_visit_select_statement( + &mut self, + n: &mut SelectStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("select_statement", n); + Ok(Continue) + } + + fn post_visit_select_statement(&mut self, n: &mut SelectStatement) -> VisitorResult<()> { + self.push_post("select_statement", n); + Ok(()) + } + + fn pre_visit_show_databases_statement( + &mut self, + n: &mut ShowDatabasesStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("show_databases_statement", n); + Ok(Continue) + } + + fn post_visit_show_databases_statement( + &mut self, + n: &mut ShowDatabasesStatement, + ) -> VisitorResult<()> { + self.push_post("show_databases_statement", n); + Ok(()) + } + + fn pre_visit_show_measurements_statement( + &mut self, + n: &mut ShowMeasurementsStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("show_measurements_statement", n); + Ok(Continue) + } + + fn post_visit_show_measurements_statement( + &mut self, + n: &mut ShowMeasurementsStatement, + ) -> VisitorResult<()> { + self.push_post("show_measurements_statement", n); + Ok(()) + } + + fn pre_visit_show_retention_policies_statement( + &mut self, + n: &mut ShowRetentionPoliciesStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("show_retention_policies_statement", n); + Ok(Continue) + } + + fn post_visit_show_retention_policies_statement( + &mut self, + n: &mut ShowRetentionPoliciesStatement, + ) -> VisitorResult<()> { + self.push_post("show_retention_policies_statement", n); + Ok(()) + } + + fn pre_visit_show_tag_keys_statement( + &mut self, + n: &mut ShowTagKeysStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("show_tag_keys_statement", n); + Ok(Continue) + } + + fn post_visit_show_tag_keys_statement( + &mut self, + n: &mut ShowTagKeysStatement, + ) -> VisitorResult<()> { + self.push_post("show_tag_keys_statement", n); + Ok(()) + } + + fn pre_visit_show_tag_values_statement( + &mut self, + n: &mut ShowTagValuesStatement, + ) -> VisitorResult<Recursion> { 
+ self.push_pre("show_tag_values_statement", n); + Ok(Continue) + } + + fn post_visit_show_tag_values_statement( + &mut self, + n: &mut ShowTagValuesStatement, + ) -> VisitorResult<()> { + self.push_post("show_tag_values_statement", n); + Ok(()) + } + + fn pre_visit_show_field_keys_statement( + &mut self, + n: &mut ShowFieldKeysStatement, + ) -> VisitorResult<Recursion> { + self.push_pre("show_field_keys_statement", n); + Ok(Continue) + } + + fn post_visit_show_field_keys_statement( + &mut self, + n: &mut ShowFieldKeysStatement, + ) -> VisitorResult<()> { + self.push_post("show_field_keys_statement", n); + Ok(()) + } + + fn pre_visit_conditional_expression( + &mut self, + n: &mut ConditionalExpression, + ) -> VisitorResult<Recursion> { + self.push_pre("conditional_expression", n); + Ok(Continue) + } + + fn post_visit_conditional_expression( + &mut self, + n: &mut ConditionalExpression, + ) -> VisitorResult<()> { + self.push_post("conditional_expression", n); + Ok(()) + } + + fn pre_visit_expr(&mut self, n: &mut Expr) -> VisitorResult<Recursion> { + self.push_pre("expr", n); + Ok(Continue) + } + + fn post_visit_expr(&mut self, n: &mut Expr) -> VisitorResult<()> { + self.push_post("expr", n); + Ok(()) + } + + fn pre_visit_select_field_list(&mut self, n: &mut FieldList) -> VisitorResult<Recursion> { + self.push_pre("select_field_list", n); + Ok(Continue) + } + + fn post_visit_select_field_list(&mut self, n: &mut FieldList) -> VisitorResult<()> { + self.push_post("select_field_list", n); + Ok(()) + } + + fn pre_visit_select_field(&mut self, n: &mut Field) -> VisitorResult<Recursion> { + self.push_pre("select_field", n); + Ok(Continue) + } + + fn post_visit_select_field(&mut self, n: &mut Field) -> VisitorResult<()> { + self.push_post("select_field", n); + Ok(()) + } + + fn pre_visit_select_from_clause( + &mut self, + n: &mut FromMeasurementClause, + ) -> VisitorResult<Recursion> { + self.push_pre("select_from_clause", n); + Ok(Continue) + } + + fn post_visit_select_from_clause( + &mut self, + n: &mut FromMeasurementClause, + ) -> VisitorResult<()> { + self.push_post("select_from_clause", n); + Ok(()) + } + + fn pre_visit_select_measurement_selection( + &mut self, + n: &mut MeasurementSelection, + ) -> VisitorResult<Recursion> { + self.push_pre("select_measurement_selection", n); + Ok(Continue) + } + + fn post_visit_select_measurement_selection( + &mut self, + n: &mut MeasurementSelection, + ) -> VisitorResult<()> { + self.push_post("select_measurement_selection", n); + Ok(()) + } + + fn pre_visit_group_by_clause(&mut self, n: &mut GroupByClause) -> VisitorResult<Recursion> { + self.push_pre("group_by_clause", n); + Ok(Continue) + } + + fn post_visit_group_by_clause(&mut self, n: &mut GroupByClause) -> VisitorResult<()> { + self.push_post("group_by_clause", n); + Ok(()) + } + + fn pre_visit_select_dimension(&mut self, n: &mut Dimension) -> VisitorResult<Recursion> { + self.push_pre("select_dimension", n); + Ok(Continue) + } + + fn post_visit_select_dimension(&mut self, n: &mut Dimension) -> VisitorResult<()> { + self.push_post("select_dimension", n); + Ok(()) + } + + fn pre_visit_where_clause(&mut self, n: &mut WhereClause) -> VisitorResult<Recursion> { + self.push_pre("where_clause", n); + Ok(Continue) + } + + fn post_visit_where_clause(&mut self, n: &mut WhereClause) -> VisitorResult<()> { + self.push_post("where_clause", n); + Ok(()) + } + + fn pre_visit_show_from_clause( + &mut self, + n: &mut ShowFromClause, + ) -> VisitorResult<Recursion> { + self.push_pre("show_from_clause", n); + 
Ok(Continue) + } + + fn post_visit_show_from_clause(&mut self, n: &mut ShowFromClause) -> VisitorResult<()> { + self.push_post("show_from_clause", n); + Ok(()) + } + + fn pre_visit_qualified_measurement_name( + &mut self, + n: &mut QualifiedMeasurementName, + ) -> VisitorResult<Recursion> { + self.push_pre("qualified_measurement_name", n); + Ok(Continue) + } + + fn post_visit_qualified_measurement_name( + &mut self, + n: &mut QualifiedMeasurementName, + ) -> VisitorResult<()> { + self.push_post("qualified_measurement_name", n); + Ok(()) + } + + fn pre_visit_fill_clause(&mut self, n: &mut FillClause) -> VisitorResult<Recursion> { + self.push_pre("fill_clause", n); + Ok(Continue) + } + + fn post_visit_fill_clause(&mut self, n: &mut FillClause) -> VisitorResult<()> { + self.push_post("fill_clause", n); + Ok(()) + } + + fn pre_visit_order_by_clause(&mut self, n: &mut OrderByClause) -> VisitorResult<Recursion> { + self.push_pre("order_by_clause", n); + Ok(Continue) + } + + fn post_visit_order_by_clause(&mut self, n: &mut OrderByClause) -> VisitorResult<()> { + self.push_post("order_by_clause", n); + Ok(()) + } + + fn pre_visit_limit_clause(&mut self, n: &mut LimitClause) -> VisitorResult<Recursion> { + self.push_pre("limit_clause", n); + Ok(Continue) + } + + fn post_visit_limit_clause(&mut self, n: &mut LimitClause) -> VisitorResult<()> { + self.push_post("limit_clause", n); + Ok(()) + } + + fn pre_visit_offset_clause(&mut self, n: &mut OffsetClause) -> VisitorResult<Recursion> { + self.push_pre("offset_clause", n); + Ok(Continue) + } + + fn post_visit_offset_clause(&mut self, n: &mut OffsetClause) -> VisitorResult<()> { + self.push_post("offset_clause", n); + Ok(()) + } + + fn pre_visit_slimit_clause(&mut self, n: &mut SLimitClause) -> VisitorResult<Recursion> { + self.push_pre("slimit_clause", n); + Ok(Continue) + } + + fn post_visit_slimit_clause(&mut self, n: &mut SLimitClause) -> VisitorResult<()> { + self.push_post("slimit_clause", n); + Ok(()) + } + + fn pre_visit_soffset_clause(&mut self, n: &mut SOffsetClause) -> VisitorResult<Recursion> { + self.push_pre("soffset_clause", n); + Ok(Continue) + } + + fn post_visit_soffset_clause(&mut self, n: &mut SOffsetClause) -> VisitorResult<()> { + self.push_post("soffset_clause", n); + Ok(()) + } + + fn pre_visit_timezone_clause( + &mut self, + n: &mut TimeZoneClause, + ) -> VisitorResult<Recursion> { + self.push_pre("timezone_clause", n); + Ok(Continue) + } + + fn post_visit_timezone_clause(&mut self, n: &mut TimeZoneClause) -> VisitorResult<()> { + self.push_post("timezone_clause", n); + Ok(()) + } + + fn pre_visit_extended_on_clause( + &mut self, + n: &mut ExtendedOnClause, + ) -> VisitorResult<Recursion> { + self.push_pre("extended_on_clause", n); + Ok(Continue) + } + + fn post_visit_extended_on_clause(&mut self, n: &mut ExtendedOnClause) -> VisitorResult<()> { + self.push_post("extended_on_clause", n); + Ok(()) + } + + fn pre_visit_on_clause(&mut self, n: &mut OnClause) -> VisitorResult<Recursion> { + self.push_pre("on_clause", n); + Ok(Continue) + } + + fn post_visit_on_clause(&mut self, n: &mut OnClause) -> VisitorResult<()> { + self.push_pre("on_clause", n); + Ok(()) + } + + fn pre_visit_with_measurement_clause( + &mut self, + n: &mut WithMeasurementClause, + ) -> VisitorResult<Recursion> { + self.push_pre("with_measurement_clause", n); + Ok(Continue) + } + + fn post_visit_with_measurement_clause( + &mut self, + n: &mut WithMeasurementClause, + ) -> VisitorResult<()> { + self.push_post("with_measurement_clause", n); + Ok(()) + } + + fn 
pre_visit_with_key_clause(&mut self, n: &mut WithKeyClause) -> VisitorResult<Recursion> { + self.push_pre("with_key_clause", n); + Ok(Continue) + } + + fn post_visit_with_key_clause(&mut self, n: &mut WithKeyClause) -> VisitorResult<()> { + self.push_post("with_key_clause", n); + Ok(()) + } + } + + macro_rules! visit_statement { + ($SQL:literal) => {{ + let (_, mut s) = statement($SQL).unwrap(); + let mut vis = TestVisitor::new(); + s.accept(&mut vis).unwrap(); + vis.0 + }}; + } + + #[test] + fn test_delete_statement() { + insta::assert_yaml_snapshot!(visit_statement!("DELETE FROM a WHERE b = \"c\"")); + insta::assert_yaml_snapshot!(visit_statement!("DELETE WHERE 'foo bar' =~ /foo/")); + insta::assert_yaml_snapshot!(visit_statement!("DELETE FROM cpu")); + insta::assert_yaml_snapshot!(visit_statement!("DELETE FROM /^cpu/")); + } + + #[test] + fn test_drop_measurement_statement() { + insta::assert_yaml_snapshot!(visit_statement!("DROP MEASUREMENT cpu")) + } + + #[test] + fn test_explain_statement() { + insta::assert_yaml_snapshot!(visit_statement!("EXPLAIN SELECT * FROM cpu")); + } + + #[test] + fn test_select_statement() { + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT value FROM temp"#)); + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT DISTINCT value FROM temp"#)); + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT COUNT(value) FROM temp"#)); + insta::assert_yaml_snapshot!(visit_statement!( + r#"SELECT COUNT(DISTINCT value) FROM temp"# + )); + insta::assert_yaml_snapshot!(visit_statement!(r#"SELECT * FROM /cpu/, memory"#)); + insta::assert_yaml_snapshot!(visit_statement!( + r#"SELECT value FROM (SELECT usage FROM cpu WHERE host = "node1") + WHERE region =~ /west/ AND value > 5 + GROUP BY TIME(5m), host + FILL(previous) + ORDER BY TIME DESC + LIMIT 1 OFFSET 2 + SLIMIT 3 SOFFSET 4 + TZ('Australia/Hobart') + "# + )); + } + + #[test] + fn test_show_databases_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW DATABASES")); + } + + #[test] + fn test_show_measurements_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS ON db.rp")); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW MEASUREMENTS WITH MEASUREMENT = \"cpu\"" + )); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS WHERE host = 'west'")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS LIMIT 5")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW MEASUREMENTS OFFSET 10")); + + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW MEASUREMENTS ON * WITH MEASUREMENT =~ /foo/ WHERE host = 'west' LIMIT 10 OFFSET 20" + )); + } + + #[test] + fn test_show_retention_policies_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW RETENTION POLICIES")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW RETENTION POLICIES ON telegraf")); + } + + #[test] + fn test_show_tag_keys_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW TAG KEYS")); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW TAG KEYS ON telegraf FROM cpu WHERE host = \"west\" LIMIT 5 OFFSET 10" + )); + } + + #[test] + fn test_show_tag_values_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW TAG VALUES WITH KEY = host")); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW TAG VALUES WITH KEY =~ /host|region/" + )); + insta::assert_yaml_snapshot!(visit_statement!( + "SHOW TAG VALUES WITH KEY IN (host, region)" + )); + 
insta::assert_yaml_snapshot!(visit_statement!("SHOW TAG VALUES ON telegraf FROM cpu WITH KEY = host WHERE host = \"west\" LIMIT 5 OFFSET 10")); + } + + #[test] + fn test_show_field_keys_statement() { + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS ON telegraf")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS FROM cpu")); + insta::assert_yaml_snapshot!(visit_statement!("SHOW FIELD KEYS ON telegraf FROM /cpu/")); + } + + #[test] + fn test_mutability() { + struct AddLimit; + + impl VisitorMut for AddLimit { + fn pre_visit_select_statement( + &mut self, + n: &mut SelectStatement, + ) -> VisitorResult<Recursion> { + n.limit = Some(LimitClause(10)); + Ok(Continue) + } + } + + let mut statement = parse_statements("SELECT usage FROM cpu") + .unwrap() + .first() + .unwrap() + .clone(); + let mut vis = AddLimit; + statement.accept(&mut vis).unwrap(); + let res = format!("{}", statement); + assert_eq!(res, "SELECT usage FROM cpu LIMIT 10"); + } +}
f1fbcca23c6de9fda130b6e763bbd4266d1171e3
Marco Neumann
2023-08-09 21:16:35
make concurrent index creations idempotent (#8450)
* fix: make concurrent index creations idempotent `CREATE INDEX CONCURRENTLY` may fail and leave an invalid index behind. See: <https://www.postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY> Invalid indexes will NOT be used for queries, however they are also useless and technically the migration wasn't successful. Since #8407 we detect this situation. #8394 allows us to fix these migrations. #8434 makes sure that we don't mess this up (esp. that the "other checksum" pragmas are correct). For #7897. * fix: multi-line processing in `check_linear_migrations.sh` ----------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
fix: make concurrent index creations idempotent (#8450) * fix: make concurrent index creations idempotent `CREATE INDEX CONCURRENTLY` may fail and leave an invalid index behind. See: <https://www.postgresql.org/docs/current/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY> Invalid indexes will NOT be used for queries, however they are also useless and technically the migration wasn't successful. Since #8407 we detect this situation. #8394 allows us to fix these migrations. #8434 makes sure that we don't mess this up (esp. that the "other checksum" pragmas are correct). For #7897. * fix: multi-line processing in `check_linear_migrations.sh` --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_catalog/check_linear_migrations.sh b/iox_catalog/check_linear_migrations.sh index 4d0a4225ad..3d0a9feb76 100755 --- a/iox_catalog/check_linear_migrations.sh +++ b/iox_catalog/check_linear_migrations.sh @@ -55,7 +55,9 @@ fi # check edited versions files_modified="$(git diff --name-only --diff-filter=M --no-renames "$main_branch" HEAD -- "$path" | sort)" if [[ -n "$files_modified" ]]; then - for f in "$files_modified"; do + readarray -t files_modified <<<"$files_modified" + + for f in "${files_modified[@]}"; do version="$(basename "$f" | sed -E 's:^([0-9]+).*:\1:g')" checksum_old="$(git show "$main_branch:$f" | sha384sum | sed -E 's:^([0-9a-f]+).*:\1:g')" pragma="-- IOX_OTHER_CHECKSUM: $checksum_old" diff --git a/iox_catalog/migrations/20230524151854_add_parquet_file_table_and_deleted_index.sql b/iox_catalog/migrations/20230524151854_add_parquet_file_table_and_deleted_index.sql index a7ad59b090..120ecb8a77 100644 --- a/iox_catalog/migrations/20230524151854_add_parquet_file_table_and_deleted_index.sql +++ b/iox_catalog/migrations/20230524151854_add_parquet_file_table_and_deleted_index.sql @@ -1,4 +1,5 @@ -- Add to help the querier when it searches for undeleted parquet files. +-- IOX_OTHER_CHECKSUM: ddc52db62ed446e4a8fe30af7bced52724bcf93e6e6c6cac8cbd3783bf7312595cfc44dc6aa282db9ac58e4ecfb08268 -- By default we often only have 5min to finish our statements. The `CREATE INDEX CONCURRENTLY` however takes longer. -- In our prod test this took about 15min, but better be safe than sorry. @@ -7,7 +8,13 @@ SET statement_timeout TO '60min'; -- IOX_STEP_BOUNDARY +-- remove potentially invalid index +-- IOX_NO_TRANSACTION +DROP INDEX CONCURRENTLY IF EXISTS parquet_file_table_delete_idx; + +-- IOX_STEP_BOUNDARY + -- While `CONCURRENTLY` means it runs parallel to other writes, this command will only finish after the index was -- successfully built. -- IOX_NO_TRANSACTION -CREATE INDEX CONCURRENTLY IF NOT EXISTS parquet_file_table_delete_idx ON parquet_file (table_id) WHERE to_delete IS NULL; +CREATE INDEX CONCURRENTLY parquet_file_table_delete_idx ON parquet_file (table_id) WHERE to_delete IS NULL; diff --git a/iox_catalog/migrations/20230530132809_add_parquet_file_partition_and_deleted_index.sql b/iox_catalog/migrations/20230530132809_add_parquet_file_partition_and_deleted_index.sql index 50c5627326..fbdcbb82fe 100644 --- a/iox_catalog/migrations/20230530132809_add_parquet_file_partition_and_deleted_index.sql +++ b/iox_catalog/migrations/20230530132809_add_parquet_file_partition_and_deleted_index.sql @@ -1,4 +1,5 @@ -- Add to help the compactor when it searches for undeleted parquet files. +-- IOX_OTHER_CHECKSUM: 4b8295a25aa051620c8fe5fa64b914901a2f4af4d343d2735cde484aa018691e22d274aab84200ba2830fbe51833ab1a -- By default we often only have 5min to finish our statements. The `CREATE INDEX CONCURRENTLY` however takes longer. -- In our prod test this took about 15min, but better be safe than sorry. @@ -7,7 +8,13 @@ SET statement_timeout TO '60min'; -- IOX_STEP_BOUNDARY +-- remove potentially invalid index +-- IOX_NO_TRANSACTION +DROP INDEX CONCURRENTLY IF EXISTS parquet_file_partition_delete_idx; + +-- IOX_STEP_BOUNDARY + -- While `CONCURRENTLY` means it runs parallel to other writes, this command will only finish after the index was -- successfully built. 
-- IOX_NO_TRANSACTION -CREATE INDEX CONCURRENTLY IF NOT EXISTS parquet_file_partition_delete_idx ON parquet_file (partition_id) WHERE to_delete IS NULL; +CREATE INDEX CONCURRENTLY parquet_file_partition_delete_idx ON parquet_file (partition_id) WHERE to_delete IS NULL; diff --git a/iox_catalog/migrations/20230707195110_add_created_at_index.sql b/iox_catalog/migrations/20230707195110_add_created_at_index.sql index 3445ee01ba..63cfe565e1 100644 --- a/iox_catalog/migrations/20230707195110_add_created_at_index.sql +++ b/iox_catalog/migrations/20230707195110_add_created_at_index.sql @@ -1,4 +1,5 @@ -- Add to help the compactor when it searches for partitions with files created recently. +-- IOX_OTHER_CHECKSUM: 7a353a9a9876a691c6df91af2f2774bb0a43e5f86aa65ca470c16e413ef1741f4a75505ad4e56f1ecb206c2097aad90f -- By default we often only have 5min to finish our statements. The `CREATE INDEX CONCURRENTLY` however takes longer. -- In our prod test this took about 15min, but better be safe than sorry. @@ -7,7 +8,13 @@ SET statement_timeout TO '60min'; -- IOX_STEP_BOUNDARY +-- remove potentially invalid index +-- IOX_NO_TRANSACTION +DROP INDEX CONCURRENTLY IF EXISTS partition_new_file_at_idx; + +-- IOX_STEP_BOUNDARY + -- While `CONCURRENTLY` means it runs parallel to other writes, this command will only finish after the index was -- successfully built. -- IOX_NO_TRANSACTION -CREATE INDEX CONCURRENTLY IF NOT EXISTS partition_new_file_at_idx ON partition (new_file_at); +CREATE INDEX CONCURRENTLY partition_new_file_at_idx ON partition (new_file_at); diff --git a/iox_catalog/migrations/20230710141740_partition_hash_id_index.sql b/iox_catalog/migrations/20230710141740_partition_hash_id_index.sql index 197e7d664a..11944bf898 100644 --- a/iox_catalog/migrations/20230710141740_partition_hash_id_index.sql +++ b/iox_catalog/migrations/20230710141740_partition_hash_id_index.sql @@ -1,11 +1,19 @@ -- By default, we often only have 5min to finish our statements. The `CREATE INDEX CONCURRENTLY`, -- however, can take longer. +-- IOX_OTHER_CHECKSUM: 2ee2416cc206254f5b8a5497a3cfc5bcb2146759416cd4cb6a83ae34d3e0141387eb733e61f224076d5af0e3c6016e7b + -- IOX_NO_TRANSACTION SET statement_timeout TO '60min'; -- IOX_STEP_BOUNDARY +-- remove potentially invalid index +-- IOX_NO_TRANSACTION +DROP INDEX CONCURRENTLY IF EXISTS parquet_file_partition_hash_id_idx; + +-- IOX_STEP_BOUNDARY + -- IOX_NO_TRANSACTION -CREATE INDEX CONCURRENTLY IF NOT EXISTS parquet_file_partition_hash_id_idx +CREATE INDEX CONCURRENTLY parquet_file_partition_hash_id_idx ON parquet_file (partition_hash_id) WHERE partition_hash_id IS NOT NULL;
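The migration change above follows one pattern throughout: first drop whatever invalid index a previously failed `CREATE INDEX CONCURRENTLY` may have left behind, then rebuild it, with both statements kept outside an explicit transaction (required for the `CONCURRENTLY` variants). Below is a minimal, illustrative sketch of exercising that pattern directly; it assumes `tokio` plus `sqlx` with the `postgres` feature and a hypothetical connection string, and is not how the real IOx migration tooling applies these files.

```rust
use sqlx::postgres::PgPoolOptions;

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Hypothetical DSN; in practice the migrations are applied by the IOx
    // migration tooling, not by hand.
    let pool = PgPoolOptions::new()
        .max_connections(1)
        .connect("postgres://localhost/iox_shared")
        .await?;

    // Drop whatever an earlier, failed `CREATE INDEX CONCURRENTLY` left
    // behind (an INVALID index), then rebuild it. Re-running this pair is
    // always safe, which is what makes the migration idempotent.
    // Each statement runs on a plain autocommit connection, never inside an
    // explicit transaction block.
    sqlx::query("DROP INDEX CONCURRENTLY IF EXISTS parquet_file_table_delete_idx")
        .execute(&pool)
        .await?;
    sqlx::query(
        "CREATE INDEX CONCURRENTLY parquet_file_table_delete_idx \
         ON parquet_file (table_id) WHERE to_delete IS NULL",
    )
    .execute(&pool)
    .await?;

    Ok(())
}
```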
adb135d47c5a02302a72c8293c512adbe6175b55
Dom Dwyer
2023-05-22 13:58:40
add missing lints to iox_query_influxrpc
This crate was missing some of the common lints we use everywhere else.
null
ci: add missing lints to iox_query_influxrpc This crate was missing some of the common lints we use everywhere else.
diff --git a/iox_query_influxrpc/src/lib.rs b/iox_query_influxrpc/src/lib.rs index 444ce71c99..aafa3a7559 100644 --- a/iox_query_influxrpc/src/lib.rs +++ b/iox_query_influxrpc/src/lib.rs @@ -1,5 +1,18 @@ //! Query frontend for InfluxDB Storage gRPC requests +#![deny(rustdoc::broken_intra_doc_links, rust_2018_idioms)] +#![warn( + clippy::clone_on_ref_ptr, + clippy::dbg_macro, + clippy::explicit_iter_loop, + // See https://github.com/influxdata/influxdb_iox/pull/1671 + clippy::future_not_send, + clippy::todo, + clippy::use_self, + missing_copy_implementations, + missing_debug_implementations, +)] + use arrow::datatypes::DataType; use data_types::ChunkId; use datafusion::{
2521aedb6ac7764ff924bbfcbd3549c81024e7cd
Dom Dwyer
2022-11-10 17:44:34
address namespaces by ID only
Removes reliance on string name identifiers for namespaces in the ingester buffer tree, reducing the memory usage of the namespace index and associated overhead. The namespace name is required (though unused by IOx) in the IoxMetadata embedded within a parquet file, and therefore the name is necessary at persist time. For this reason, a DeferredLoad is used to query the catalog (by ID) for the name, at some uniformly random duration of time after initialisation of the NamespaceData, up to a maximum of 1 minute later. This ensures the query remains off the hot ingest path, and the jitter prevents spikes in catalog load during replay/ingester startup. As an additional / easy optimisation, the persist code causes a pre-fetch of the name in the background while compacting, hiding the query latency should it not have already been resolved. In order to keep the ingester buffer & catalog decoupled / easily testable, this commit uses a provider/factory trait NamespaceNameProvider and corresponding implementation (NamespaceNameResolver) in a similar fashion to the PartitionResolver, allowing easy mocking for tests, and composition for prod code, allowing future optimisations such as pre-fetching / caching the "hot" namespace names at startup. Internal string identifier removal is a pre-requisite for removing string identifiers from the write wire format (#4880).
null
perf(ingester): address namespaces by ID only Removes reliance on string name identifiers for namespaces in the ingester buffer tree, reducing the memory usage of the namespace index and associated overhead. The namespace name is required (though unused by IOx) in the IoxMetadata embedded within a parquet file, and therefore the name is necessary at persist time. For this reason, a DeferredLoad is used to query the catalog (by ID) for the name, at some uniformly random duration of time after initialisation of the NamespaceData, up to a maximum of 1 minute later. This ensures the query remains off the hot ingest path, and the jitter prevents spikes in catalog load during replay/ingester startup. As an additional / easy optimisation, the persist code causes a pre-fetch of the name in the background while compacting, hiding the query latency should it not have already been resolved. In order to keep the ingester buffer & catalog decoupled / easily testable, this commit uses a provider/factory trait NamespaceNameProvider and corresponding implementation (NamespaceNameResolver) in a similar fashion to the PartitionResolver, allowing easy mocking for tests, and composition for prod code, allowing future optimisations such as pre-fetching / caching the "hot" namespace names at startup. Internal string identifier removal is a pre-requisite for removing string identifiers from the write wire format (#4880).
diff --git a/ingester/src/data.rs b/ingester/src/data.rs index a04df10cb6..16552e9490 100644 --- a/ingester/src/data.rs +++ b/ingester/src/data.rs @@ -25,6 +25,7 @@ use write_summary::ShardProgress; use crate::{ compact::{compact_persisting_batch, CompactedStream}, + handler::NAMESPACE_NAME_PRE_FETCH, lifecycle::LifecycleHandle, }; @@ -36,7 +37,11 @@ pub(crate) mod table; pub(crate) use sequence_range::*; -use self::{partition::resolver::PartitionProvider, shard::ShardData}; +use self::{ + namespace::name_resolver::{NamespaceNameProvider, NamespaceNameResolver}, + partition::resolver::PartitionProvider, + shard::ShardData, +}; #[cfg(test)] mod triggers; @@ -127,6 +132,13 @@ impl IngesterData { }, ); + let namespace_name_provider: Arc<dyn NamespaceNameProvider> = + Arc::new(NamespaceNameResolver::new( + NAMESPACE_NAME_PRE_FETCH, + Arc::clone(&catalog), + backoff_config.clone(), + )); + let shards = shards .into_iter() .map(|(id, index)| { @@ -135,6 +147,7 @@ impl IngesterData { ShardData::new( index, id, + Arc::clone(&namespace_name_provider), Arc::clone(&partition_provider), Arc::clone(&metrics), ), @@ -256,7 +269,7 @@ impl Persister for IngesterData { let namespace = self .shards .get(&shard_id) - .and_then(|s| s.namespace_by_id(namespace_id)) + .and_then(|s| s.namespace(namespace_id)) .unwrap_or_else(|| panic!("namespace {namespace_id} not in shard {shard_id} state")); // Begin resolving the load-deferred name concurrently if it is not @@ -809,7 +822,7 @@ mod tests { let (table_id, partition_id) = { let sd = data.shards.get(&shard1.id).unwrap(); - let n = sd.namespace(&"foo".into()).unwrap(); + let n = sd.namespace(namespace.id).unwrap(); let mem_table = n.table_data(&"mem".into()).unwrap(); assert!(n.table_data(&"mem".into()).is_some()); let p = mem_table @@ -961,7 +974,7 @@ mod tests { assert_progress(&data, shard_index, expected_progress).await; let sd = data.shards.get(&shard1.id).unwrap(); - let n = sd.namespace(&"foo".into()).unwrap(); + let n = sd.namespace(namespace.id).unwrap(); let partition_id; let table_id; { @@ -1039,7 +1052,7 @@ mod tests { let cached_sort_key = data .shard(shard1.id) .unwrap() - .namespace_by_id(namespace.id) + .namespace(namespace.id) .unwrap() .table_id(table_id) .unwrap() @@ -1219,7 +1232,7 @@ mod tests { // Get the namespace let sd = data.shards.get(&shard1.id).unwrap(); - let n = sd.namespace(&"foo".into()).unwrap(); + let n = sd.namespace(namespace.id).unwrap(); let expected_progress = ShardProgress::new().with_buffered(SequenceNumber::new(1)); assert_progress(&data, shard_index, expected_progress).await; diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs index 0e1744e509..d6e54e9771 100644 --- a/ingester/src/data/namespace.rs +++ b/ingester/src/data/namespace.rs @@ -1,5 +1,7 @@ //! Namespace level data buffer structures. 
+pub(crate) mod name_resolver; + use std::{collections::HashMap, sync::Arc}; use data_types::{NamespaceId, SequenceNumber, ShardId, TableId}; @@ -355,6 +357,7 @@ mod tests { use crate::{ data::partition::{resolver::MockPartitionProvider, PartitionData, SortKeyState}, + deferred_load, lifecycle::mock_handle::MockLifecycleHandle, test_util::{make_write_op, TEST_TABLE}, }; @@ -396,7 +399,11 @@ mod tests { ); // Assert the namespace name was stored - assert_eq!(ns.namespace_name().to_string(), NAMESPACE_NAME); + let name = ns.namespace_name().to_string(); + assert!( + (name == NAMESPACE_NAME) || (name == deferred_load::UNRESOLVED_DISPLAY_STRING), + "unexpected namespace name: {name}" + ); // Assert the namespace does not contain the test data assert!(ns.table_data(&TABLE_NAME.into()).is_none()); @@ -430,5 +437,10 @@ mod tests { .expect("failed to get observer") .fetch(); assert_eq!(tables, 1); + + // Ensure the deferred namespace name is loaded. + let name = ns.namespace_name().get().await; + assert_eq!(&**name, NAMESPACE_NAME); + assert_eq!(ns.namespace_name().to_string(), NAMESPACE_NAME); } } diff --git a/ingester/src/data/namespace/name_resolver.rs b/ingester/src/data/namespace/name_resolver.rs new file mode 100644 index 0000000000..05728dc3ad --- /dev/null +++ b/ingester/src/data/namespace/name_resolver.rs @@ -0,0 +1,138 @@ +use std::{sync::Arc, time::Duration}; + +use backoff::{Backoff, BackoffConfig}; +use data_types::NamespaceId; +use iox_catalog::interface::Catalog; + +use crate::deferred_load::DeferredLoad; + +use super::NamespaceName; + +pub(crate) trait NamespaceNameProvider: Send + Sync + std::fmt::Debug { + fn for_namespace(&self, id: NamespaceId) -> DeferredLoad<NamespaceName>; +} + +#[derive(Debug)] +pub(crate) struct NamespaceNameResolver { + max_smear: Duration, + catalog: Arc<dyn Catalog>, + backoff_config: BackoffConfig, +} + +impl NamespaceNameResolver { + pub(crate) fn new( + max_smear: Duration, + catalog: Arc<dyn Catalog>, + backoff_config: BackoffConfig, + ) -> Self { + Self { + max_smear, + catalog, + backoff_config, + } + } + + /// Fetch the [`NamespaceName`] from the [`Catalog`] for specified + /// `namespace_id`, retrying endlessly when errors occur. + pub(crate) async fn fetch( + namespace_id: NamespaceId, + catalog: Arc<dyn Catalog>, + backoff_config: BackoffConfig, + ) -> NamespaceName { + Backoff::new(&backoff_config) + .retry_all_errors("fetch partition sort key", || async { + let s = catalog + .repositories() + .await + .namespaces() + .get_by_id(namespace_id) + .await? 
+ .expect("resolving sort key for non-existent partition") + .name + .into(); + + Result::<_, iox_catalog::interface::Error>::Ok(s) + }) + .await + .expect("retry forever") + } +} + +impl NamespaceNameProvider for NamespaceNameResolver { + fn for_namespace(&self, id: NamespaceId) -> DeferredLoad<NamespaceName> { + DeferredLoad::new( + self.max_smear, + Self::fetch(id, Arc::clone(&self.catalog), self.backoff_config.clone()), + ) + } +} + +#[cfg(test)] +pub(crate) mod mock { + use super::*; + + #[derive(Debug)] + pub(crate) struct MockNamespaceNameProvider { + name: NamespaceName, + } + + impl MockNamespaceNameProvider { + pub(crate) fn new(name: impl Into<NamespaceName>) -> Self { + Self { name: name.into() } + } + } + + impl Default for MockNamespaceNameProvider { + fn default() -> Self { + Self::new("bananas") + } + } + + impl NamespaceNameProvider for MockNamespaceNameProvider { + fn for_namespace(&self, _id: NamespaceId) -> DeferredLoad<NamespaceName> { + let name = self.name.clone(); + DeferredLoad::new(Duration::from_secs(1), async { name }) + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use data_types::ShardIndex; + use test_helpers::timeout::FutureTimeout; + + use crate::test_util::populate_catalog; + + use super::*; + + const SHARD_INDEX: ShardIndex = ShardIndex::new(24); + const TABLE_NAME: &str = "bananas"; + const NAMESPACE_NAME: &str = "platanos"; + + #[tokio::test] + async fn test_fetch() { + let metrics = Arc::new(metric::Registry::default()); + let backoff_config = BackoffConfig::default(); + let catalog: Arc<dyn Catalog> = + Arc::new(iox_catalog::mem::MemCatalog::new(Arc::clone(&metrics))); + + // Populate the catalog with the shard / namespace / table + let (_shard_id, ns_id, _table_id) = + populate_catalog(&*catalog, SHARD_INDEX, NAMESPACE_NAME, TABLE_NAME).await; + + let fetcher = Arc::new(NamespaceNameResolver::new( + Duration::from_secs(10), + Arc::clone(&catalog), + backoff_config.clone(), + )); + + let got = fetcher + .for_namespace(ns_id) + .get() + .with_timeout_panic(Duration::from_secs(5)) + .await; + assert_eq!(&**got, NAMESPACE_NAME); + } +} diff --git a/ingester/src/data/shard.rs b/ingester/src/data/shard.rs index d69a57728a..02ca70ea04 100644 --- a/ingester/src/data/shard.rs +++ b/ingester/src/data/shard.rs @@ -1,47 +1,18 @@ //! Shard level data buffer structures. -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use data_types::{NamespaceId, ShardId, ShardIndex}; use dml::DmlOperation; use metric::U64Counter; -use parking_lot::RwLock; use write_summary::ShardProgress; use super::{ - namespace::{NamespaceData, NamespaceName}, + namespace::{name_resolver::NamespaceNameProvider, NamespaceData}, partition::resolver::PartitionProvider, DmlApplyAction, }; -use crate::{arcmap::ArcMap, deferred_load::DeferredLoad, lifecycle::LifecycleHandle}; - -/// A double-referenced map where [`NamespaceData`] can be looked up by name, or -/// ID. -#[derive(Debug, Default)] -struct DoubleRef { - // TODO(4880): this can be removed when IDs are sent over the wire. 
- by_name: ArcMap<NamespaceName, NamespaceData>, - by_id: ArcMap<NamespaceId, NamespaceData>, -} - -impl DoubleRef { - fn insert(&mut self, name: NamespaceName, ns: NamespaceData) -> Arc<NamespaceData> { - let id = ns.namespace_id(); - - let ns = Arc::new(ns); - self.by_name.insert(name, Arc::clone(&ns)); - self.by_id.insert(id, Arc::clone(&ns)); - ns - } - - fn by_name(&self, name: &NamespaceName) -> Option<Arc<NamespaceData>> { - self.by_name.get(name) - } - - fn by_id(&self, id: NamespaceId) -> Option<Arc<NamespaceData>> { - self.by_id.get(&id) - } -} +use crate::{arcmap::ArcMap, lifecycle::LifecycleHandle}; /// Data of a Shard #[derive(Debug)] @@ -57,8 +28,17 @@ pub(crate) struct ShardData { /// [`PartitionData`]: super::partition::PartitionData partition_provider: Arc<dyn PartitionProvider>, - // New namespaces can come in at any time so we need to be able to add new ones - namespaces: RwLock<DoubleRef>, + /// A set of namespaces this [`ShardData`] instance has processed + /// [`DmlOperation`]'s for. + /// + /// The [`NamespaceNameProvider`] acts as a [`DeferredLoad`] constructor to + /// resolve the [`NamespaceName`] for new [`NamespaceData`] out of the hot + /// path. + /// + /// [`DeferredLoad`]: crate::deferred_load::DeferredLoad + /// [`NamespaceName`]: data_types::NamespaceName + namespaces: ArcMap<NamespaceId, NamespaceData>, + namespace_name_resolver: Arc<dyn NamespaceNameProvider>, metrics: Arc<metric::Registry>, namespace_count: U64Counter, @@ -69,6 +49,7 @@ impl ShardData { pub(crate) fn new( shard_index: ShardIndex, shard_id: ShardId, + namespace_name_resolver: Arc<dyn NamespaceNameProvider>, partition_provider: Arc<dyn PartitionProvider>, metrics: Arc<metric::Registry>, ) -> Self { @@ -83,28 +64,33 @@ impl ShardData { shard_index, shard_id, namespaces: Default::default(), + namespace_name_resolver, metrics, partition_provider, namespace_count, } } - /// Store the write or delete in the shard. Deletes will - /// be written into the catalog before getting stored in the buffer. - /// Any writes that create new IOx partitions will have those records - /// created in the catalog before putting into the buffer. + /// Buffer the provided [`DmlOperation`] into the ingester state. pub(super) async fn buffer_operation( &self, dml_operation: DmlOperation, lifecycle_handle: &dyn LifecycleHandle, ) -> Result<DmlApplyAction, super::Error> { - let namespace_data = match self.namespace(&NamespaceName::from(dml_operation.namespace())) { - Some(d) => d, - None => { - self.insert_namespace(dml_operation.namespace(), dml_operation.namespace_id()) - .await? - } - }; + let namespace_id = dml_operation.namespace_id(); + let namespace_data = self.namespaces.get_or_insert_with(&namespace_id, || { + // Increase the metric that records the number of namespaces + // buffered in this ingester instance. 
+ self.namespace_count.inc(1); + + Arc::new(NamespaceData::new( + namespace_id, + self.namespace_name_resolver.for_namespace(namespace_id), + self.shard_id, + Arc::clone(&self.partition_provider), + &self.metrics, + )) + }); namespace_data .buffer_operation(dml_operation, lifecycle_handle) @@ -112,55 +98,13 @@ impl ShardData { } /// Gets the namespace data out of the map - pub(crate) fn namespace(&self, namespace: &NamespaceName) -> Option<Arc<NamespaceData>> { - let n = self.namespaces.read(); - n.by_name(namespace) - } - - /// Gets the namespace data out of the map - pub(crate) fn namespace_by_id(&self, namespace_id: NamespaceId) -> Option<Arc<NamespaceData>> { - // TODO: this should be the default once IDs are pushed over the wire. - // - // At which point the map should be indexed by IDs, instead of namespace - // names. - let n = self.namespaces.read(); - n.by_id(namespace_id) - } - - /// Retrieves the namespace from the catalog and initializes an empty buffer, or - /// retrieves the buffer if some other caller gets it first - async fn insert_namespace( - &self, - namespace: &str, - namespace_id: NamespaceId, - ) -> Result<Arc<NamespaceData>, super::Error> { - let ns_name = NamespaceName::from(namespace); - - let mut n = self.namespaces.write(); - - Ok(match n.by_name(&ns_name) { - Some(v) => v, - None => { - self.namespace_count.inc(1); - - // Insert the table and then return a ref to it. - n.insert( - ns_name.clone(), - NamespaceData::new( - namespace_id, - DeferredLoad::new(Duration::from_millis(1), async { ns_name }), - self.shard_id, - Arc::clone(&self.partition_provider), - &self.metrics, - ), - ) - } - }) + pub(crate) fn namespace(&self, namespace_id: NamespaceId) -> Option<Arc<NamespaceData>> { + self.namespaces.get(&namespace_id) } /// Return the progress of this shard pub(super) async fn progress(&self) -> ShardProgress { - let namespaces: Vec<_> = self.namespaces.read().by_id.values(); + let namespaces: Vec<_> = self.namespaces.values(); let mut progress = ShardProgress::new(); @@ -184,7 +128,10 @@ mod tests { use metric::{Attributes, Metric}; use crate::{ - data::partition::{resolver::MockPartitionProvider, PartitionData, SortKeyState}, + data::{ + namespace::name_resolver::mock::MockNamespaceNameProvider, + partition::{resolver::MockPartitionProvider, PartitionData, SortKeyState}, + }, lifecycle::mock_handle::MockLifecycleHandle, test_util::{make_write_op, TEST_TABLE}, }; @@ -199,7 +146,7 @@ mod tests { const NAMESPACE_ID: NamespaceId = NamespaceId::new(42); #[tokio::test] - async fn test_shard_double_ref() { + async fn test_shard_init_namespace() { let metrics = Arc::new(metric::Registry::default()); // Configure the mock partition provider to return a partition for this @@ -220,13 +167,13 @@ mod tests { let shard = ShardData::new( SHARD_INDEX, SHARD_ID, + Arc::new(MockNamespaceNameProvider::new(NAMESPACE_NAME)), partition_provider, Arc::clone(&metrics), ); // Assert the namespace does not contain the test data - assert!(shard.namespace(&NAMESPACE_NAME.into()).is_none()); - assert!(shard.namespace_by_id(NAMESPACE_ID).is_none()); + assert!(shard.namespace(NAMESPACE_ID).is_none()); // Write some test data shard @@ -246,8 +193,7 @@ mod tests { .expect("buffer op should succeed"); // Both forms of referencing the table should succeed - assert!(shard.namespace(&NAMESPACE_NAME.into()).is_some()); - assert!(shard.namespace_by_id(NAMESPACE_ID).is_some()); + assert!(shard.namespace(NAMESPACE_ID).is_some()); // And the table counter metric should increase let tables = metrics diff 
--git a/ingester/src/handler.rs b/ingester/src/handler.rs index 9a99bc1d65..0f824eb069 100644 --- a/ingester/src/handler.rs +++ b/ingester/src/handler.rs @@ -46,7 +46,16 @@ use crate::{ /// /// [`PartitionData`]: crate::data::partition::PartitionData /// [`SortKey`]: schema::sort::SortKey -const SORT_KEY_PRE_FETCH: Duration = Duration::from_secs(30); +pub(crate) const SORT_KEY_PRE_FETCH: Duration = Duration::from_secs(60); + +/// The maximum duration of time between observing an initialising the +/// [`NamespaceData`] in response to observing an operation for a namespace, and +/// fetching the string identifier for it in the background via a +/// [`DeferredLoad`]. +/// +/// [`NamespaceData`]: crate::data::namespace::NamespaceData +/// [`DeferredLoad`]: crate::deferred_load::DeferredLoad +pub(crate) const NAMESPACE_NAME_PRE_FETCH: Duration = Duration::from_secs(60); #[derive(Debug, Snafu)] #[allow(missing_copy_implementations, missing_docs)] diff --git a/ingester/src/querier_handler.rs b/ingester/src/querier_handler.rs index 8b75c502bb..abb607925e 100644 --- a/ingester/src/querier_handler.rs +++ b/ingester/src/querier_handler.rs @@ -263,7 +263,7 @@ pub async fn prepare_data_to_querier( let mut found_namespace = false; for (shard_id, shard_data) in ingest_data.shards() { - let namespace_data = match shard_data.namespace_by_id(request.namespace_id) { + let namespace_data = match shard_data.namespace(request.namespace_id) { Some(namespace_data) => { trace!( shard_id=%shard_id.get(),
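The commit body above describes a deferred, jittered namespace-name lookup: resolve the name in the background some random time after the `NamespaceData` is created, but allow the persist path to force resolution early. Below is a minimal, self-contained sketch of that idea — not the ingester's actual `DeferredLoad` type — assuming the `tokio`, `futures`, and `rand` crates, with the catalog query replaced by a hypothetical constant.

```rust
use std::time::Duration;

use futures::future::{BoxFuture, FutureExt, Shared};
use rand::Rng;

/// Toy stand-in for the ingester's `DeferredLoad`: the wrapped future is
/// driven to completion by a background task after a uniformly random delay,
/// keeping the lookup off the hot ingest path, while `get()` can still force
/// resolution early (the persist-time pre-fetch).
struct Deferred<T: Clone + Send + Sync + 'static> {
    inner: Shared<BoxFuture<'static, T>>,
}

impl<T: Clone + Send + Sync + 'static> Deferred<T> {
    fn new<F>(max_delay: Duration, load: F) -> Self
    where
        F: std::future::Future<Output = T> + Send + 'static,
    {
        let inner = load.boxed().shared();
        let bg = inner.clone();
        // Uniformly random delay smears catalog load across a fleet of
        // ingesters restarting at the same time.
        let delay = rand::thread_rng().gen_range(Duration::ZERO..max_delay);
        tokio::spawn(async move {
            tokio::time::sleep(delay).await;
            // No-op if a caller already resolved the value via `get()`.
            bg.await;
        });
        Self { inner }
    }

    /// Resolve the value now (e.g. a persist-time pre-fetch), or return the
    /// already-cached result.
    async fn get(&self) -> T {
        self.inner.clone().await
    }
}

#[tokio::main]
async fn main() {
    // Hypothetical stand-in for the catalog query by namespace ID.
    let name = Deferred::new(Duration::from_secs(60), async { "platanos".to_string() });

    println!("namespace name = {}", name.get().await);
}
```

Sharing the boxed future means the background task and an explicit `get()` race safely: whichever runs first resolves the value, and the other simply observes the cached result.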
7f7f0ca170ab099db78bfcc61de781e4316c5021
Dom Dwyer
2023-01-09 16:25:35
bump tokio 1.22 -> 1.24
Fixes a bug on Windows:
https://github.com/tokio-rs/tokio/security/advisories/GHSA-7rrj-xr53-82p7
chore: bump tokio 1.22 -> 1.24 Fixes a bug on Windows: https://github.com/tokio-rs/tokio/security/advisories/GHSA-7rrj-xr53-82p7
diff --git a/Cargo.lock b/Cargo.lock index 36c791854f..906c7c1e6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5627,9 +5627,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.22.0" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" +checksum = "7125661431c26622a80ca5051a2f936c9a678318e0351007b0cc313143024e5c" dependencies = [ "autocfg", "bytes", @@ -5643,7 +5643,7 @@ dependencies = [ "socket2", "tokio-macros", "tracing", - "winapi", + "windows-sys", ] [[package]] diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 15ffe8003a..9c98801415 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -167,15 +167,15 @@ once_cell = { version = "1", default-features = false, features = ["unstable"] } [target.x86_64-pc-windows-msvc.dependencies] once_cell = { version = "1", default-features = false, features = ["unstable"] } scopeguard = { version = "1", features = ["use_std"] } -tokio = { version = "1", default-features = false, features = ["winapi"] } -winapi = { version = "0.3", default-features = false, features = ["accctrl", "aclapi", "activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "namedpipeapi", "ntsecapi", "ntstatus", "objbase", "processenv", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] } +tokio = { version = "1", default-features = false, features = ["windows-sys"] } +winapi = { version = "0.3", default-features = false, features = ["activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "ntsecapi", "ntstatus", "objbase", "processenv", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] } windows-sys = { version = "0.42", features = ["Win32", "Win32_Foundation", "Win32_Networking", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage", "Win32_Storage_FileSystem", "Win32_System", "Win32_System_Console", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI", "Win32_UI_Input", "Win32_UI_Input_KeyboardAndMouse"] } [target.x86_64-pc-windows-msvc.build-dependencies] once_cell = { version = "1", default-features = false, features = ["unstable"] } scopeguard = { version = "1", features = ["use_std"] } -tokio = { version = "1", default-features = false, features = ["winapi"] } -winapi = { version = "0.3", default-features = false, features = ["accctrl", "aclapi", "activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "namedpipeapi", "ntsecapi", "ntstatus", "objbase", "processenv", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] } +tokio = { version = "1", default-features = false, 
features = ["windows-sys"] } +winapi = { version = "0.3", default-features = false, features = ["activation", "basetsd", "combaseapi", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "minwinbase", "minwindef", "ntsecapi", "ntstatus", "objbase", "processenv", "roapi", "shellapi", "shlobj", "std", "stringapiset", "synchapi", "timezoneapi", "winbase", "wincon", "winerror", "winnt", "winreg", "winstring", "winuser", "ws2ipdef", "ws2tcpip", "wtypesbase"] } windows-sys = { version = "0.42", features = ["Win32", "Win32_Foundation", "Win32_Networking", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage", "Win32_Storage_FileSystem", "Win32_System", "Win32_System_Console", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Pipes", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_WindowsProgramming", "Win32_UI", "Win32_UI_Input", "Win32_UI_Input_KeyboardAndMouse"] } ### END HAKARI SECTION
c9349a685f28a5a09ca933b8c16c4420904568d5
Marco Neumann
2023-06-22 10:30:42
remove pointless handler abstraction (#8044)
If your abstraction has one implementation, it ain't an abstraction.
null
refactor: remove pointless handler abstraction (#8044) If your abstraction has one implementation, it ain't an abstraction.
diff --git a/Cargo.lock b/Cargo.lock index 41d9e49c34..657a7d041b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4365,7 +4365,6 @@ dependencies = [ "service_grpc_schema", "snafu", "test_helpers", - "thiserror", "tokio", "tokio-util", "tonic", diff --git a/ioxd_querier/src/lib.rs b/ioxd_querier/src/lib.rs index 8ca37084e4..d1e8c8e0c2 100644 --- a/ioxd_querier/src/lib.rs +++ b/ioxd_querier/src/lib.rs @@ -32,10 +32,7 @@ use ioxd_common::{ }; use metric::Registry; use object_store::DynObjectStore; -use querier::{ - create_ingester_connections, QuerierCatalogCache, QuerierDatabase, QuerierHandler, - QuerierHandlerImpl, QuerierServer, -}; +use querier::{create_ingester_connections, QuerierCatalogCache, QuerierDatabase, QuerierServer}; use std::{ fmt::{Debug, Display}, sync::Arc, @@ -47,22 +44,22 @@ use trace::TraceCollector; mod rpc; -pub struct QuerierServerType<C: QuerierHandler> { +pub struct QuerierServerType { database: Arc<QuerierDatabase>, - server: QuerierServer<C>, + server: QuerierServer, trace_collector: Option<Arc<dyn TraceCollector>>, authz: Option<Arc<dyn Authorizer>>, } -impl<C: QuerierHandler> std::fmt::Debug for QuerierServerType<C> { +impl std::fmt::Debug for QuerierServerType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Querier") } } -impl<C: QuerierHandler> QuerierServerType<C> { +impl QuerierServerType { pub fn new( - server: QuerierServer<C>, + server: QuerierServer, database: Arc<QuerierDatabase>, common_state: &CommonServerState, authz: Option<Arc<dyn Authorizer>>, @@ -77,7 +74,7 @@ impl<C: QuerierHandler> QuerierServerType<C> { } #[async_trait] -impl<C: QuerierHandler + std::fmt::Debug + 'static> ServerType for QuerierServerType<C> { +impl ServerType for QuerierServerType { /// Human name for this server type fn name(&self) -> &str { "querier" @@ -119,9 +116,9 @@ impl<C: QuerierHandler + std::fmt::Debug + 'static> ServerType for QuerierServer builder, rpc::namespace::namespace_service(Arc::clone(&self.database)) ); - add_service!(builder, self.server.handler().schema_service()); - add_service!(builder, self.server.handler().catalog_service()); - add_service!(builder, self.server.handler().object_store_service()); + add_service!(builder, self.server.schema_service()); + add_service!(builder, self.server.catalog_service()); + add_service!(builder, self.server.object_store_service()); serve_builder!(builder); @@ -260,13 +257,13 @@ pub async fn create_querier_server_type( ) .await?, ); - let querier_handler = Arc::new(QuerierHandlerImpl::new( + + let querier = QuerierServer::new( args.catalog, Arc::clone(&database), - Arc::clone(&args.object_store), - )); - - let querier = QuerierServer::new(args.metric_registry, querier_handler); + args.metric_registry, + args.object_store, + ); Ok(Arc::new(QuerierServerType::new( querier, database, diff --git a/querier/Cargo.toml b/querier/Cargo.toml index 33a35ca95f..7e4bcc081c 100644 --- a/querier/Cargo.toml +++ b/querier/Cargo.toml @@ -37,7 +37,6 @@ service_grpc_schema = { path = "../service_grpc_schema" } service_grpc_object_store = { path = "../service_grpc_object_store" } schema = { path = "../schema" } snafu = "0.7" -thiserror = "1.0" tokio = { version = "1.28", features = ["macros", "parking_lot", "rt-multi-thread", "sync", "time"] } tokio-util = { version = "0.7.8" } tonic = { workspace = true } diff --git a/querier/src/handler.rs b/querier/src/handler.rs deleted file mode 100644 index f83f1c4043..0000000000 --- a/querier/src/handler.rs +++ /dev/null @@ -1,180 +0,0 @@ -//! 
Querier handler - -use async_trait::async_trait; -use influxdb_iox_client::{ - catalog::generated_types::catalog_service_server::CatalogServiceServer, - schema::generated_types::schema_service_server::SchemaServiceServer, - store::generated_types::object_store_service_server::ObjectStoreServiceServer, -}; -use iox_catalog::interface::Catalog; -use object_store::ObjectStore; -use observability_deps::tracing::warn; -use service_grpc_catalog::CatalogService; -use service_grpc_object_store::ObjectStoreService; -use service_grpc_schema::SchemaService; -use std::sync::Arc; -use thiserror::Error; -use tokio_util::sync::CancellationToken; - -use crate::database::QuerierDatabase; - -#[derive(Debug, Error)] -#[allow(missing_copy_implementations, missing_docs)] -pub enum Error {} - -/// The [`QuerierHandler`] does nothing at this point -#[async_trait] -pub trait QuerierHandler: Send + Sync { - /// Acquire a [`SchemaServiceServer`] gRPC service implementation. - fn schema_service(&self) -> SchemaServiceServer<SchemaService>; - - /// Acquire a [`CatalogServiceServer`] gRPC service implementation. - fn catalog_service(&self) -> CatalogServiceServer<CatalogService>; - - /// Acquire an [`ObjectStoreServiceServer`] gRPC service implementation. - fn object_store_service(&self) -> ObjectStoreServiceServer<ObjectStoreService>; - - /// Wait until the handler finished to shutdown. - /// - /// Use [`shutdown`](Self::shutdown) to trigger a shutdown. - async fn join(&self); - - /// Shut down background workers. - fn shutdown(&self); -} - -/// Implementation of the `QuerierHandler` trait (that currently does nothing) -#[derive(Debug)] -pub struct QuerierHandlerImpl { - /// Catalog (for other services) - catalog: Arc<dyn Catalog>, - - /// Database that handles query operation - database: Arc<QuerierDatabase>, - - /// The object store - object_store: Arc<dyn ObjectStore>, - - /// Remembers if `shutdown` was called but also blocks the `join` call. 
- shutdown: CancellationToken, -} - -impl QuerierHandlerImpl { - /// Initialize the Querier - pub fn new( - catalog: Arc<dyn Catalog>, - database: Arc<QuerierDatabase>, - object_store: Arc<dyn ObjectStore>, - ) -> Self { - Self { - catalog, - database, - object_store, - shutdown: CancellationToken::new(), - } - } -} - -#[async_trait] -impl QuerierHandler for QuerierHandlerImpl { - fn schema_service(&self) -> SchemaServiceServer<SchemaService> { - SchemaServiceServer::new(SchemaService::new(Arc::clone(&self.catalog))) - } - - fn catalog_service(&self) -> CatalogServiceServer<CatalogService> { - CatalogServiceServer::new(CatalogService::new(Arc::clone(&self.catalog))) - } - - fn object_store_service(&self) -> ObjectStoreServiceServer<ObjectStoreService> { - ObjectStoreServiceServer::new(ObjectStoreService::new( - Arc::clone(&self.catalog), - Arc::clone(&self.object_store), - )) - } - - async fn join(&self) { - self.shutdown.cancelled().await; - self.database.exec().join().await; - } - - fn shutdown(&self) { - self.shutdown.cancel(); - self.database.exec().shutdown(); - } -} - -impl Drop for QuerierHandlerImpl { - fn drop(&mut self) { - if !self.shutdown.is_cancelled() { - warn!("QuerierHandlerImpl dropped without calling shutdown()"); - self.shutdown(); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{cache::CatalogCache, create_ingester_connection_for_testing}; - use iox_catalog::mem::MemCatalog; - use iox_query::exec::Executor; - use iox_time::{MockProvider, Time}; - use object_store::memory::InMemory; - use std::{collections::HashMap, time::Duration}; - use tokio::runtime::Handle; - - #[tokio::test] - async fn test_shutdown() { - let querier = TestQuerier::new().await.querier; - - // does not exit w/o shutdown - tokio::select! 
{ - _ = querier.join() => panic!("querier finished w/o shutdown"), - _ = tokio::time::sleep(Duration::from_millis(10)) => {}, - }; - - querier.shutdown(); - - tokio::time::timeout(Duration::from_millis(1000), querier.join()) - .await - .unwrap(); - } - - struct TestQuerier { - querier: QuerierHandlerImpl, - } - - impl TestQuerier { - async fn new() -> Self { - let metric_registry = Arc::new(metric::Registry::new()); - let catalog = Arc::new(MemCatalog::new(Arc::clone(&metric_registry))) as _; - let object_store = Arc::new(InMemory::new()) as _; - - let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); - let exec = Arc::new(Executor::new_testing()); - let catalog_cache = Arc::new(CatalogCache::new_testing( - Arc::clone(&catalog), - time_provider, - Arc::clone(&metric_registry), - Arc::clone(&object_store), - &Handle::current(), - )); - - let database = Arc::new( - QuerierDatabase::new( - catalog_cache, - metric_registry, - exec, - Some(create_ingester_connection_for_testing()), - QuerierDatabase::MAX_CONCURRENT_QUERIES_MAX, - Arc::new(HashMap::default()), - ) - .await - .unwrap(), - ); - let querier = QuerierHandlerImpl::new(catalog, database, object_store); - - Self { querier } - } - } -} diff --git a/querier/src/lib.rs b/querier/src/lib.rs index 32895ac1ed..24cfd84f01 100644 --- a/querier/src/lib.rs +++ b/querier/src/lib.rs @@ -19,7 +19,6 @@ use workspace_hack as _; mod cache; mod database; mod df_stats; -mod handler; mod ingester; mod namespace; mod parquet; @@ -30,7 +29,6 @@ mod table; pub use cache::CatalogCache as QuerierCatalogCache; pub use database::{Error as QuerierDatabaseError, QuerierDatabase}; -pub use handler::{QuerierHandler, QuerierHandlerImpl}; pub use ingester::{ create_ingester_connection_for_testing, create_ingester_connections, flight_client::{ diff --git a/querier/src/server.rs b/querier/src/server.rs index 2ef5a255ad..be1392122b 100644 --- a/querier/src/server.rs +++ b/querier/src/server.rs @@ -2,23 +2,57 @@ use std::sync::Arc; -use crate::handler::QuerierHandler; -use std::fmt::Debug; +use influxdb_iox_client::{ + catalog::generated_types::catalog_service_server::CatalogServiceServer, + schema::generated_types::schema_service_server::SchemaServiceServer, + store::generated_types::object_store_service_server::ObjectStoreServiceServer, +}; +use iox_catalog::interface::Catalog; +use object_store::ObjectStore; +use observability_deps::tracing::warn; +use service_grpc_catalog::CatalogService; +use service_grpc_object_store::ObjectStoreService; +use service_grpc_schema::SchemaService; +use tokio_util::sync::CancellationToken; + +use crate::QuerierDatabase; /// The [`QuerierServer`] manages the lifecycle and contains all state for a /// `querier` server instance. -#[derive(Debug, Default)] -pub struct QuerierServer<C: QuerierHandler> { +#[derive(Debug)] +pub struct QuerierServer { + /// Metrics (for other services) metrics: Arc<metric::Registry>, - handler: Arc<C>, + /// Catalog (for other services) + catalog: Arc<dyn Catalog>, + + /// Database that handles query operation + database: Arc<QuerierDatabase>, + + /// The object store + object_store: Arc<dyn ObjectStore>, + + /// Remembers if `shutdown` was called but also blocks the `join` call. + shutdown: CancellationToken, } -impl<C: QuerierHandler> QuerierServer<C> { +impl QuerierServer { /// Initialise a new [`QuerierServer`] using the provided gRPC /// handlers. 
- pub fn new(metrics: Arc<metric::Registry>, handler: Arc<C>) -> Self { - Self { metrics, handler } + pub fn new( + catalog: Arc<dyn Catalog>, + database: Arc<QuerierDatabase>, + metrics: Arc<metric::Registry>, + object_store: Arc<dyn ObjectStore>, + ) -> Self { + Self { + catalog, + database, + metrics, + object_store, + shutdown: CancellationToken::new(), + } } /// Return the [`metric::Registry`] used by the router. @@ -26,18 +60,111 @@ impl<C: QuerierHandler> QuerierServer<C> { Arc::clone(&self.metrics) } - /// return the handler - pub fn handler(&self) -> &Arc<C> { - &self.handler + /// Acquire a [`SchemaServiceServer`] gRPC service implementation. + pub fn schema_service(&self) -> SchemaServiceServer<SchemaService> { + SchemaServiceServer::new(SchemaService::new(Arc::clone(&self.catalog))) } - /// Join shutdown worker. + /// Acquire a [`CatalogServiceServer`] gRPC service implementation. + pub fn catalog_service(&self) -> CatalogServiceServer<CatalogService> { + CatalogServiceServer::new(CatalogService::new(Arc::clone(&self.catalog))) + } + + /// Acquire an [`ObjectStoreServiceServer`] gRPC service implementation. + pub fn object_store_service(&self) -> ObjectStoreServiceServer<ObjectStoreService> { + ObjectStoreServiceServer::new(ObjectStoreService::new( + Arc::clone(&self.catalog), + Arc::clone(&self.object_store), + )) + } + + /// Wait until the handler finished to shutdown. + /// + /// Use [`shutdown`](Self::shutdown) to trigger a shutdown. pub async fn join(&self) { - self.handler.join().await; + self.shutdown.cancelled().await; + self.database.exec().join().await; } - /// Shutdown background worker. + /// Shut down background workers. pub fn shutdown(&self) { - self.handler.shutdown(); + self.shutdown.cancel(); + self.database.exec().shutdown(); + } +} + +impl Drop for QuerierServer { + fn drop(&mut self) { + if !self.shutdown.is_cancelled() { + warn!("QuerierServer dropped without calling shutdown()"); + self.shutdown(); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{cache::CatalogCache, create_ingester_connection_for_testing}; + use iox_catalog::mem::MemCatalog; + use iox_query::exec::Executor; + use iox_time::{MockProvider, Time}; + use object_store::memory::InMemory; + use std::{collections::HashMap, time::Duration}; + use tokio::runtime::Handle; + + #[tokio::test] + async fn test_shutdown() { + let querier = TestQuerier::new().await.querier; + + // does not exit w/o shutdown + tokio::select! 
{ + _ = querier.join() => panic!("querier finished w/o shutdown"), + _ = tokio::time::sleep(Duration::from_millis(10)) => {}, + }; + + querier.shutdown(); + + tokio::time::timeout(Duration::from_millis(1000), querier.join()) + .await + .unwrap(); + } + + struct TestQuerier { + querier: QuerierServer, + } + + impl TestQuerier { + async fn new() -> Self { + let metric_registry = Arc::new(metric::Registry::new()); + let catalog = Arc::new(MemCatalog::new(Arc::clone(&metric_registry))) as _; + let object_store = Arc::new(InMemory::new()) as _; + + let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let exec = Arc::new(Executor::new_testing()); + let catalog_cache = Arc::new(CatalogCache::new_testing( + Arc::clone(&catalog), + time_provider, + Arc::clone(&metric_registry), + Arc::clone(&object_store), + &Handle::current(), + )); + + let database = Arc::new( + QuerierDatabase::new( + catalog_cache, + Arc::clone(&metric_registry), + exec, + Some(create_ingester_connection_for_testing()), + QuerierDatabase::MAX_CONCURRENT_QUERIES_MAX, + Arc::new(HashMap::default()), + ) + .await + .unwrap(), + ); + let querier = QuerierServer::new(catalog, database, metric_registry, object_store); + + Self { querier } + } } }
26daa8664831fc79e3a35fe5cad50619a72b032c
Manuel de la Peña
2022-11-23 19:18:10
bump testcontainers to latest released version (#23858)
* chore: bump testcontainers to v0.15.0 * chore: run go mod tidy * chore: update test to latest version of testcontainers * chore: update package * fix: use collectors.NewGoCollector instead (SA1019 detected by staticcheck)
null
chore: bump testcontainers to latest released version (#23858) * chore: bump testcontainers to v0.15.0 * chore: run go mod tidy * chore: update test to latest version of testcontainers * chore: update package * fix: use collectors.NewGoCollector instead (SA1019 detected by staticcheck)
diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index 0f965c1885..573a8d2a37 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -77,6 +77,7 @@ import ( telegrafservice "github.com/influxdata/influxdb/v2/telegraf/service" "github.com/influxdata/influxdb/v2/telemetry" "github.com/influxdata/influxdb/v2/tenant" + "github.com/prometheus/client_golang/prometheus/collectors" // needed for tsm1 _ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" @@ -90,7 +91,6 @@ import ( "github.com/influxdata/influxdb/v2/vault" pzap "github.com/influxdata/influxdb/v2/zap" "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" jaegerconfig "github.com/uber/jaeger-client-go/config" "go.uber.org/zap" ) @@ -249,7 +249,7 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) { } m.reg = prom.NewRegistry(m.log.With(zap.String("service", "prom_registry"))) - m.reg.MustRegister(prometheus.NewGoCollector()) + m.reg.MustRegister(collectors.NewGoCollector()) // Open KV and SQL stores. procID, err := m.openMetaStores(ctx, opts) diff --git a/go.mod b/go.mod index 930407bfa9..0bda983127 100644 --- a/go.mod +++ b/go.mod @@ -22,10 +22,10 @@ require ( github.com/go-stack/stack v1.8.0 github.com/golang-jwt/jwt v3.2.1+incompatible github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 - github.com/golang/mock v1.5.0 + github.com/golang/mock v1.6.0 github.com/golang/snappy v0.0.4 github.com/google/btree v1.0.1 - github.com/google/go-cmp v0.5.7 + github.com/google/go-cmp v0.5.9 github.com/google/go-jsonnet v0.17.0 github.com/hashicorp/vault/api v1.0.2 github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe @@ -41,21 +41,21 @@ require ( github.com/kevinburke/go-bindata v3.22.0+incompatible github.com/mattn/go-isatty v0.0.14 github.com/mattn/go-sqlite3 v1.14.7 - github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5 github.com/mna/pigeon v1.0.1-0.20180808201053-bb0192cfc2ae github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_golang v1.11.1 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.9.1 + github.com/prometheus/common v0.30.0 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 github.com/spf13/cast v1.3.0 github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.6.1 github.com/stretchr/testify v1.8.0 - github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72 + github.com/testcontainers/testcontainers-go v0.15.0 github.com/tinylib/msgp v1.1.0 github.com/uber/jaeger-client-go v2.28.0+incompatible github.com/xlab/treeprint v1.0.0 @@ -64,13 +64,13 @@ require ( go.uber.org/multierr v1.6.0 go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 - golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/text v0.3.7 - golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba - golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac + golang.org/x/tools v0.1.11 google.golang.org/protobuf v1.28.1 - gopkg.in/yaml.v2 v2.3.0 + 
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 honnef.co/go/tools v0.3.0 ) @@ -82,6 +82,7 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.4 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-storage-blob-go v0.14.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.9 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect @@ -93,7 +94,8 @@ require ( github.com/DATA-DOG/go-sqlmock v1.4.1 // indirect github.com/Masterminds/semver v1.4.2 // indirect github.com/Masterminds/sprig v2.16.0+incompatible // indirect - github.com/Microsoft/go-winio v0.4.11 // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Microsoft/hcsshim v0.9.4 // indirect github.com/SAP/go-hdb v0.14.1 // indirect github.com/aokoli/goutils v1.0.1 // indirect github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect @@ -112,43 +114,46 @@ require ( github.com/benbjohnson/immutable v0.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/containerd v1.6.8 // indirect github.com/deepmap/oapi-codegen v1.6.0 // indirect github.com/denisenkom/go-mssqldb v0.10.0 // indirect github.com/dimchansky/utfbom v1.1.0 // indirect - github.com/docker/distribution v2.7.0+incompatible // indirect - github.com/docker/docker v1.13.1 // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/docker v20.10.17+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.3.3 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect github.com/editorconfig/editorconfig-core-go/v2 v2.1.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/gabriel-vasile/mimetype v1.4.0 // indirect github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect - github.com/go-sql-driver/mysql v1.5.0 // indirect + github.com/go-sql-driver/mysql v1.6.0 // indirect github.com/goccy/go-json v0.9.6 // indirect github.com/gofrs/uuid v3.3.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.1 // indirect - github.com/hashicorp/go-multierror v1.0.0 // 
indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.6.4 // indirect github.com/hashicorp/go-rootcerts v1.0.0 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/vault/sdk v0.1.8 // indirect github.com/huandu/xstrings v1.0.0 // indirect - github.com/imdario/mergo v0.3.9 // indirect + github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/influxdata/gosnowflake v1.6.9 // indirect github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 // indirect @@ -162,29 +167,32 @@ require ( github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.2.0 // indirect - github.com/magiconair/properties v1.8.1 // indirect + github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/moby/sys/mount v0.3.3 // indirect + github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect - github.com/onsi/ginkgo v1.11.0 // indirect - github.com/onsi/gomega v1.8.1 // indirect - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect - github.com/pelletier/go-toml v1.2.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/runc v1.1.3 // indirect + github.com/pelletier/go-toml v1.9.3 // indirect github.com/philhofer/fwd v1.0.0 // indirect github.com/pierrec/lz4 v2.0.5+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.12 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.0.11 // indirect + github.com/prometheus/procfs v0.7.3 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/segmentio/kafka-go v0.2.0 // indirect github.com/sergi/go-diff v1.1.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/afero v1.2.2 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect github.com/stretchr/objx v0.4.0 // indirect github.com/subosito/gotenv v1.2.0 // indirect @@ -192,7 +200,7 @@ require ( github.com/uber/athenadriver v1.1.4 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/vertica/vertica-sql-go v1.1.1 // indirect - github.com/willf/bitset v1.1.9 // indirect + github.com/willf/bitset v1.1.11 // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/yudai/pp v2.0.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect @@ -200,18 +208,18 @@ require ( golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/net v0.0.0-20220401154927-543a649e0bdd // indirect - golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/api v0.47.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 // indirect - google.golang.org/grpc v1.44.0 // indirect + google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect + google.golang.org/grpc v1.47.0 // indirect gopkg.in/ini.v1 v1.51.0 // indirect - gopkg.in/square/go-jose.v2 v2.3.1 // indirect + gopkg.in/square/go-jose.v2 v2.5.1 // indirect ) replace github.com/nats-io/nats-streaming-server v0.11.2 => github.com/influxdata/nats-streaming-server v0.11.3-0.20201112040610-c277f7560803 diff --git a/go.sum b/go.sum index 2464a37958..bdb98e995d 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -48,13 +49,19 @@ github.com/AlecAivazis/survey/v2 v2.3.4 h1:pchTU9rsLUSvWEl2Aq9Pv3k0IE2fkqtGxazsk github.com/AlecAivazis/survey/v2 v2.3.4/go.mod h1:hrV6Y/kQCLhIZXGcriDCUBtB3wnN7156gMXJ3+b23xM= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.9 h1:P0ZF0dEYoUPUVDQo3mA1CvH5b8mKev7DDcmTwauuNME= github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13 
h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -64,6 +71,7 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyo github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= @@ -86,19 +94,44 @@ github.com/Masterminds/sprig v2.16.0+incompatible h1:QZbMUPxRQ50EKAq3LFMnxddMu88 github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I= +github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod 
h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.0.1 h1:iLrQrdwjDd52kHDA5op2UBJFjmOb9g+7scBan4RN8F0= github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/RoaringBitmap/roaring v0.4.16 h1:NholfewybRLOwACgfqfzn/N5xa6keKNs4fP00t0cwLo= github.com/RoaringBitmap/roaring v0.4.16/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -108,6 +141,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= @@ -126,7 +161,9 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.30.12 h1:KrjyosZvkpJjcwMk0RNxMZewQ47v7+ZkbQDXjWsJMs8= @@ -170,28 +207,52 @@ github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCg github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0 h1:T5QPGJD0W6JJxyEEAlVnX3co/IkUrfHen1/42nlgAHo= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM= github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0= github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod 
h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -201,23 +262,129 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs 
v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= 
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs= +github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= 
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 
v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -225,21 +392,37 @@ github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiD github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/docker/distribution 
v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU= -github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20180815000130-e05b657120a6/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= -github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -256,6 +439,9 @@ github.com/editorconfig/editorconfig-core-go/v2 v2.1.1/go.mod h1:/LuhWJiQ9Gvo1Dh github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/go-bindata-assetfs v1.0.1 
h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw= github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -265,7 +451,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= @@ -281,13 +469,19 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/gabriel-vasile/mimetype v1.4.0 h1:Cn9dkdYsMIu56tGho+fqzh7XmvY2YyGU0FnbhiOsEro= github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= 
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= @@ -303,32 +497,58 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 
h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.7.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.9.6 h1:5/4CtRQdtsX0sal8fdVhTaiMN01Ri8BExZZ8iRmHQ6E= github.com/goccy/go-json v0.9.6/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -346,8 +566,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -355,8 +576,9 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -400,11 +622,13 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-jsonnet v0.17.0 h1:/9NIEfhK1NQRKl3sP2536b2+x5HnZMdql7x3yK/l8JY= github.com/google/go-jsonnet v0.17.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -424,6 +648,7 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -431,15 +656,20 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -448,8 +678,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -459,8 +691,10 @@ github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxC github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY= @@ -492,7 +726,6 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= @@ -500,8 +733,12 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe h1:7j4SdN/BvQwN6WoUq7mv0kg5U9NhnFBxPGMafYRKym0= @@ -534,6 +771,9 @@ github.com/influxdata/pkg-config v0.2.11 h1:RDlWAvkTARzPRGChq34x179TYlRndq8OU5Ro github.com/influxdata/pkg-config v0.2.11/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68ZBRvtCjBi3QSosCIKrjmMbYlQMFAwVLds4= github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= 
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -542,11 +782,14 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -555,6 +798,7 @@ github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3T github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0= @@ -568,17 +812,23 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.1/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= 
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -592,12 +842,17 @@ github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -619,20 +874,26 @@ github.com/mattn/go-isatty v0.0.14 
h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.4 h1:NVikla9X8MN0SQAqCYzpGyXv0jY7MNl3HOWD2dkle7E= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5 h1:pXqZHmHOz6LN+zbbUgqyGgAWRnnZEI40IzG3tMsXcSI= github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -646,16 +907,36 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mna/pigeon v1.0.1-0.20180808201053-bb0192cfc2ae h1:mQO+oxi0kpii/TX+ltfTCFuYkOjEn53JhaOObiMuvnk= github.com/mna/pigeon 
v1.0.1-0.20180808201053-bb0192cfc2ae/go.mod h1:Iym28+kJVnC1hfQvv5MUtI6AiFFzvQjHcvI4RFTG/04= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= +github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -663,22 +944,64 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -693,9 +1016,12 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3 
h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= @@ -710,6 +1036,7 @@ github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFu github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -718,13 +1045,19 @@ github.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -732,20 +1065,31 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod 
h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0= @@ -762,18 +1106,25 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -787,15 +1138,19 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 
h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -803,13 +1158,17 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -822,8 +1181,12 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72 
h1:3dsrMloqeog2f5ZoQCWJbTPR/tKIDFePkB0zg3GLjY8= -github.com/testcontainers/testcontainers-go v0.0.0-20190108154635-47c0da630f72/go.mod h1:wt/nMz68+kIO4RoguOZzsdv1B3kTYw+SuIKyJYRQpgE= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.15.0 h1:3Ex7PUGFv0b2bBsdOv6R42+SK2qoZnWBd21LvZYhUtQ= +github.com/testcontainers/testcontainers-go v0.15.0/go.mod h1:PkohMRH2X8Hib0IWtifVexDfLPVT+tb5E9hsf7cW12w= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -837,15 +1200,27 @@ github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vertica/vertica-sql-go v1.1.1 h1:sZYijzBbvdAbJcl4cYlKjR+Eh/X1hGKzukWuhh8PjvI= github.com/vertica/vertica-sql-go v1.1.1/go.mod h1:fGr44VWdEvL+f+Qt5LkKLOT7GoxaWdoUCnPBU9h6t04= -github.com/willf/bitset v1.1.9 h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c= -github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= 
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= @@ -862,12 +1237,18 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zeebo/xxh3 v1.0.1/go.mod h1:8VHV24/3AZLn3b6Mlp/KuC33LWH687Wq6EnziEB+rsA= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -904,19 +1285,24 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o= @@ -977,11 +1363,13 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -996,10 +1384,13 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1015,23 +1406,27 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net 
v0.0.0-20220401154927-543a649e0bdd h1:zYlwaUHTmxuf6H7hwO2dgwqozQmH7zf4x+/qql4oVWc= -golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1044,8 +1439,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1057,8 +1453,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1067,7 +1463,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1077,23 +1472,41 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1102,14 +1515,25 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1118,22 +1542,30 @@ golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= @@ -1155,13 +1587,17 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1174,8 +1610,11 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= 
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1204,14 +1643,17 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1222,8 +1664,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a h1:ofrrl6c6NG5/IOSx/R1cyiQxxjqlur0h/TvbUhkH0II= -golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1240,6 +1682,7 @@ gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod 
h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1274,12 +1717,14 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -1289,6 +1734,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1302,12 +1748,14 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1321,8 +1769,9 @@ google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQ google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 h1:YxHp5zqIcAShDEvRr5/0rVESVS+njYF68PSdazrNLJo= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1334,6 +1783,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1355,8 +1805,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1370,11 +1820,14 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1382,16 +1835,20 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -1402,13 +1859,18 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1420,10 +1882,46 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU= honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod 
h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vault/secret_test.go b/vault/secret_test.go index 3cda473307..d698671cac 100644 --- a/vault/secret_test.go +++ b/vault/secret_test.go @@ -10,24 +10,34 @@ import ( "github.com/influxdata/influxdb/v2" influxdbtesting "github.com/influxdata/influxdb/v2/testing" "github.com/influxdata/influxdb/v2/vault" - testcontainer 
"github.com/testcontainers/testcontainers-go" + testcontainers "github.com/testcontainers/testcontainers-go" ) func initSecretService(f influxdbtesting.SecretServiceFields, t *testing.T) (influxdb.SecretService, func()) { token := "test" ctx := context.Background() - vaultC, err := testcontainer.RunContainer(ctx, "vault", testcontainer.RequestContainer{ - ExportedPort: []string{ - "8200/tcp", + vaultC, err := GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "docker.io/vault:latest", + ExposedPorts: []string{ + "8200/tcp", + }, + Cmd: fmt.Sprintf(`vault server -dev -dev-listen-address 0.0.0.0:8200 -dev-root-token-id=%s`, token), }, - Cmd: fmt.Sprintf(`vault server -dev -dev-listen-address 0.0.0.0:8200 -dev-root-token-id=%s`, token), + Started: true, }) if err != nil { - t.Fatalf("failed to initialize vault testcontiner: %v", err) + t.Fatalf("failed to initialize vault container: %v", err) } - ip, port, err := vaultC.GetHostEndpoint(ctx, "8200/tcp") + + host, err := vaultC.Host(ctx) if err != nil { - t.Fatal(err) + t.Fatalf("failed to get host from vault container: %v", err) + } + + port, err := vaultC.MappedPort(ctx, "8200/tcp") + if err != nil { + t.Fatalf("failed to get exposed 8200 port from vault container: %v", err) } s, err := vault.NewSecretService() @@ -35,7 +45,7 @@ func initSecretService(f influxdbtesting.SecretServiceFields, t *testing.T) (inf t.Fatal(err) } s.Client.SetToken(token) - s.Client.SetAddress(fmt.Sprintf("http://%v:%v", ip, port)) + s.Client.SetAddress(fmt.Sprintf("http://%v:%v", host, port.Int())) for _, sec := range f.Secrets { for k, v := range sec.Env {
e46d3b8881aaaa050df1e22b412ce0227e1b6c83
praveen-influx
2025-03-04 11:44:58
rust ci job fix to address rustup 1.28.0 changes (#26099)
- activate toolchain explicitly - add clippy and rustfmt components
null
chore: rust ci job fix to address rustup 1.28.0 changes (#26099) - activate toolchain explicitly - add clippy and rustfmt components
diff --git a/.circleci/config.yml b/.circleci/config.yml index 38088a0086..dbb5ecb734 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -78,6 +78,13 @@ commands: rust_components: description: Verify installed components steps: + - run: + name: Activate toolchain + command: | + rustup toolchain install + rustup component add clippy + rustup component add rustfmt + - run: name: Verify installed components command: |
eeb1aa7905cf203ad614278ff05a4dc5b020228b
Michael Gattozzi
2024-10-03 14:47:46
swap over to DbId and TableId everywhere (#25421)
* feat: Add TableId and ColumnId * feat: swap over to DbId and TableId everywhere This commit swaps our internal systems over to using the DbId and TableId types everywhere. Anything that is external facing, such as last cache table names or line protocol parsing, still uses names. For those cases the `Catalog` keeps TableIds and DbIds in a bidirectional mapping for easy lookup, i.e. id <-> name. While the change itself isn't conceptually complicated, we depended on names in so many places that the changes end up being quite invasive and extensive. Luckily it shouldn't be too hard to review. Note this does not add the column ids; that will be done in a follow-up PR. Closes #25375 Closes #25403 Closes #25404 Closes #25405 Closes #25412 Closes #25413
null
feat: swap over to DbId and TableId everywhere (#25421) * feat: Add TableId and ColumnId * feat: swap over to DbId and TableId everywhere This commit swaps our internal systems over to using the DbId and TableId types everywhere. Anything that is external facing, such as last cache table names or line protocol parsing, still uses names. For those cases the `Catalog` keeps TableIds and DbIds in a bidirectional mapping for easy lookup, i.e. id <-> name. While the change itself isn't conceptually complicated, we depended on names in so many places that the changes end up being quite invasive and extensive. Luckily it shouldn't be too hard to review. Note this does not add the column ids; that will be done in a follow-up PR. Closes #25375 Closes #25403 Closes #25404 Closes #25405 Closes #25412 Closes #25413
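The id <-> name lookup described in this commit message can be sketched in isolation. The snippet below is a minimal, hypothetical Rust example, not the actual catalog code: it assumes the `bimap` crate that the diff adds as a dependency, and the simplified `DbId` type and `NEXT_DB_ID` counter only mirror the shape of the real types.

```rust
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;

use bimap::BiHashMap;

/// Simplified stand-in for the catalog's integer database id.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct DbId(u32);

/// Process-wide counter handing out monotonically increasing ids.
static NEXT_DB_ID: AtomicU32 = AtomicU32::new(0);

impl DbId {
    fn new() -> Self {
        Self(NEXT_DB_ID.fetch_add(1, Ordering::SeqCst))
    }
}

fn main() {
    // One structure gives both directions: id -> name and name -> id.
    let mut db_map: BiHashMap<DbId, Arc<str>> = BiHashMap::new();

    let db_id = DbId::new();
    let db_name: Arc<str> = Arc::from("test_db");
    db_map.insert(db_id, Arc::clone(&db_name));

    // Name -> id: what name-based entry points (e.g. line protocol ingest) need.
    let by_name: Option<DbId> = db_map.get_by_right(&db_name).copied();
    assert_eq!(by_name, Some(db_id));

    // Id -> name: what id-keyed internal state needs when talking to the outside.
    let by_id: Option<Arc<str>> = db_map.get_by_left(&db_id).map(Arc::clone);
    assert_eq!(by_id.as_deref(), Some("test_db"));
}
```

Keeping the map bidirectional means name-based external requests and id-keyed internal state can resolve each other with a single lookup, without hand-maintaining two separate hash maps.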
diff --git a/Cargo.lock b/Cargo.lock index 262ca0466c..ad170b8100 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -661,6 +661,12 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bit-set" version = "0.5.3" @@ -2621,6 +2627,7 @@ name = "influxdb3_catalog" version = "0.1.0" dependencies = [ "arrow", + "bimap", "influxdb-line-protocol", "influxdb3_id", "influxdb3_wal", @@ -2824,6 +2831,7 @@ dependencies = [ "schema", "serde", "serde_json", + "serde_with", "thiserror", "tokio", ] @@ -2836,6 +2844,7 @@ dependencies = [ "arrow", "arrow_util", "async-trait", + "bimap", "byteorder", "bytes", "chrono", diff --git a/Cargo.toml b/Cargo.toml index d40188b3d3..0f0266d5f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ assert_cmd = "2.0.14" async-trait = "0.1" backtrace = "0.3" base64 = "0.22.0" +bimap = "0.6.3" byteorder = "1.3.4" bytes = "1.5" chrono = "0.4" diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index 941972cb07..a4d85930db 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -425,7 +425,7 @@ pub async fn command(config: Config) -> Result<()> { .map_err(Error::InitializePersistedCatalog)?, ); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()) + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)) .map_err(Error::InitializeLastCache)?; info!(instance_id = ?catalog.instance_id(), "Catalog initialized with"); diff --git a/influxdb3_catalog/Cargo.toml b/influxdb3_catalog/Cargo.toml index 080561f816..e24ef603fb 100644 --- a/influxdb3_catalog/Cargo.toml +++ b/influxdb3_catalog/Cargo.toml @@ -17,6 +17,7 @@ influxdb3_wal = { path = "../influxdb3_wal" } # crates.io dependencies arrow.workspace = true +bimap.workspace = true parking_lot.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs index b4e8eb484a..0370f47820 100644 --- a/influxdb3_catalog/src/catalog.rs +++ b/influxdb3_catalog/src/catalog.rs @@ -1,7 +1,8 @@ //! Implementation of the Catalog that sits entirely in memory. 
use crate::catalog::Error::TableNotFound; -use influxdb3_id::DbId; +use bimap::BiHashMap; +use influxdb3_id::{DbId, TableId}; use influxdb3_wal::{ CatalogBatch, CatalogOp, FieldAdditions, LastCacheDefinition, LastCacheDelete, }; @@ -138,7 +139,9 @@ impl Catalog { } pub fn db_or_create(&self, db_name: &str) -> Result<Arc<DatabaseSchema>> { - let db = self.inner.read().databases.get(db_name).cloned(); + let db = self + .db_name_to_id(db_name.into()) + .and_then(|db_id| self.inner.read().databases.get(&db_id).map(Arc::clone)); let db = match db { Some(db) => db, @@ -150,12 +153,13 @@ impl Catalog { } info!("return new db {}", db_name); - let db = Arc::new(DatabaseSchema::new(DbId::new(), db_name.into())); - inner - .databases - .insert(Arc::clone(&db.name), Arc::clone(&db)); + let db_id = DbId::new(); + let db_name = db_name.into(); + let db = Arc::new(DatabaseSchema::new(db_id, Arc::clone(&db_name))); + inner.databases.insert(db.id, Arc::clone(&db)); inner.sequence = inner.sequence.next(); inner.updated = true; + inner.db_map.insert(db_id, db_name); db } }; @@ -163,8 +167,42 @@ impl Catalog { Ok(db) } - pub fn db_schema(&self, name: &str) -> Option<Arc<DatabaseSchema>> { - self.inner.read().databases.get(name).cloned() + pub fn add_table_to_lookup(&self, db_id: DbId, table_id: TableId, name: Arc<str>) { + self.inner + .write() + .table_map + .entry(db_id) + .or_default() + .insert(table_id, name); + } + + pub fn db_name_to_id(&self, db_name: Arc<str>) -> Option<DbId> { + self.inner.read().db_map.get_by_right(&db_name).copied() + } + + pub fn db_id_to_name(&self, db_id: DbId) -> Option<Arc<str>> { + self.inner.read().db_map.get_by_left(&db_id).map(Arc::clone) + } + + pub fn table_name_to_id(&self, db_id: DbId, table_name: Arc<str>) -> Option<TableId> { + self.inner + .read() + .table_map + .get(&db_id) + .and_then(|map| map.get_by_right(&table_name).copied()) + } + + pub fn table_id_to_name(&self, db_id: DbId, table_id: TableId) -> Option<Arc<str>> { + self.inner + .read() + .table_map + .get(&db_id) + .and_then(|map| map.get_by_left(&table_id)) + .map(Arc::clone) + } + + pub fn db_schema(&self, id: &DbId) -> Option<Arc<DatabaseSchema>> { + self.inner.read().databases.get(id).cloned() } pub fn sequence_number(&self) -> SequenceNumber { @@ -179,37 +217,37 @@ impl Catalog { self.inner .read() .databases - .keys() - .map(|db| db.to_string()) + .values() + .map(|db| db.name.to_string()) .collect() } - pub fn add_last_cache(&self, db_name: &str, table_name: &str, last_cache: LastCacheDefinition) { + pub fn add_last_cache(&self, db_id: DbId, table_id: TableId, last_cache: LastCacheDefinition) { let mut inner = self.inner.write(); let mut db = inner .databases - .get(db_name) + .get(&db_id) .expect("db should exist") .as_ref() .clone(); - let table = db.tables.get_mut(table_name).expect("table should exist"); + let table = db.tables.get_mut(&table_id).expect("table should exist"); table.add_last_cache(last_cache); - inner.databases.insert(Arc::clone(&db.name), Arc::new(db)); + inner.databases.insert(db_id, Arc::new(db)); inner.sequence = inner.sequence.next(); inner.updated = true; } - pub fn delete_last_cache(&self, db_name: &str, table_name: &str, name: &str) { + pub fn delete_last_cache(&self, db_id: DbId, table_id: TableId, name: &str) { let mut inner = self.inner.write(); let mut db = inner .databases - .get(db_name) + .get(&db_id) .expect("db should exist") .as_ref() .clone(); - let table = db.tables.get_mut(table_name).expect("table should exist"); + let table = 
db.tables.get_mut(&table_id).expect("table should exist"); table.remove_last_cache(name); - inner.databases.insert(Arc::clone(&db.name), Arc::new(db)); + inner.databases.insert(db_id, Arc::new(db)); inner.sequence = inner.sequence.next(); inner.updated = true; } @@ -223,13 +261,27 @@ impl Catalog { } #[cfg(test)] - pub fn db_exists(&self, db_name: &str) -> bool { - self.inner.read().db_exists(db_name) + pub fn db_exists(&self, db_id: DbId) -> bool { + self.inner.read().db_exists(db_id) } pub fn insert_database(&mut self, db: DatabaseSchema) { let mut inner = self.inner.write(); - inner.databases.insert(Arc::clone(&db.name), Arc::new(db)); + for (table_id, table_def) in db.tables.iter() { + inner + .table_map + .entry(db.id) + .and_modify(|map: &mut BiHashMap<TableId, Arc<str>>| { + map.insert(*table_id, Arc::clone(&table_def.table_name)); + }) + .or_insert_with(|| { + let mut map = BiHashMap::new(); + map.insert(*table_id, Arc::clone(&table_def.table_name)); + map + }); + } + inner.db_map.insert(db.id, Arc::clone(&db.name)); + inner.databases.insert(db.id, Arc::new(db)); inner.sequence = inner.sequence.next(); inner.updated = true; } @@ -247,14 +299,18 @@ impl Catalog { inner.updated = false; } } + + pub fn inner(&self) -> &RwLock<InnerCatalog> { + &self.inner + } } #[serde_with::serde_as] #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Default)] pub struct InnerCatalog { /// The catalog is a map of databases with their table schemas - #[serde_as(as = "serde_with::MapPreventDuplicates<_, _>")] - databases: HashMap<Arc<str>, Arc<DatabaseSchema>>, + #[serde_as(as = "DatabasesAsArray")] + databases: HashMap<DbId, Arc<DatabaseSchema>>, sequence: SequenceNumber, /// The host_id is the prefix that is passed in when starting up (`host_identifier_prefix`) host_id: Arc<str>, @@ -263,6 +319,117 @@ pub struct InnerCatalog { /// If true, the catalog has been updated since the last time it was serialized #[serde(skip)] updated: bool, + #[serde_as(as = "DbMapAsArray")] + db_map: BiHashMap<DbId, Arc<str>>, + #[serde_as(as = "TableMapAsArray")] + pub table_map: HashMap<DbId, BiHashMap<TableId, Arc<str>>>, +} + +serde_with::serde_conv!( + DbMapAsArray, + BiHashMap<DbId, Arc<str>>, + |map: &BiHashMap<DbId, Arc<str>>| { + map.iter().fold(Vec::new(), |mut acc, (id, name)| { + acc.push(DbMap { + db_id: *id, + name: Arc::clone(&name) + }); + acc + }) + }, + |vec: Vec<DbMap>| -> Result<_, std::convert::Infallible> { + Ok(vec.into_iter().fold(BiHashMap::new(), |mut acc, db| { + acc.insert(db.db_id, db.name); + acc + })) + } +); + +#[derive(Debug, Serialize, Deserialize)] +struct DbMap { + db_id: DbId, + name: Arc<str>, +} + +serde_with::serde_conv!( + TableMapAsArray, + HashMap<DbId, BiHashMap<TableId, Arc<str>>>, + |map: &HashMap<DbId, BiHashMap<TableId, Arc<str>>>| { + map.iter().fold(Vec::new(), |mut acc, (db_id, table_map)| { + for (table_id, name) in table_map.iter() { + acc.push(TableMap { + db_id: *db_id, + table_id: *table_id, + name: Arc::clone(&name) + }); + } + acc + }) + }, + |vec: Vec<TableMap>| -> Result<_, std::convert::Infallible> { + let mut map = HashMap::new(); + for item in vec { + map.entry(item.db_id) + .and_modify(|entry: &mut BiHashMap<TableId, Arc<str>>| { + entry.insert(item.table_id, Arc::clone(&item.name)); + }) + .or_insert_with(||{ + let mut inner_map = BiHashMap::new(); + inner_map.insert(item.table_id, Arc::clone(&item.name)); + inner_map + }); + } + Ok(map) + } +); + +#[derive(Debug, Serialize, Deserialize)] +struct TableMap { + db_id: DbId, + table_id: TableId, + 
name: Arc<str>, +} + +serde_with::serde_conv!( + DatabasesAsArray, + HashMap<DbId, Arc<DatabaseSchema>>, + |map: &HashMap<DbId, Arc<DatabaseSchema>>| { + map.values().fold(Vec::new(), |mut acc, db| { + acc.push(DatabasesSerialized { + id: db.id, + name: Arc::clone(&db.name), + tables: db.tables.values().cloned().collect(), + }); + acc + }) + }, + |vec: Vec<DatabasesSerialized>| -> Result<_, String> { + vec.into_iter().fold(Ok(HashMap::new()), |acc, db| { + let mut acc = acc?; + if let Some(_) = acc.insert(db.id, Arc::new(DatabaseSchema { + id: db.id, + name: Arc::clone(&db.name), + tables: db.tables.into_iter().fold(Ok(BTreeMap::new()), |acc, table| { + let mut acc = acc?; + let table_name = Arc::clone(&table.table_name); + if let Some(_) = acc.insert(table.table_id, table) { + return Err(format!("found duplicate table: {}", table_name)); + } + Ok(acc) + })? + })) { + return Err(format!("found duplicate db: {}", db.name)); + } + Ok(acc) + }) + } +); + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Default)] +struct DatabasesSerialized { + pub id: DbId, + pub name: Arc<str>, + pub tables: Vec<TableDefinition>, } impl InnerCatalog { @@ -273,6 +440,8 @@ impl InnerCatalog { host_id, instance_id, updated: false, + db_map: BiHashMap::new(), + table_map: HashMap::new(), } } @@ -289,7 +458,7 @@ impl InnerCatalog { pub fn apply_catalog_batch(&mut self, catalog_batch: &CatalogBatch) -> Result<()> { let table_count = self.table_count(); - if let Some(db) = self.databases.get(catalog_batch.database_name.as_ref()) { + if let Some(db) = self.databases.get(&catalog_batch.database_id) { let existing_table_count = db.tables.len(); if let Some(new_db) = db.new_if_updated_from_batch(catalog_batch)? { @@ -297,11 +466,23 @@ impl InnerCatalog { if table_count + new_table_count > Catalog::NUM_TABLES_LIMIT { return Err(Error::TooManyTables); } - - self.databases - .insert(Arc::clone(&new_db.name), Arc::new(new_db)); + let new_db = Arc::new(new_db); + self.databases.insert(new_db.id, Arc::clone(&new_db)); self.sequence = self.sequence.next(); self.updated = true; + self.db_map.insert(new_db.id, Arc::clone(&new_db.name)); + for (table_id, table_def) in new_db.tables.iter() { + self.table_map + .entry(new_db.id) + .and_modify(|map| { + map.insert(*table_id, Arc::clone(&table_def.table_name)); + }) + .or_insert_with(|| { + let mut map = BiHashMap::new(); + map.insert(*table_id, Arc::clone(&table_def.table_name)); + map + }); + } } } else { if self.databases.len() >= Catalog::NUM_DBS_LIMIT { @@ -313,10 +494,23 @@ impl InnerCatalog { return Err(Error::TooManyTables); } - self.databases - .insert(Arc::clone(&new_db.name), Arc::new(new_db)); + let new_db = Arc::new(new_db); + self.databases.insert(new_db.id, Arc::clone(&new_db)); self.sequence = self.sequence.next(); self.updated = true; + self.db_map.insert(new_db.id, Arc::clone(&new_db.name)); + for (table_id, table_def) in new_db.tables.iter() { + self.table_map + .entry(new_db.id) + .and_modify(|map| { + map.insert(*table_id, Arc::clone(&table_def.table_name)); + }) + .or_insert_with(|| { + let mut map = BiHashMap::new(); + map.insert(*table_id, Arc::clone(&table_def.table_name)); + map + }); + } } Ok(()) @@ -326,8 +520,8 @@ impl InnerCatalog { self.databases.values() } - pub fn db_exists(&self, db_name: &str) -> bool { - self.databases.contains_key(db_name) + pub fn db_exists(&self, db_id: DbId) -> bool { + self.databases.contains_key(&db_id) } } @@ -337,8 +531,7 @@ pub struct DatabaseSchema { pub id: DbId, pub name: Arc<str>, /// The database is a 
map of tables - #[serde_as(as = "serde_with::MapPreventDuplicates<_, _>")] - pub tables: BTreeMap<Arc<str>, TableDefinition>, + pub tables: BTreeMap<TableId, TableDefinition>, } impl DatabaseSchema { @@ -361,28 +554,28 @@ impl DatabaseSchema { CatalogOp::CreateDatabase(_) => (), CatalogOp::CreateTable(table_definition) => { let new_or_existing_table = updated_or_new_tables - .get(table_definition.table_name.as_ref()) - .or_else(|| self.tables.get(table_definition.table_name.as_ref())); + .get(&table_definition.table_id) + .or_else(|| self.tables.get(&table_definition.table_id)); if let Some(existing_table) = new_or_existing_table { if let Some(new_table) = existing_table.new_if_definition_adds_new_fields(table_definition)? { - updated_or_new_tables.insert(Arc::clone(&new_table.name), new_table); + updated_or_new_tables.insert(new_table.table_id, new_table); } } else { let new_table = TableDefinition::new_from_op(table_definition); - updated_or_new_tables.insert(Arc::clone(&new_table.name), new_table); + updated_or_new_tables.insert(new_table.table_id, new_table); } } CatalogOp::AddFields(field_additions) => { let new_or_existing_table = updated_or_new_tables - .get(field_additions.table_name.as_ref()) - .or_else(|| self.tables.get(field_additions.table_name.as_ref())); + .get(&field_additions.table_id) + .or_else(|| self.tables.get(&field_additions.table_id)); if let Some(existing_table) = new_or_existing_table { if let Some(new_table) = existing_table.new_if_field_additions_add_fields(field_additions)? { - updated_or_new_tables.insert(Arc::clone(&new_table.name), new_table); + updated_or_new_tables.insert(new_table.table_id, new_table); } } else { let fields = field_additions @@ -391,17 +584,18 @@ impl DatabaseSchema { .map(|f| (f.name.to_string(), f.data_type.into())) .collect::<Vec<_>>(); let new_table = TableDefinition::new( + field_additions.table_id, Arc::clone(&field_additions.table_name), fields, SeriesKey::None, )?; - updated_or_new_tables.insert(Arc::clone(&new_table.name), new_table); + updated_or_new_tables.insert(new_table.table_id, new_table); } } CatalogOp::CreateLastCache(last_cache_definition) => { let new_or_existing_table = updated_or_new_tables - .get(last_cache_definition.table.as_str()) - .or_else(|| self.tables.get(last_cache_definition.table.as_str())); + .get(&last_cache_definition.table_id) + .or_else(|| self.tables.get(&last_cache_definition.table_id)); let table = new_or_existing_table.ok_or(TableNotFound { db_name: self.name.to_string(), @@ -411,23 +605,23 @@ impl DatabaseSchema { if let Some(new_table) = table.new_if_last_cache_definition_is_new(last_cache_definition) { - updated_or_new_tables.insert(Arc::clone(&new_table.name), new_table); + updated_or_new_tables.insert(new_table.table_id, new_table); } } CatalogOp::DeleteLastCache(last_cache_deletion) => { let new_or_existing_table = updated_or_new_tables - .get(last_cache_deletion.table.as_str()) - .or_else(|| self.tables.get(last_cache_deletion.table.as_str())); + .get(&last_cache_deletion.table_id) + .or_else(|| self.tables.get(&last_cache_deletion.table_id)); let table = new_or_existing_table.ok_or(TableNotFound { db_name: self.name.to_string(), - table_name: last_cache_deletion.table.clone(), + table_name: last_cache_deletion.table_name.clone(), })?; if let Some(new_table) = table.new_if_last_cache_deletes_existing(last_cache_deletion) { - updated_or_new_tables.insert(Arc::clone(&new_table.name), new_table); + updated_or_new_tables.insert(new_table.table_id, new_table); } } } @@ -436,9 +630,9 @@ impl 
DatabaseSchema { if updated_or_new_tables.is_empty() { Ok(None) } else { - for (n, t) in &self.tables { - if !updated_or_new_tables.contains_key(n) { - updated_or_new_tables.insert(Arc::clone(n), t.clone()); + for (table_id, table_def) in &self.tables { + if !updated_or_new_tables.contains_key(table_id) { + updated_or_new_tables.insert(*table_id, table_def.clone()); } } @@ -461,20 +655,27 @@ impl DatabaseSchema { Ok(new_db) } - pub fn get_table_schema(&self, table_name: &str) -> Option<&Schema> { - self.tables.get(table_name).map(|table| &table.schema) + pub fn get_table_schema(&self, table_id: TableId) -> Option<&Schema> { + self.tables.get(&table_id).map(|table| &table.schema) } - pub fn get_table(&self, table_name: &str) -> Option<&TableDefinition> { - self.tables.get(table_name) + pub fn get_table(&self, table_id: TableId) -> Option<&TableDefinition> { + self.tables.get(&table_id) } - pub fn table_names(&self) -> Vec<Arc<str>> { + pub fn table_ids(&self) -> Vec<TableId> { self.tables.keys().cloned().collect() } - pub fn table_exists(&self, table_name: &str) -> bool { - self.tables.contains_key(table_name) + pub fn table_names(&self) -> Vec<Arc<str>> { + self.tables + .values() + .map(|td| Arc::clone(&td.table_name)) + .collect() + } + + pub fn table_exists(&self, table_id: TableId) -> bool { + self.tables.contains_key(&table_id) } pub fn tables(&self) -> impl Iterator<Item = &TableDefinition> { @@ -484,7 +685,8 @@ impl DatabaseSchema { #[derive(Debug, Eq, PartialEq, Clone)] pub struct TableDefinition { - pub name: Arc<str>, + pub table_id: TableId, + pub table_name: Arc<str>, pub schema: Schema, pub last_caches: BTreeMap<String, LastCacheDefinition>, } @@ -494,7 +696,8 @@ impl TableDefinition { /// /// Ensures the provided columns will be ordered before constructing the schema. 
pub fn new<CN: AsRef<str>>( - name: Arc<str>, + table_id: TableId, + table_name: Arc<str>, columns: impl AsRef<[(CN, InfluxColumnType)]>, series_key: Option<impl IntoIterator<Item: AsRef<str>>>, ) -> Result<Self> { @@ -509,7 +712,7 @@ impl TableDefinition { ordered_columns.insert(name.as_ref(), column_type); } let mut schema_builder = SchemaBuilder::with_capacity(columns.as_ref().len()); - schema_builder.measurement(name.as_ref()); + schema_builder.measurement(table_name.as_ref()); if let Some(sk) = series_key { schema_builder.with_series_key(sk); } @@ -519,7 +722,8 @@ impl TableDefinition { let schema = schema_builder.build().unwrap(); Ok(Self { - name, + table_id, + table_name, schema, last_caches: BTreeMap::new(), }) @@ -532,6 +736,7 @@ impl TableDefinition { columns.push((field_def.name.as_ref(), field_def.data_type.into())); } Self::new( + table_definition.table_id, Arc::clone(&table_definition.table_name), columns, table_definition.key.clone(), @@ -553,7 +758,7 @@ impl TableDefinition { if table_definition.key != existing_key { return Err(Error::SeriesKeyMismatch { - table_name: self.name.to_string(), + table_name: self.table_name.to_string(), existing: existing_key.unwrap_or_default().join("/"), attempted: table_definition.key.clone().unwrap_or_default().join("/"), }); @@ -565,7 +770,7 @@ impl TableDefinition { if let Some(existing_type) = self.schema.field_type_by_name(field_def.name.as_ref()) { if existing_type != field_def.data_type.into() { return Err(Error::FieldTypeMismatch { - table_name: self.name.to_string(), + table_name: self.table_name.to_string(), column_name: field_def.name.to_string(), existing: existing_type, attempted: field_def.data_type.into(), @@ -601,7 +806,7 @@ impl TableDefinition { Some(existing_field_type) => { if existing_field_type != field_type { return Err(Error::FieldTypeMismatch { - table_name: self.name.to_string(), + table_name: self.table_name.to_string(), column_name: c.name.to_string(), existing: existing_field_type, attempted: field_type, @@ -759,8 +964,9 @@ mod tests { use InfluxColumnType::*; use InfluxFieldType::*; database.tables.insert( - "test_table_1".into(), + TableId::from(1), TableDefinition::new( + TableId::from(1), "test_table_1".into(), [ ("tag_1", Tag), @@ -778,8 +984,9 @@ mod tests { .unwrap(), ); database.tables.insert( - "test_table_2".into(), + TableId::from(2), TableDefinition::new( + TableId::from(2), "test_table_2".into(), [ ("tag_1", Tag), @@ -800,7 +1007,7 @@ mod tests { .inner .write() .databases - .insert(Arc::clone(&database.name), Arc::new(database)); + .insert(database.id, Arc::new(database)); // Perform a snapshot test to check that the JSON serialized catalog does not change in an // undesired way when introducing features etc. 
@@ -819,53 +1026,58 @@ mod tests { // Duplicate databases { let json = r#"{ - "databases": { - "db1": { + "databases": [ + { "id": 0, "name": "db1", - "tables": {} + "tables": [] }, - "db1": { + { "id": 0, "name": "db1", - "tables": {} + "tables": [] } - } + ] }"#; let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err(); - assert_contains!(err.to_string(), "found duplicate key"); + assert_contains!(err.to_string(), "found duplicate db: db1"); } // Duplicate tables { let json = r#"{ - "databases": { - "db1": { + "databases": [ + { + "id": 0, "name": "db1", - "tables": { - "tbl1": { - "name": "tbl1", + "tables": [ + { + "table_id": 0, + "table_name": "tbl1", "cols": {} }, - "tbl1": { - "name": "tbl1", + { + "table_id": 0, + "table_name": "tbl1", "cols": {} } - } + ] } - } + ] }"#; let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err(); - assert_contains!(err.to_string(), "found duplicate key"); + assert_contains!(err.to_string(), "found duplicate table: tbl1"); } // Duplicate columns { let json = r#"{ - "databases": { - "db1": { + "databases": [ + { + "id": 0, "name": "db1", - "tables": { - "tbl1": { - "name": "tbl1", + "tables": [ + { + "table_id": 0, + "table_name": "tbl1", "cols": { "col1": { "type": "i64", @@ -879,9 +1091,9 @@ mod tests { } } } - } + ] } - } + ] }"#; let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err(); assert_contains!(err.to_string(), "found duplicate key"); @@ -896,8 +1108,9 @@ mod tests { tables: BTreeMap::new(), }; database.tables.insert( - "test".into(), + TableId::from(0), TableDefinition::new( + TableId::from(0), "test".into(), [( "test".to_string(), @@ -908,7 +1121,7 @@ mod tests { .unwrap(), ); - let table = database.tables.get_mut("test").unwrap(); + let table = database.tables.get_mut(&TableId::from(0)).unwrap(); table .add_columns(vec![("test2".to_string(), InfluxColumnType::Tag)]) .unwrap(); @@ -933,8 +1146,9 @@ mod tests { use InfluxColumnType::*; use InfluxFieldType::*; database.tables.insert( - "test_table_1".into(), + TableId::from(1), TableDefinition::new( + TableId::from(1), "test_table_1".into(), [ ("tag_1", Tag), @@ -955,7 +1169,7 @@ mod tests { .inner .write() .databases - .insert(Arc::clone(&database.name), Arc::new(database)); + .insert(database.id, Arc::new(database)); assert_json_snapshot!(catalog); @@ -978,6 +1192,7 @@ mod tests { use InfluxColumnType::*; use InfluxFieldType::*; let mut table_def = TableDefinition::new( + TableId::from(0), "test".into(), [ ("tag_1", Tag), @@ -991,6 +1206,7 @@ mod tests { .unwrap(); table_def.add_last_cache( LastCacheDefinition::new_with_explicit_value_columns( + TableId::from(0), "test", "test_table_last_cache", ["tag_2", "tag_3"], @@ -1000,12 +1216,12 @@ mod tests { ) .unwrap(), ); - database.tables.insert("test_table_1".into(), table_def); + database.tables.insert(TableId::from(0), table_def); catalog .inner .write() .databases - .insert(Arc::clone(&database.name), Arc::new(database)); + .insert(database.id, Arc::new(database)); assert_json_snapshot!(catalog); diff --git a/influxdb3_catalog/src/serialize.rs b/influxdb3_catalog/src/serialize.rs index 635762e4a7..8d0f8264a5 100644 --- a/influxdb3_catalog/src/serialize.rs +++ b/influxdb3_catalog/src/serialize.rs @@ -1,5 +1,6 @@ use crate::catalog::TableDefinition; use arrow::datatypes::DataType as ArrowDataType; +use influxdb3_id::TableId; use influxdb3_wal::{LastCacheDefinition, LastCacheValueColumnsDef}; use schema::{InfluxColumnType, SchemaBuilder}; use serde::{Deserialize, Serialize}; @@ -33,7 +34,8 @@ impl<'de> 
Deserialize<'de> for TableDefinition { #[serde_with::serde_as] #[derive(Debug, Serialize, Deserialize)] struct TableSnapshot<'a> { - name: &'a str, + table_id: TableId, + table_name: &'a str, #[serde(default, skip_serializing_if = "Option::is_none")] key: Option<Vec<&'a str>>, #[serde_as(as = "serde_with::MapPreventDuplicates<_, _>")] @@ -147,7 +149,8 @@ impl<'a> From<&'a TableDefinition> for TableSnapshot<'a> { let keys = def.schema().series_key(); let last_caches = def.last_caches.values().map(Into::into).collect(); Self { - name: def.name.as_ref(), + table_id: def.table_id, + table_name: def.table_name.as_ref(), cols, key: keys, last_caches, @@ -206,9 +209,10 @@ impl<'a> From<&'a ArrowDataType> for DataType<'a> { impl<'a> From<TableSnapshot<'a>> for TableDefinition { fn from(snap: TableSnapshot<'a>) -> Self { - let name = snap.name.into(); + let table_name = snap.table_name.into(); + let table_id = snap.table_id; let mut b = SchemaBuilder::new(); - b.measurement(snap.name.to_string()); + b.measurement(snap.table_name.to_string()); if let Some(keys) = snap.key { b.with_series_key(keys); } @@ -234,7 +238,8 @@ impl<'a> From<TableSnapshot<'a>> for TableDefinition { .collect(); Self { - name, + table_name, + table_id, schema, last_caches, } @@ -262,6 +267,7 @@ impl<'a> From<DataType<'a>> for schema::InfluxFieldType { #[derive(Debug, Serialize, Deserialize)] struct LastCacheSnapshot<'a> { + table_id: TableId, table: &'a str, name: &'a str, keys: Vec<&'a str>, @@ -273,6 +279,7 @@ struct LastCacheSnapshot<'a> { impl<'a> From<&'a LastCacheDefinition> for LastCacheSnapshot<'a> { fn from(lcd: &'a LastCacheDefinition) -> Self { Self { + table_id: lcd.table_id, table: &lcd.table, name: &lcd.name, keys: lcd.key_columns.iter().map(|v| v.as_str()).collect(), @@ -291,6 +298,7 @@ impl<'a> From<&'a LastCacheDefinition> for LastCacheSnapshot<'a> { impl<'a> From<LastCacheSnapshot<'a>> for LastCacheDefinition { fn from(snap: LastCacheSnapshot<'a>) -> Self { Self { + table_id: snap.table_id, table: snap.table.to_string(), name: snap.name.to_string(), key_columns: snap.keys.iter().map(|s| s.to_string()).collect(), diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap index 11ed2b7f24..558559fcc3 100644 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap +++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap @@ -3,13 +3,14 @@ source: influxdb3_catalog/src/catalog.rs expression: catalog --- { - "databases": { - "test_db": { + "databases": [ + { "id": 0, "name": "test_db", - "tables": { - "test_table_1": { - "name": "test_table_1", + "tables": [ + { + "table_id": 1, + "table_name": "test_table_1", "cols": { "bool_field": { "type": "bool", @@ -78,8 +79,9 @@ expression: catalog } } }, - "test_table_2": { - "name": "test_table_2", + { + "table_id": 2, + "table_name": "test_table_2", "cols": { "bool_field": { "type": "bool", @@ -148,10 +150,12 @@ expression: catalog } } } - } + ] } - }, + ], "sequence": 0, "host_id": "dummy-host-id", - "instance_id": "instance-id" + "instance_id": "instance-id", + "db_map": [], + "table_map": [] } diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap index 7c79043108..36453f45d2 100644 --- 
a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap +++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap @@ -3,13 +3,14 @@ source: influxdb3_catalog/src/catalog.rs expression: catalog --- { - "databases": { - "test_db": { + "databases": [ + { "id": 0, "name": "test_db", - "tables": { - "test_table_1": { - "name": "test", + "tables": [ + { + "table_id": 0, + "table_name": "test", "cols": { "field": { "type": "str", @@ -59,6 +60,7 @@ expression: catalog }, "last_caches": [ { + "table_id": 0, "table": "test", "name": "test_table_last_cache", "keys": [ @@ -73,10 +75,12 @@ expression: catalog } ] } - } + ] } - }, + ], "sequence": 0, "host_id": "dummy-host-id", - "instance_id": "instance-id" + "instance_id": "instance-id", + "db_map": [], + "table_map": [] } diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap index 2d8489dfd4..5b66988d25 100644 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap +++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap @@ -3,13 +3,14 @@ source: influxdb3_catalog/src/catalog.rs expression: catalog --- { - "databases": { - "test_db": { + "databases": [ + { "id": 0, "name": "test_db", - "tables": { - "test_table_1": { - "name": "test_table_1", + "tables": [ + { + "table_id": 1, + "table_name": "test_table_1", "key": [ "tag_1", "tag_2", @@ -63,10 +64,12 @@ expression: catalog } } } - } + ] } - }, + ], "sequence": 0, "host_id": "dummy-host-id", - "instance_id": "instance-id" + "instance_id": "instance-id", + "db_map": [], + "table_map": [] } diff --git a/influxdb3_id/src/lib.rs b/influxdb3_id/src/lib.rs index 548a8472b4..26a1d4c7f9 100644 --- a/influxdb3_id/src/lib.rs +++ b/influxdb3_id/src/lib.rs @@ -1,9 +1,10 @@ use serde::Deserialize; use serde::Serialize; +use std::fmt::Display; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; -#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize, Hash)] pub struct DbId(u32); static NEXT_DB_ID: AtomicU32 = AtomicU32::new(0); @@ -37,3 +38,77 @@ impl From<u32> for DbId { Self(value) } } +impl Display for DbId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize, Hash)] +pub struct TableId(u32); + +static NEXT_TABLE_ID: AtomicU32 = AtomicU32::new(0); + +impl TableId { + pub fn new() -> Self { + Self(NEXT_TABLE_ID.fetch_add(1, Ordering::SeqCst)) + } + + pub fn next_id() -> Self { + Self(NEXT_TABLE_ID.load(Ordering::SeqCst)) + } + + pub fn set_next_id(&self) { + NEXT_TABLE_ID.store(self.0, Ordering::SeqCst) + } + + pub fn as_u32(&self) -> u32 { + self.0 + } +} + +impl Default for TableId { + fn default() -> Self { + Self::new() + } +} + +impl From<u32> for TableId { + fn from(value: u32) -> Self { + Self(value) + } +} + +impl Display for TableId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize, Hash)] +pub struct ColumnId(u16); + +impl ColumnId { + pub fn new(id: u16) -> Self { + Self(id) + } + + 
pub fn next_id(&self) -> Self { + Self(self.0 + 1) + } + + pub fn as_u16(&self) -> u16 { + self.0 + } +} +impl From<u16> for ColumnId { + fn from(value: u16) -> Self { + Self(value) + } +} + +impl Display for ColumnId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs index 315773f828..5e2ecb9066 100644 --- a/influxdb3_server/src/http.rs +++ b/influxdb3_server/src/http.rs @@ -692,11 +692,21 @@ where ttl, } = self.read_body_json(req).await?; + let db_id = self + .write_buffer + .catalog() + .db_name_to_id(db.as_str().into()) + .ok_or_else(|| WriteBufferError::DbDoesNotExist)?; + let table_id = self + .write_buffer + .catalog() + .table_name_to_id(db_id, table.as_str().into()) + .ok_or_else(|| WriteBufferError::TableDoesNotExist)?; match self .write_buffer .create_last_cache( - &db, - &table, + db_id, + table_id, name.as_deref(), count, ttl.map(Duration::from_secs), @@ -730,8 +740,18 @@ where self.read_body_json(req).await? }; + let db_id = self + .write_buffer + .catalog() + .db_name_to_id(db.into()) + .ok_or_else(|| WriteBufferError::DbDoesNotExist)?; + let table_id = self + .write_buffer + .catalog() + .table_name_to_id(db_id, table.into()) + .ok_or_else(|| WriteBufferError::TableDoesNotExist)?; self.write_buffer - .delete_last_cache(&db, &table, &name) + .delete_last_cache(db_id, table_id, &name) .await?; Ok(Response::builder() diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index 030db41ea0..3c4beb541f 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -233,6 +233,7 @@ mod tests { use datafusion::parquet::data_type::AsBytes; use hyper::{body, Body, Client, Request, Response, StatusCode}; use influxdb3_catalog::catalog::Catalog; + use influxdb3_id::{DbId, TableId}; use influxdb3_telemetry::store::TelemetryStore; use influxdb3_wal::WalConfig; use influxdb3_write::last_cache::LastCacheProvider; @@ -628,7 +629,9 @@ mod tests { let start_time = 0; let (url, shutdown, wbuf) = setup_server(start_time).await; let db_name = "foo"; + let db_id = DbId::from(0); let tbl_name = "cpu"; + let tbl_id = TableId::from(0); // Write to generate a db/table in the catalog: let resp = write_lp( @@ -643,7 +646,7 @@ mod tests { assert_eq!(resp.status(), StatusCode::OK); // Create the last cache: - wbuf.create_last_cache(db_name, tbl_name, None, None, None, None, None) + wbuf.create_last_cache(db_id, tbl_id, None, None, None, None, None) .await .expect("create last cache"); @@ -769,11 +772,12 @@ mod tests { let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let dummy_host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("dummy-instance-id"); + let catalog = Arc::new(Catalog::new(dummy_host_id, instance_id)); let write_buffer_impl = Arc::new( influxdb3_write::write_buffer::WriteBufferImpl::new( Arc::clone(&persister), - Arc::new(Catalog::new(dummy_host_id, instance_id)), - Arc::new(LastCacheProvider::new()), + Arc::clone(&catalog), + Arc::new(LastCacheProvider::new(catalog)), Arc::<MockProvider>::clone(&time_provider), Arc::clone(&exec), WalConfig::test_config(), diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index 3f634c7ef2..947fe71d6f 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -304,12 +304,12 @@ impl QueryDatabase for QueryExecutorImpl { ) -> Result<Option<Arc<dyn QueryNamespace>>, 
DataFusionError> { let _span_recorder = SpanRecorder::new(span); - let db_schema = self.catalog.db_schema(name).ok_or_else(|| { + let db_id = self.catalog.db_name_to_id(name.into()).ok_or_else(|| { DataFusionError::External(Box::new(Error::DatabaseNotFound { db_name: name.into(), })) })?; - + let db_schema = self.catalog.db_schema(&db_id).expect("database exists"); Ok(Some(Arc::new(Database::new( db_schema, Arc::clone(&self.write_buffer), @@ -350,7 +350,7 @@ impl Database { query_log: Arc<QueryLog>, ) -> Self { let system_schema_provider = Arc::new(SystemSchemaProvider::new( - Arc::clone(&db_schema.name), + db_schema.id, Arc::clone(&query_log), Arc::clone(&write_buffer), )); @@ -376,10 +376,15 @@ impl Database { } async fn query_table(&self, table_name: &str) -> Option<Arc<QueryTable>> { - self.db_schema.get_table_schema(table_name).map(|schema| { + let table_name = table_name.into(); + let table_id = self + .write_buffer + .catalog() + .table_name_to_id(self.db_schema.id, Arc::clone(&table_name))?; + self.db_schema.get_table_schema(table_id).map(|schema| { Arc::new(QueryTable { db_schema: Arc::clone(&self.db_schema), - name: table_name.into(), + table_name, schema: schema.clone(), write_buffer: Arc::clone(&self.write_buffer), }) @@ -449,7 +454,7 @@ impl QueryNamespace for Database { ctx.inner().register_udtf( LAST_CACHE_UDTF_NAME, Arc::new(LastCacheFunction::new( - self.db_schema.name.to_string(), + self.db_schema.id, self.write_buffer.last_cache_provider(), )), ); @@ -502,19 +507,25 @@ impl SchemaProvider for Database { .collect() } - async fn table(&self, name: &str) -> Result<Option<Arc<dyn TableProvider>>, DataFusionError> { - Ok(self.query_table(name).await.map(|qt| qt as _)) + async fn table( + &self, + table_name: &str, + ) -> Result<Option<Arc<dyn TableProvider>>, DataFusionError> { + Ok(self.query_table(table_name).await.map(|qt| qt as _)) } fn table_exist(&self, name: &str) -> bool { - self.db_schema.table_exists(name) + self.write_buffer + .catalog() + .table_name_to_id(self.db_schema.id, name.into()) + .is_some() } } #[derive(Debug)] pub struct QueryTable { db_schema: Arc<DatabaseSchema>, - name: Arc<str>, + table_name: Arc<str>, schema: Schema, write_buffer: Arc<dyn WriteBuffer>, } @@ -529,7 +540,7 @@ impl QueryTable { ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { self.write_buffer.get_table_chunks( &self.db_schema.name, - self.name.as_ref(), + &self.table_name, filters, projection, ctx, @@ -572,7 +583,7 @@ impl TableProvider for QueryTable { ?limit, "QueryTable as TableProvider::scan" ); - let mut builder = ProviderBuilder::new(Arc::clone(&self.name), self.schema.clone()); + let mut builder = ProviderBuilder::new(Arc::clone(&self.table_name), self.schema.clone()); let chunks = self.chunks(ctx, projection, &filters, limit)?; for chunk in chunks { @@ -645,11 +656,12 @@ mod tests { let executor = make_exec(Arc::clone(&object_store)); let host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("instance-id"); - let write_buffer_impl = Arc::new( + let catalog = Arc::new(Catalog::new(host_id, instance_id)); + let write_buffer = Arc::new( WriteBufferImpl::new( Arc::clone(&persister), - Arc::new(Catalog::new(host_id, instance_id)), - Arc::new(LastCacheProvider::new()), + Arc::clone(&catalog), + Arc::new(LastCacheProvider::new(catalog)), Arc::<MockProvider>::clone(&time_provider), Arc::clone(&executor), WalConfig { @@ -665,9 +677,9 @@ mod tests { ); let dummy_telem_store = TelemetryStore::new_without_background_runners(Arc::clone( - 
&write_buffer_impl.persisted_files(), + &write_buffer.persisted_files(), )); - let write_buffer: Arc<dyn WriteBuffer> = write_buffer_impl; + let write_buffer: Arc<dyn WriteBuffer> = write_buffer; let metrics = Arc::new(Registry::new()); let df_config = Arc::new(Default::default()); let query_executor = QueryExecutorImpl::new( diff --git a/influxdb3_server/src/system_tables/last_caches.rs b/influxdb3_server/src/system_tables/last_caches.rs index 170dbc6aa1..fb23c7643d 100644 --- a/influxdb3_server/src/system_tables/last_caches.rs +++ b/influxdb3_server/src/system_tables/last_caches.rs @@ -4,20 +4,21 @@ use arrow::array::{GenericListBuilder, StringBuilder}; use arrow_array::{ArrayRef, RecordBatch, StringArray, UInt64Array}; use arrow_schema::{DataType, Field, Schema, SchemaRef}; use datafusion::{error::DataFusionError, logical_expr::Expr}; +use influxdb3_id::DbId; use influxdb3_wal::{LastCacheDefinition, LastCacheValueColumnsDef}; use influxdb3_write::last_cache::LastCacheProvider; use iox_system_tables::IoxSystemTable; pub(super) struct LastCachesTable { - db_name: Arc<str>, + db_id: DbId, schema: SchemaRef, provider: Arc<LastCacheProvider>, } impl LastCachesTable { - pub(super) fn new(db_name: Arc<str>, provider: Arc<LastCacheProvider>) -> Self { + pub(super) fn new(db_id: DbId, provider: Arc<LastCacheProvider>) -> Self { Self { - db_name, + db_id, schema: last_caches_schema(), provider, } @@ -55,7 +56,7 @@ impl IoxSystemTable for LastCachesTable { _filters: Option<Vec<Expr>>, _limit: Option<usize>, ) -> Result<RecordBatch, DataFusionError> { - let caches = self.provider.get_last_caches_for_db(&self.db_name); + let caches = self.provider.get_last_caches_for_db(self.db_id); from_last_cache_definitions(self.schema(), &caches) } } diff --git a/influxdb3_server/src/system_tables/mod.rs b/influxdb3_server/src/system_tables/mod.rs index 611f4bb5c9..8d42c5e949 100644 --- a/influxdb3_server/src/system_tables/mod.rs +++ b/influxdb3_server/src/system_tables/mod.rs @@ -1,6 +1,7 @@ use std::{any::Any, collections::HashMap, sync::Arc}; use datafusion::{catalog::SchemaProvider, datasource::TableProvider, error::DataFusionError}; +use influxdb3_id::DbId; use influxdb3_write::WriteBuffer; use iox_query::query_log::QueryLog; use iox_system_tables::SystemTableProvider; @@ -37,23 +38,19 @@ impl std::fmt::Debug for SystemSchemaProvider { } impl SystemSchemaProvider { - pub(crate) fn new( - db_name: Arc<str>, - query_log: Arc<QueryLog>, - buffer: Arc<dyn WriteBuffer>, - ) -> Self { + pub(crate) fn new(db_id: DbId, query_log: Arc<QueryLog>, buffer: Arc<dyn WriteBuffer>) -> Self { let mut tables = HashMap::<&'static str, Arc<dyn TableProvider>>::new(); let queries = Arc::new(SystemTableProvider::new(Arc::new(QueriesTable::new( query_log, )))); tables.insert(QUERIES_TABLE_NAME, queries); let last_caches = Arc::new(SystemTableProvider::new(Arc::new(LastCachesTable::new( - Arc::clone(&db_name), + db_id, buffer.last_cache_provider(), )))); tables.insert(LAST_CACHES_TABLE_NAME, last_caches); let parquet_files = Arc::new(SystemTableProvider::new(Arc::new(ParquetFilesTable::new( - db_name, buffer, + db_id, buffer, )))); tables.insert(PARQUET_FILES_TABLE_NAME, parquet_files); Self { tables } diff --git a/influxdb3_server/src/system_tables/parquet_files.rs b/influxdb3_server/src/system_tables/parquet_files.rs index d0f4075a28..3283e34d0b 100644 --- a/influxdb3_server/src/system_tables/parquet_files.rs +++ b/influxdb3_server/src/system_tables/parquet_files.rs @@ -8,21 +8,22 @@ use datafusion::{ logical_expr::{col, 
BinaryExpr, Expr, Operator}, scalar::ScalarValue, }; +use influxdb3_id::DbId; use influxdb3_write::{ParquetFile, WriteBuffer}; use iox_system_tables::IoxSystemTable; use super::{PARQUET_FILES_TABLE_NAME, SYSTEM_SCHEMA_NAME}; pub(super) struct ParquetFilesTable { - db_name: Arc<str>, + db_id: DbId, schema: SchemaRef, buffer: Arc<dyn WriteBuffer>, } impl ParquetFilesTable { - pub(super) fn new(db_name: Arc<str>, buffer: Arc<dyn WriteBuffer>) -> Self { + pub(super) fn new(db_id: DbId, buffer: Arc<dyn WriteBuffer>) -> Self { Self { - db_name, + db_id, schema: parquet_files_schema(), buffer, } @@ -90,9 +91,13 @@ impl IoxSystemTable for ParquetFilesTable { }) .ok_or_else(table_name_predicate_error)?; - let parquet_files: Vec<ParquetFile> = self - .buffer - .parquet_files(&self.db_name, table_name.as_str()); + let parquet_files: Vec<ParquetFile> = self.buffer.parquet_files( + self.db_id, + self.buffer + .catalog() + .table_name_to_id(self.db_id, table_name.as_str().into()) + .expect("table exists"), + ); from_parquet_files(&table_name, schema, parquet_files) } diff --git a/influxdb3_wal/Cargo.toml b/influxdb3_wal/Cargo.toml index 657b7370c6..8b82d6b350 100644 --- a/influxdb3_wal/Cargo.toml +++ b/influxdb3_wal/Cargo.toml @@ -27,6 +27,7 @@ object_store.workspace = true parking_lot.workspace = true serde.workspace = true serde_json.workspace = true +serde_with.workspace = true thiserror.workspace = true tokio.workspace = true diff --git a/influxdb3_wal/src/lib.rs b/influxdb3_wal/src/lib.rs index 84ba533caf..6873074d27 100644 --- a/influxdb3_wal/src/lib.rs +++ b/influxdb3_wal/src/lib.rs @@ -11,13 +11,14 @@ use crate::snapshot_tracker::SnapshotInfo; use async_trait::async_trait; use data_types::Timestamp; use hashbrown::HashMap; -use influxdb3_id::DbId; +use influxdb3_id::{DbId, TableId}; use influxdb_line_protocol::v3::SeriesValue; use influxdb_line_protocol::FieldValue; use iox_time::Time; use observability_deps::tracing::error; use schema::{InfluxColumnType, InfluxFieldType}; use serde::{Deserialize, Serialize}; +use serde_with::serde_as; use std::fmt::Debug; use std::str::FromStr; use std::sync::Arc; @@ -229,13 +230,16 @@ pub enum CatalogOp { #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct DatabaseDefinition { + pub database_id: DbId, pub database_name: Arc<str>, } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct TableDefinition { + pub database_id: DbId, pub database_name: Arc<str>, pub table_name: Arc<str>, + pub table_id: TableId, pub field_definitions: Vec<FieldDefinition>, pub key: Option<Vec<String>>, } @@ -243,7 +247,9 @@ pub struct TableDefinition { #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct FieldAdditions { pub database_name: Arc<str>, + pub database_id: DbId, pub table_name: Arc<str>, + pub table_id: TableId, pub field_definitions: Vec<FieldDefinition>, } @@ -298,6 +304,8 @@ impl From<FieldDataType> for InfluxColumnType { /// Defines a last cache in a given table and database #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)] pub struct LastCacheDefinition { + /// The table id the cache is associated with + pub table_id: TableId, /// The table name the cache is associated with pub table: String, /// Given name of the cache @@ -315,6 +323,7 @@ pub struct LastCacheDefinition { impl LastCacheDefinition { /// Create a new [`LastCacheDefinition`] with explicit value columns pub fn new_with_explicit_value_columns( + table_id: TableId, table: impl Into<String>, name: impl Into<String>, key_columns: 
impl IntoIterator<Item: Into<String>>, @@ -323,6 +332,7 @@ impl LastCacheDefinition { ttl: u64, ) -> Result<Self, Error> { Ok(Self { + table_id, table: table.into(), name: name.into(), key_columns: key_columns.into_iter().map(Into::into).collect(), @@ -336,6 +346,7 @@ impl LastCacheDefinition { /// Create a new [`LastCacheDefinition`] with explicit value columns pub fn new_all_non_key_value_columns( + table_id: TableId, table: impl Into<String>, name: impl Into<String>, key_columns: impl IntoIterator<Item: Into<String>>, @@ -343,6 +354,7 @@ impl LastCacheDefinition { ttl: u64, ) -> Result<Self, Error> { Ok(Self { + table_id, table: table.into(), name: name.into(), key_columns: key_columns.into_iter().map(Into::into).collect(), @@ -420,24 +432,61 @@ impl PartialEq<LastCacheSize> for usize { #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct LastCacheDelete { - pub table: String, + pub table_name: String, + pub table_id: TableId, pub name: String, } +#[serde_as] #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct WriteBatch { pub database_id: DbId, pub database_name: Arc<str>, - pub table_chunks: HashMap<Arc<str>, TableChunks>, + #[serde_as(as = "TableChunksMapAsVec")] + pub table_chunks: HashMap<TableId, TableChunks>, pub min_time_ns: i64, pub max_time_ns: i64, } +#[derive(Debug, Serialize, Deserialize)] +pub struct TableChunksMap { + table_id: TableId, + min_time: i64, + max_time: i64, + chunk_time_to_chunk: HashMap<i64, TableChunk>, +} + +serde_with::serde_conv!( + TableChunksMapAsVec, + HashMap<TableId,TableChunks>, + |map: &HashMap<TableId, TableChunks>| + map.iter() + .map(|(table_id, chunk)| { + TableChunksMap { + table_id: *table_id, + min_time: chunk.min_time, + max_time: chunk.max_time, + chunk_time_to_chunk: chunk.chunk_time_to_chunk.clone() + } + }) + .collect::<Vec<TableChunksMap>>(), + |vec: Vec<TableChunksMap>| -> Result<_, std::convert::Infallible> { + Ok(vec.into_iter().fold(HashMap::new(), |mut acc, chunk| { + acc.insert(chunk.table_id, TableChunks{ + min_time: chunk.min_time, + max_time: chunk.max_time, + chunk_time_to_chunk: chunk.chunk_time_to_chunk + }); + acc + })) + } +); + impl WriteBatch { pub fn new( database_id: DbId, database_name: Arc<str>, - table_chunks: HashMap<Arc<str>, TableChunks>, + table_chunks: HashMap<TableId, TableChunks>, ) -> Self { // find the min and max times across the table chunks let (min_time_ns, max_time_ns) = table_chunks.values().fold( @@ -461,7 +510,7 @@ impl WriteBatch { pub fn add_write_batch( &mut self, - new_table_chunks: HashMap<Arc<str>, TableChunks>, + new_table_chunks: HashMap<TableId, TableChunks>, min_time_ns: i64, max_time_ns: i64, ) { diff --git a/influxdb3_wal/src/object_store.rs b/influxdb3_wal/src/object_store.rs index a3e55a43c8..eeb2d005fa 100644 --- a/influxdb3_wal/src/object_store.rs +++ b/influxdb3_wal/src/object_store.rs @@ -613,7 +613,7 @@ mod tests { Field, FieldData, Gen1Duration, Row, SnapshotSequenceNumber, TableChunk, TableChunks, }; use async_trait::async_trait; - use influxdb3_id::DbId; + use influxdb3_id::{DbId, TableId}; use object_store::memory::InMemory; use std::any::Any; use tokio::sync::oneshot::Receiver; @@ -638,13 +638,12 @@ mod tests { ); let db_name: Arc<str> = "db1".into(); - let table_name: Arc<str> = "table1".into(); let op1 = WalOp::Write(WriteBatch { database_id: DbId::from(0), database_name: Arc::clone(&db_name), table_chunks: HashMap::from([( - Arc::clone(&table_name), + TableId::from(0), TableChunks { min_time: 1, max_time: 3, @@ -692,7 +691,7 @@ 
mod tests { database_id: DbId::from(0), database_name: Arc::clone(&db_name), table_chunks: HashMap::from([( - Arc::clone(&table_name), + TableId::from(0), TableChunks { min_time: 12, max_time: 12, @@ -732,7 +731,7 @@ mod tests { database_id: DbId::from(0), database_name: "db1".into(), table_chunks: HashMap::from([( - "table1".into(), + TableId::from(0), TableChunks { min_time: 1, max_time: 12, @@ -802,7 +801,7 @@ mod tests { database_id: DbId::from(0), database_name: "db1".into(), table_chunks: HashMap::from([( - "table1".into(), + TableId::from(0), TableChunks { min_time: 12, max_time: 12, @@ -874,7 +873,7 @@ mod tests { database_id: DbId::from(0), database_name: Arc::clone(&db_name), table_chunks: HashMap::from([( - Arc::clone(&table_name), + TableId::from(0), TableChunks { min_time: 26, max_time: 26, @@ -934,7 +933,7 @@ mod tests { database_id: DbId::from(0), database_name: "db1".into(), table_chunks: HashMap::from([( - "table1".into(), + TableId::from(0), TableChunks { min_time: 26, max_time: 26, diff --git a/influxdb3_wal/src/serialize.rs b/influxdb3_wal/src/serialize.rs index 9bdbc2db24..da52c13273 100644 --- a/influxdb3_wal/src/serialize.rs +++ b/influxdb3_wal/src/serialize.rs @@ -92,8 +92,7 @@ mod tests { Field, FieldData, Row, TableChunk, TableChunks, WalFileSequenceNumber, WalOp, WriteBatch, }; use hashbrown::HashMap; - use influxdb3_id::DbId; - use std::sync::Arc; + use influxdb3_id::{DbId, TableId}; #[test] fn test_serialize_deserialize() { @@ -117,9 +116,9 @@ mod tests { max_time: 10, chunk_time_to_chunk: [(1, chunk)].iter().cloned().collect(), }; - let table_name: Arc<str> = "table2".into(); + let table_id = TableId::from(2); let mut table_chunks = HashMap::new(); - table_chunks.insert(table_name, chunks); + table_chunks.insert(table_id, chunks); let contents = WalContents { min_timestamp_ns: 0, diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml index a7abb6dc8c..359735b8e9 100644 --- a/influxdb3_write/Cargo.toml +++ b/influxdb3_write/Cargo.toml @@ -30,6 +30,7 @@ arrow.workspace = true async-trait.workspace = true byteorder.workspace = true bytes.workspace = true +bimap.workspace = true chrono.workspace = true crc32fast.workspace = true crossbeam-channel.workspace = true diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs index 51b27390f1..3b69c7187e 100644 --- a/influxdb3_write/src/last_cache/mod.rs +++ b/influxdb3_write/src/last_cache/mod.rs @@ -23,7 +23,9 @@ use datafusion::{ }; use hashbrown::{HashMap, HashSet}; use indexmap::{IndexMap, IndexSet}; -use influxdb3_catalog::catalog::InnerCatalog; +use influxdb3_catalog::catalog::Catalog; +use influxdb3_id::DbId; +use influxdb3_id::TableId; use influxdb3_wal::{ Field, FieldData, LastCacheDefinition, LastCacheSize, LastCacheValueColumnsDef, Row, WalContents, WalOp, @@ -59,12 +61,12 @@ impl Error { } } -/// A three level hashmap storing Database Name -> Table Name -> Cache Name -> LastCache -type CacheMap = RwLock<HashMap<String, HashMap<String, HashMap<String, LastCache>>>>; +/// A three level hashmap storing DbId -> TableId -> Cache Name -> LastCache +type CacheMap = RwLock<HashMap<DbId, HashMap<TableId, HashMap<String, LastCache>>>>; /// Provides all last-N-value caches for the entire database -#[derive(Default)] pub struct LastCacheProvider { + catalog: Arc<Catalog>, cache_map: CacheMap, } @@ -79,10 +81,14 @@ pub const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(60 * 60 * 4); /// Arguments to the [`LastCacheProvider::create_cache`] method pub struct 
CreateCacheArguments { + /// The id of the database to create the cache for + pub db_id: DbId, /// The name of the database to create the cache for pub db_name: String, + /// The id of the table in the database to create the cache for + pub table_id: TableId, /// The name of the table in the database to create the cache for - pub tbl_name: String, + pub table_name: String, /// The Influx Schema of the table pub schema: Schema, /// An optional name for the cache @@ -111,22 +117,33 @@ pub struct CreateCacheArguments { impl LastCacheProvider { /// Create a new [`LastCacheProvider`] - pub fn new() -> Self { - Self::default() + pub fn new(catalog: Arc<Catalog>) -> Self { + Self { + catalog, + cache_map: Default::default(), + } } - /// Initialize a [`LastCacheProvider`] from a [`InnerCatalog`] - pub fn new_from_catalog(catalog: &InnerCatalog) -> Result<Self, Error> { - let provider = LastCacheProvider::new(); - for db_schema in catalog.databases() { - for tbl_def in db_schema.tables() { - for (cache_name, cache_def) in tbl_def.last_caches() { + /// Initialize a [`LastCacheProvider`] from a [`Catalog`] + pub fn new_from_catalog(catalog: Arc<Catalog>) -> Result<Self, Error> { + let provider = LastCacheProvider::new(Arc::clone(&catalog)); + let inner_catalog = catalog.inner().read(); + for db_schema in inner_catalog.databases() { + for table_def in db_schema.tables() { + for (cache_name, cache_def) in table_def.last_caches() { assert!( provider .create_cache(CreateCacheArguments { + db_id: db_schema.id, db_name: db_schema.name.to_string(), - tbl_name: tbl_def.name.to_string(), - schema: tbl_def.schema.clone(), + table_id: catalog + .table_name_to_id( + db_schema.id, + Arc::clone(&table_def.table_name) + ) + .expect("table exists"), + table_name: table_def.table_name.to_string(), + schema: table_def.schema.clone(), cache_name: Some(cache_name.to_owned()), count: Some(cache_def.count.into()), ttl: Some(Duration::from_secs(cache_def.ttl)), @@ -152,20 +169,22 @@ impl LastCacheProvider { /// traits. 
fn get_cache_name_and_schema( &self, - db_name: &str, - tbl_name: &str, + db_id: DbId, + table_id: TableId, cache_name: Option<&str>, ) -> Option<(String, ArrowSchemaRef)> { self.cache_map .read() - .get(db_name) - .and_then(|db| db.get(tbl_name)) - .and_then(|tbl| { + .get(&db_id) + .and_then(|db| db.get(&table_id)) + .and_then(|table| { if let Some(name) = cache_name { - tbl.get(name) + table + .get(name) .map(|lc| (name.to_string(), Arc::clone(&lc.schema))) - } else if tbl.len() == 1 { - tbl.iter() + } else if table.len() == 1 { + table + .iter() .map(|(name, lc)| (name.to_string(), Arc::clone(&lc.schema))) .next() } else { @@ -175,15 +194,23 @@ impl LastCacheProvider { } /// Get the [`LastCacheDefinition`] for all caches contained in a database - pub fn get_last_caches_for_db(&self, db: &str) -> Vec<LastCacheDefinition> { + pub fn get_last_caches_for_db(&self, db: DbId) -> Vec<LastCacheDefinition> { let read = self.cache_map.read(); - read.get(db) - .map(|tbl| { - tbl.iter() - .flat_map(|(tbl_name, tbl_map)| { - tbl_map - .iter() - .map(|(lc_name, lc)| lc.to_definition(&**tbl_name, lc_name)) + read.get(&db) + .map(|table| { + table + .iter() + .flat_map(|(table_id, table_map)| { + table_map.iter().map(|(lc_name, lc)| { + lc.to_definition( + *table_id, + self.catalog + .table_id_to_name(db, *table_id) + .expect("table exists") + .to_string(), + lc_name, + ) + }) }) .collect() }) @@ -198,14 +225,16 @@ impl LastCacheProvider { pub fn create_cache( &self, CreateCacheArguments { - db_name, - tbl_name, + db_id, + table_id, + table_name, schema, cache_name, count, ttl, key_columns, value_columns, + .. }: CreateCacheArguments, ) -> Result<Option<LastCacheDefinition>, Error> { let key_columns = if let Some(keys) = key_columns { @@ -241,7 +270,10 @@ impl LastCacheProvider { // Generate the cache name if it was not provided let cache_name = cache_name.unwrap_or_else(|| { - format!("{tbl_name}_{keys}_last_cache", keys = key_columns.join("_")) + format!( + "{table_name}_{keys}_last_cache", + keys = key_columns.join("_") + ) }); let accept_new_fields = value_columns.is_none(); @@ -280,21 +312,22 @@ impl LastCacheProvider { // then this is an error. let mut lock = self.cache_map.write(); if let Some(lc) = lock - .get(&db_name) - .and_then(|db| db.get(&tbl_name)) - .and_then(|tbl| tbl.get(&cache_name)) + .get(&db_id) + .and_then(|db| db.get(&table_id)) + .and_then(|table| table.get(&cache_name)) { return lc.compare_config(&last_cache).map(|_| None); } - lock.entry(db_name) + lock.entry(db_id) .or_default() - .entry_ref(&tbl_name) + .entry(table_id) .or_default() .insert(cache_name.clone(), last_cache); Ok(Some(LastCacheDefinition { - table: tbl_name, + table_id, + table: table_name, name: cache_name, key_columns, value_columns: last_cache_value_columns_def, @@ -348,7 +381,7 @@ impl LastCacheProvider { pub fn create_cache_from_definition( &self, - db_name: &str, + db_id: DbId, schema: &Schema, definition: &LastCacheDefinition, ) { @@ -375,9 +408,9 @@ impl LastCacheProvider { let mut lock = self.cache_map.write(); - lock.entry(db_name.to_string()) + lock.entry(db_id) .or_default() - .entry_ref(&definition.table) + .entry(definition.table_id) .or_default() .insert(definition.name.clone(), last_cache); } @@ -389,30 +422,30 @@ impl LastCacheProvider { /// table's database; likewise for the database's entry in the provider's cache map. 
pub fn delete_cache( &self, - db_name: &str, - table_name: &str, + db_id: DbId, + table_id: TableId, cache_name: &str, ) -> Result<(), Error> { let mut lock = self.cache_map.write(); - let Some(db) = lock.get_mut(db_name) else { + let Some(db) = lock.get_mut(&db_id) else { return Err(Error::CacheDoesNotExist); }; - let Some(tbl) = db.get_mut(table_name) else { + let Some(table) = db.get_mut(&table_id) else { return Err(Error::CacheDoesNotExist); }; - if tbl.remove(cache_name).is_none() { + if table.remove(cache_name).is_none() { return Err(Error::CacheDoesNotExist); } - if tbl.is_empty() { - db.remove(table_name); + if table.is_empty() { + db.remove(&table_id); } if db.is_empty() { - lock.remove(db_name); + lock.remove(&db_id); } Ok(()) @@ -427,14 +460,14 @@ impl LastCacheProvider { for op in &wal_contents.ops { match op { WalOp::Write(batch) => { - if let Some(db_cache) = cache_map.get_mut(batch.database_name.as_ref()) { + if let Some(db_cache) = cache_map.get_mut(&batch.database_id) { if db_cache.is_empty() { continue; } - for (tbl_name, tbl_chunks) in &batch.table_chunks { - if let Some(tbl_cache) = db_cache.get_mut(tbl_name.as_ref()) { - for (_, last_cache) in tbl_cache.iter_mut() { - for chunk in tbl_chunks.chunk_time_to_chunk.values() { + for (table_id, table_chunks) in &batch.table_chunks { + if let Some(table_cache) = db_cache.get_mut(table_id) { + for (_, last_cache) in table_cache.iter_mut() { + for chunk in table_chunks.chunk_time_to_chunk.values() { for row in &chunk.rows { last_cache.push(row); } @@ -455,7 +488,7 @@ impl LastCacheProvider { let mut cache_map = self.cache_map.write(); cache_map.iter_mut().for_each(|(_, db)| { db.iter_mut() - .for_each(|(_, tbl)| tbl.iter_mut().for_each(|(_, lc)| lc.remove_expired())) + .for_each(|(_, table)| table.iter_mut().for_each(|(_, lc)| lc.remove_expired())) }); } @@ -463,20 +496,20 @@ impl LastCacheProvider { #[cfg(test)] pub(crate) fn get_cache_record_batches( &self, - db_name: &str, - tbl_name: &str, + db_id: DbId, + table_id: TableId, cache_name: Option<&str>, predicates: &[Predicate], ) -> Option<Result<Vec<RecordBatch>, ArrowError>> { self.cache_map .read() - .get(db_name) - .and_then(|db| db.get(tbl_name)) - .and_then(|tbl| { + .get(&db_id) + .and_then(|db| db.get(&table_id)) + .and_then(|table| { if let Some(name) = cache_name { - tbl.get(name) - } else if tbl.len() == 1 { - tbl.iter().next().map(|(_, lc)| lc) + table.get(name) + } else if table.len() == 1 { + table.iter().next().map(|(_, lc)| lc) } else { None } @@ -490,7 +523,7 @@ impl LastCacheProvider { self.cache_map .read() .iter() - .flat_map(|(_, db)| db.iter().flat_map(|(_, tbl)| tbl.iter())) + .flat_map(|(_, db)| db.iter().flat_map(|(_, table)| table.iter())) .count() } } @@ -803,10 +836,12 @@ impl LastCache { /// Convert the `LastCache` into a `LastCacheDefinition` fn to_definition( &self, + table_id: TableId, table: impl Into<String>, name: impl Into<String>, ) -> LastCacheDefinition { LastCacheDefinition { + table_id, table: table.into(), name: name.into(), key_columns: self.key_columns.iter().cloned().collect(), @@ -1576,7 +1611,7 @@ mod tests { use arrow_util::{assert_batches_eq, assert_batches_sorted_eq}; use data_types::NamespaceName; use influxdb3_catalog::catalog::{Catalog, DatabaseSchema, TableDefinition}; - use influxdb3_id::DbId; + use influxdb3_id::{DbId, TableId}; use influxdb3_wal::{LastCacheDefinition, WalConfig}; use insta::assert_json_snapshot; use iox_time::{MockProvider, Time, TimeProvider}; @@ -1590,10 +1625,11 @@ mod tests { let persister = 
Arc::new(Persister::new(obj_store, "test_host")); let host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("dummy-instance-id"); + let catalog = Arc::new(Catalog::new(host_id, instance_id)); WriteBufferImpl::new( persister, - Arc::new(Catalog::new(host_id, instance_id)), - Arc::new(LastCacheProvider::new()), + Arc::clone(&catalog), + Arc::new(LastCacheProvider::new(catalog)), time_provider, crate::test_help::make_exec(), WalConfig::test_config(), @@ -1606,7 +1642,9 @@ mod tests { #[tokio::test] async fn pick_up_latest_write() { let db_name = "foo"; + let db_id = DbId::from(0); let tbl_name = "cpu"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; @@ -1623,8 +1661,8 @@ mod tests { // Create the last cache: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("cache"), None, None, @@ -1650,7 +1688,7 @@ mod tests { // Check what is in the last cache: let batch = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, predicates) + .get_cache_record_batches(db_id, tbl_id, None, predicates) .unwrap() .unwrap(); @@ -1678,7 +1716,7 @@ mod tests { let batch = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, predicates) + .get_cache_record_batches(db_id, tbl_id, None, predicates) .unwrap() .unwrap(); @@ -1712,7 +1750,9 @@ mod tests { #[tokio::test] async fn cache_key_column_predicates() { let db_name = "foo"; + let db_id = DbId::from(0); let tbl_name = "cpu"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do one write to update the catalog with a db and table: @@ -1728,8 +1768,8 @@ mod tests { // Create the last cache with keys on all tag columns: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("cache"), None, None, @@ -1927,7 +1967,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -1938,7 +1978,9 @@ mod tests { #[tokio::test] async fn non_default_cache_size() { let db_name = "foo"; + let db_id = DbId::from(0); let tbl_name = "cpu"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do one write to update the catalog with a db and table: @@ -1954,8 +1996,8 @@ mod tests { // Create the last cache with keys on all tag columns: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("cache"), Some(10), None, @@ -2101,7 +2143,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -2112,7 +2154,9 @@ mod tests { #[tokio::test] async fn cache_ttl() { let db_name = "foo"; + let db_id = DbId::from(0); let tbl_name = "cpu"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do one write to update the catalog with a db and table: @@ -2128,8 +2172,8 @@ mod tests { // Create the last cache with keys on all tag columns: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("cache"), // use a cache size greater than 1 to ensure the TTL is doing the evicting Some(10), @@ -2170,7 +2214,7 @@ mod tests { // Check what is in the last cache: let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, predicates) + .get_cache_record_batches(db_id, tbl_id, None, predicates) .unwrap() 
.unwrap(); @@ -2209,7 +2253,7 @@ mod tests { // Check what is in the last cache: let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, predicates) + .get_cache_record_batches(db_id, tbl_id, None, predicates) .unwrap() .unwrap(); @@ -2238,7 +2282,7 @@ mod tests { // Check what is in the last cache: let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, predicates) + .get_cache_record_batches(db_id, tbl_id, None, predicates) .unwrap() .unwrap(); @@ -2257,7 +2301,9 @@ mod tests { #[tokio::test] async fn fields_as_key_columns() { let db_name = "cassini_mission"; + let db_id = DbId::from(0); let tbl_name = "temp"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do one write to update the catalog with a db and table: @@ -2276,8 +2322,8 @@ mod tests { // Create the last cache with keys on some field columns: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("cache"), None, Some(Duration::from_millis(50)), @@ -2376,7 +2422,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -2387,7 +2433,9 @@ mod tests { #[tokio::test] async fn series_key_as_default() { let db_name = "windmills"; + let db_id = DbId::from(0); let tbl_name = "wind_speed"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do one write to update the catalog with a db and table: @@ -2402,7 +2450,7 @@ mod tests { .unwrap(); // Create the last cache with keys on some field columns: - wbuf.create_last_cache(db_name, tbl_name, Some("cache"), None, None, None, None) + wbuf.create_last_cache(db_id, tbl_id, Some("cache"), None, None, None, None) .await .expect("create last cache"); @@ -2508,7 +2556,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -2519,7 +2567,9 @@ mod tests { #[tokio::test] async fn tag_set_as_default() { let db_name = "windmills"; + let db_id = DbId::from(0); let tbl_name = "wind_speed"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do one write to update the catalog with a db and table: @@ -2534,7 +2584,7 @@ mod tests { .unwrap(); // Create the last cache with keys on some field columns: - wbuf.create_last_cache(db_name, tbl_name, Some("cache"), None, None, None, None) + wbuf.create_last_cache(db_id, tbl_id, Some("cache"), None, None, None, None) .await .expect("create last cache"); @@ -2640,7 +2690,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -2651,7 +2701,9 @@ mod tests { #[tokio::test] async fn null_values() { let db_name = "weather"; + let db_id = DbId::from(0); let tbl_name = "temp"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do a write to update catalog @@ -2667,7 +2719,7 @@ mod tests { .unwrap(); // Create the last cache using default tags as keys - wbuf.create_last_cache(db_name, tbl_name, None, Some(10), None, None, None) + wbuf.create_last_cache(db_id, tbl_id, None, Some(10), None, None, None) .await .expect("create last cache"); @@ -2694,7 +2746,7 @@ 
mod tests { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, &[]) + .get_cache_record_batches(db_id, tbl_id, None, &[]) .unwrap() .unwrap(); @@ -2718,7 +2770,9 @@ mod tests { #[tokio::test] async fn new_fields_added_to_default_cache() { let db_name = "nhl_stats"; + let db_id = DbId::from(0); let tbl_name = "plays"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do a write to setup catalog: @@ -2733,7 +2787,7 @@ mod tests { .unwrap(); // Create the last cache using default tags as keys - wbuf.create_last_cache(db_name, tbl_name, None, Some(10), None, None, None) + wbuf.create_last_cache(db_id, tbl_id, None, Some(10), None, None, None) .await .expect("create last cache"); @@ -2804,7 +2858,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -2815,7 +2869,9 @@ mod tests { #[tokio::test] async fn new_field_ordering() { let db_name = "db"; + let db_id = DbId::from(0); let tbl_name = "tbl"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do a write to setup catalog: @@ -2833,8 +2889,8 @@ mod tests { // and using the default for fields, so that new fields will get added // to the cache. wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, None, None, // use default cache size of 1 None, @@ -2937,7 +2993,7 @@ mod tests { for t in test_cases { let batches = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, t.predicates) + .get_cache_record_batches(db_id, tbl_id, None, t.predicates) .unwrap() .unwrap(); @@ -2948,7 +3004,9 @@ mod tests { #[tokio::test] async fn idempotent_cache_creation() { let db_name = "db"; + let db_id = DbId::from(0); let tbl_name = "tbl"; + let tbl_id = TableId::from(0); let wbuf = setup_write_buffer().await; // Do a write to setup catalog: @@ -2963,21 +3021,21 @@ mod tests { .unwrap(); // Create a last cache using all default settings - wbuf.create_last_cache(db_name, tbl_name, None, None, None, None, None) + wbuf.create_last_cache(db_id, tbl_id, None, None, None, None, None) .await .expect("create last cache"); assert_eq!(wbuf.last_cache_provider().size(), 1); // Doing the same should be fine: - wbuf.create_last_cache(db_name, tbl_name, None, None, None, None, None) + wbuf.create_last_cache(db_id, tbl_id, None, None, None, None, None) .await .expect("create last cache"); assert_eq!(wbuf.last_cache_provider().size(), 1); // Specify the same arguments as what the defaults would produce (minus the value columns) wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("tbl_t1_t2_last_cache"), Some(1), Some(DEFAULT_CACHE_TTL), @@ -2991,8 +3049,8 @@ mod tests { // Specify value columns, which would deviate from above, as that implies different cache // behaviour, i.e., no new fields are accepted: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, None, None, None, @@ -3005,8 +3063,8 @@ mod tests { // Specify different key columns, along with the same cache name will produce error: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, Some("tbl_t1_t2_last_cache"), None, None, @@ -3021,8 +3079,8 @@ mod tests { // different generated cache name, and therefore cache, so it will work: let name = wbuf .create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, None, None, None, @@ -3039,8 +3097,8 @@ mod tests { // Specify 
different TTL: wbuf.create_last_cache( - db_name, - tbl_name, + db_id, + tbl_id, None, None, Some(Duration::from_secs(10)), @@ -3052,7 +3110,7 @@ mod tests { assert_eq!(wbuf.last_cache_provider().size(), 2); // Specify different count: - wbuf.create_last_cache(db_name, tbl_name, None, Some(10), None, None, None) + wbuf.create_last_cache(db_id, tbl_id, None, Some(10), None, None, None) .await .expect_err("create last cache should have failed"); assert_eq!(wbuf.last_cache_provider().size(), 2); @@ -3069,10 +3127,12 @@ mod tests { name: db_name.into(), tables: BTreeMap::new(), }; + let table_id = TableId::from(0); use schema::InfluxColumnType::*; use schema::InfluxFieldType::*; // Add a table to it: let mut table_def = TableDefinition::new( + table_id, "test_table_1".into(), [ ("t1", Tag), @@ -3088,6 +3148,7 @@ mod tests { // Give that table a last cache: table_def.add_last_cache( LastCacheDefinition::new_all_non_key_value_columns( + table_id, "test_table_1", "test_cache_1", ["t1", "t2"], @@ -3096,11 +3157,11 @@ mod tests { ) .unwrap(), ); - database - .tables - .insert(Arc::clone(&table_def.name), table_def); + database.tables.insert(table_def.table_id, table_def); // Add another table to it: + let table_id = TableId::from(1); let mut table_def = TableDefinition::new( + table_id, "test_table_2".into(), [ ("t1", Tag), @@ -3114,6 +3175,7 @@ mod tests { // Give that table a last cache: table_def.add_last_cache( LastCacheDefinition::new_with_explicit_value_columns( + table_id, "test_table_2", "test_cache_2", ["t1"], @@ -3126,6 +3188,7 @@ mod tests { // Give that table another last cache: table_def.add_last_cache( LastCacheDefinition::new_with_explicit_value_columns( + table_id, "test_table_2", "test_cache_3", &[] as &[std::string::String], @@ -3135,24 +3198,23 @@ mod tests { ) .unwrap(), ); - database - .tables - .insert(Arc::clone(&table_def.name), table_def); + database.tables.insert(table_def.table_id, table_def); // Create the catalog and clone its InnerCatalog (which is what the LastCacheProvider is // initialized from): let host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("dummy-instance-id"); let mut catalog = Catalog::new(host_id, instance_id); + let db_id = database.id; catalog.insert_database(database); - let inner = catalog.clone_inner(); + let catalog = Arc::new(catalog); // This is the function we are testing, which initializes the LastCacheProvider from the catalog: - let provider = LastCacheProvider::new_from_catalog(&inner) + let provider = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)) .expect("create last cache provider from catalog"); // There should be a total of 3 caches: assert_eq!(3, provider.size()); // Get the cache definitions and snapshot them to check their content. They are sorted to // ensure order, since the provider uses hashmaps and their order may not be guaranteed. 
- let mut caches = provider.get_last_caches_for_db(db_name); + let mut caches = provider.get_last_caches_for_db(db_id); caches.sort_by(|a, b| match a.table.partial_cmp(&b.table).unwrap() { ord @ Ordering::Less | ord @ Ordering::Greater => ord, Ordering::Equal => a.name.partial_cmp(&b.name).unwrap(), diff --git a/influxdb3_write/src/last_cache/snapshots/influxdb3_write__last_cache__tests__catalog_initialization.snap b/influxdb3_write/src/last_cache/snapshots/influxdb3_write__last_cache__tests__catalog_initialization.snap index a5d19e15d3..79e9797b63 100644 --- a/influxdb3_write/src/last_cache/snapshots/influxdb3_write__last_cache__tests__catalog_initialization.snap +++ b/influxdb3_write/src/last_cache/snapshots/influxdb3_write__last_cache__tests__catalog_initialization.snap @@ -4,6 +4,7 @@ expression: caches --- [ { + "table_id": 0, "table": "test_table_1", "name": "test_cache_1", "key_columns": [ @@ -17,6 +18,7 @@ expression: caches "ttl": 600 }, { + "table_id": 1, "table": "test_table_2", "name": "test_cache_2", "key_columns": [ @@ -33,6 +35,7 @@ expression: caches "ttl": 60 }, { + "table_id": 1, "table": "test_table_2", "name": "test_cache_3", "key_columns": [], diff --git a/influxdb3_write/src/last_cache/table_function.rs b/influxdb3_write/src/last_cache/table_function.rs index d94131c0a9..08db8f2a45 100644 --- a/influxdb3_write/src/last_cache/table_function.rs +++ b/influxdb3_write/src/last_cache/table_function.rs @@ -10,12 +10,14 @@ use datafusion::{ physical_plan::{memory::MemoryExec, ExecutionPlan}, scalar::ScalarValue, }; +use influxdb3_id::DbId; +use influxdb3_id::TableId; use super::LastCacheProvider; struct LastCacheFunctionProvider { - db_name: String, - table_name: String, + db_id: DbId, + table_id: TableId, cache_name: String, schema: SchemaRef, provider: Arc<LastCacheProvider>, @@ -51,8 +53,8 @@ impl TableProvider for LastCacheFunctionProvider { ) -> Result<Arc<dyn ExecutionPlan>> { let read = self.provider.cache_map.read(); let batches = if let Some(cache) = read - .get(&self.db_name) - .and_then(|db| db.get(&self.table_name)) + .get(&self.db_id) + .and_then(|db| db.get(&self.table_id)) .and_then(|tbl| tbl.get(&self.cache_name)) { let predicates = cache.convert_filter_exprs(filters); @@ -72,16 +74,13 @@ impl TableProvider for LastCacheFunctionProvider { } pub struct LastCacheFunction { - db_name: String, + db_id: DbId, provider: Arc<LastCacheProvider>, } impl LastCacheFunction { - pub fn new(db_name: impl Into<String>, provider: Arc<LastCacheProvider>) -> Self { - Self { - db_name: db_name.into(), - provider, - } + pub fn new(db_id: DbId, provider: Arc<LastCacheProvider>) -> Self { + Self { db_id, provider } } } @@ -98,15 +97,20 @@ impl TableFunctionImpl for LastCacheFunction { } None => None, }; + let table_id = self + .provider + .catalog + .table_name_to_id(self.db_id, table_name.as_str().into()) + .expect("table exists"); match self.provider.get_cache_name_and_schema( - &self.db_name, - table_name, + self.db_id, + table_id, cache_name.map(|x| x.as_str()), ) { Some((cache_name, schema)) => Ok(Arc::new(LastCacheFunctionProvider { - db_name: self.db_name.clone(), - table_name: table_name.clone(), + db_id: self.db_id, + table_id, cache_name, schema, provider: Arc::clone(&self.provider), diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs index b12575ea0a..72ea338d86 100644 --- a/influxdb3_write/src/lib.rs +++ b/influxdb3_write/src/lib.rs @@ -18,6 +18,7 @@ use datafusion::error::DataFusionError; use datafusion::prelude::Expr; use 
influxdb3_catalog::catalog::{self, SequenceNumber}; use influxdb3_id::DbId; +use influxdb3_id::TableId; use influxdb3_wal::{LastCacheDefinition, SnapshotSequenceNumber, WalFileSequenceNumber}; use iox_query::QueryChunk; use iox_time::Time; @@ -78,7 +79,7 @@ pub trait Bufferer: Debug + Send + Sync + 'static { fn catalog(&self) -> Arc<catalog::Catalog>; /// Returns the parquet files for a given database and table - fn parquet_files(&self, db_name: &str, table_name: &str) -> Vec<ParquetFile>; + fn parquet_files(&self, db_id: DbId, table_id: TableId) -> Vec<ParquetFile>; /// A channel to watch for when new persisted snapshots are created fn watch_persisted_snapshots(&self) -> tokio::sync::watch::Receiver<Option<PersistedSnapshot>>; @@ -111,8 +112,8 @@ pub trait LastCacheManager: Debug + Send + Sync + 'static { #[allow(clippy::too_many_arguments)] async fn create_last_cache( &self, - db_name: &str, - tbl_name: &str, + db_id: DbId, + tbl_id: TableId, cache_name: Option<&str>, count: Option<usize>, ttl: Option<Duration>, @@ -124,8 +125,8 @@ pub trait LastCacheManager: Debug + Send + Sync + 'static { /// This should handle removal of the cache's information from the catalog as well async fn delete_last_cache( &self, - db_name: &str, - tbl_name: &str, + db_id: DbId, + tbl_id: TableId, cache_name: &str, ) -> Result<(), write_buffer::Error>; } @@ -166,8 +167,10 @@ pub struct PersistedSnapshot { pub host_id: String, /// The next file id to be used with `ParquetFile`s when the snapshot is loaded pub next_file_id: ParquetFileId, - /// The next db id to be used with databases when the snapshot is loaded + /// The next db id to be used for databases when the snapshot is loaded pub next_db_id: DbId, + /// The next table id to be used for tables when the snapshot is loaded + pub next_table_id: TableId, /// The snapshot sequence number associated with this snapshot pub snapshot_sequence_number: SnapshotSequenceNumber, /// The wal file sequence number that triggered this snapshot @@ -184,7 +187,7 @@ pub struct PersistedSnapshot { pub max_time: i64, /// The collection of databases that had tables persisted in this snapshot. The tables will then have their /// name and the parquet file. 
- pub databases: HashMap<Arc<str>, DatabaseTables>, + pub databases: HashMap<DbId, DatabaseTables>, } impl PersistedSnapshot { @@ -198,6 +201,7 @@ impl PersistedSnapshot { host_id, next_file_id: ParquetFileId::current(), next_db_id: DbId::next_id(), + next_table_id: TableId::next_id(), snapshot_sequence_number, wal_file_sequence_number, catalog_sequence_number, @@ -211,8 +215,8 @@ impl PersistedSnapshot { fn add_parquet_file( &mut self, - database_name: Arc<str>, - table_name: Arc<str>, + database_id: DbId, + table_id: TableId, parquet_file: ParquetFile, ) { if self.next_file_id < parquet_file.id { @@ -224,10 +228,10 @@ impl PersistedSnapshot { self.max_time = self.max_time.max(parquet_file.max_time); self.databases - .entry(database_name) + .entry(database_id) .or_default() .tables - .entry(table_name) + .entry(table_id) .or_default() .push(parquet_file); } @@ -235,7 +239,7 @@ impl PersistedSnapshot { #[derive(Debug, Serialize, Deserialize, Default, Eq, PartialEq, Clone)] pub struct DatabaseTables { - pub tables: hashbrown::HashMap<Arc<str>, Vec<ParquetFile>>, + pub tables: hashbrown::HashMap<TableId, Vec<ParquetFile>>, } /// The next file id to be used when persisting `ParquetFile`s diff --git a/influxdb3_write/src/paths.rs b/influxdb3_write/src/paths.rs index 78bda2e94a..f6ba854bf9 100644 --- a/influxdb3_write/src/paths.rs +++ b/influxdb3_write/src/paths.rs @@ -57,11 +57,12 @@ impl ParquetFilePath { db_name: &str, db_id: u32, table_name: &str, + table_id: u32, date: DateTime<Utc>, wal_file_sequence_number: WalFileSequenceNumber, ) -> Self { let path = ObjPath::from(format!( - "{host_prefix}/dbs/{db_name}-{db_id}/{table_name}/{}/{}.{}", + "{host_prefix}/dbs/{db_name}-{db_id}/{table_name}-{table_id}/{}/{}.{}", date.format("%Y-%m-%d/%H-%M"), wal_file_sequence_number.as_u64(), PARQUET_FILE_EXTENSION @@ -73,13 +74,14 @@ impl ParquetFilePath { db_name: &str, db_id: u32, table_name: &str, + table_id: u32, chunk_time: i64, wal_file_sequence_number: WalFileSequenceNumber, ) -> Self { // Convert the chunk time into a date time string for YYYY-MM-DDTHH-MM let date_time = DateTime::<Utc>::from_timestamp_nanos(chunk_time); let path = ObjPath::from(format!( - "dbs/{db_name}-{db_id}/{table_name}/{}/{:010}.{}", + "dbs/{db_name}-{db_id}/{table_name}-{table_id}/{}/{:010}.{}", date_time.format("%Y-%m-%d/%H-%M"), wal_file_sequence_number.as_u64(), PARQUET_FILE_EXTENSION @@ -150,10 +152,11 @@ fn parquet_file_path_new() { "my_db", 0, "my_table", + 0, Utc.with_ymd_and_hms(2038, 1, 19, 3, 14, 7).unwrap(), WalFileSequenceNumber::new(0), ), - ObjPath::from("my_host/dbs/my_db-0/my_table/2038-01-19/03-14/0.parquet") + ObjPath::from("my_host/dbs/my_db-0/my_table-0/2038-01-19/03-14/0.parquet") ); } @@ -161,16 +164,17 @@ fn parquet_file_path_new() { fn parquet_file_percent_encoded() { assert_eq!( ParquetFilePath::new( - "my_host", + "..", "..", 0, "..", + 0, Utc.with_ymd_and_hms(2038, 1, 19, 3, 14, 7).unwrap(), WalFileSequenceNumber::new(0), ) .as_ref() .as_ref(), - "my_host/dbs/..-0/%2E%2E/2038-01-19/03-14/0.parquet" + "%2E%2E/dbs/..-0/..-0/2038-01-19/03-14/0.parquet" ); } diff --git a/influxdb3_write/src/persister.rs b/influxdb3_write/src/persister.rs index 0408afb223..aae3cfb4c7 100644 --- a/influxdb3_write/src/persister.rs +++ b/influxdb3_write/src/persister.rs @@ -416,7 +416,7 @@ mod tests { use super::*; use crate::ParquetFileId; use influxdb3_catalog::catalog::SequenceNumber; - use influxdb3_id::DbId; + use influxdb3_id::{DbId, TableId}; use influxdb3_wal::SnapshotSequenceNumber; use 
object_store::memory::InMemory; use observability_deps::tracing::info; @@ -477,8 +477,10 @@ mod tests { catalog.wal_file_sequence_number, WalFileSequenceNumber::new(1) ); - assert!(catalog.catalog.db_exists("my_second_db")); - assert!(!catalog.catalog.db_exists("my_db")); + // my_second_db + assert!(catalog.catalog.db_exists(DbId::from(1))); + // my_db + assert!(!catalog.catalog.db_exists(DbId::from(0))); } #[tokio::test] @@ -489,7 +491,8 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(0), - next_db_id: DbId::from(0), + next_db_id: DbId::from(1), + next_table_id: TableId::from(1), snapshot_sequence_number: SnapshotSequenceNumber::new(0), wal_file_sequence_number: WalFileSequenceNumber::new(0), catalog_sequence_number: SequenceNumber::new(0), @@ -511,7 +514,8 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(0), - next_db_id: DbId::from(0), + next_db_id: DbId::from(1), + next_table_id: TableId::from(1), snapshot_sequence_number: SnapshotSequenceNumber::new(0), wal_file_sequence_number: WalFileSequenceNumber::new(0), catalog_sequence_number: SequenceNumber::default(), @@ -524,7 +528,8 @@ mod tests { let info_file_2 = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(1), - next_db_id: DbId::from(0), + next_db_id: DbId::from(1), + next_table_id: TableId::from(1), snapshot_sequence_number: SnapshotSequenceNumber::new(1), wal_file_sequence_number: WalFileSequenceNumber::new(1), catalog_sequence_number: SequenceNumber::default(), @@ -537,7 +542,8 @@ mod tests { let info_file_3 = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(2), - next_db_id: DbId::from(0), + next_db_id: DbId::from(1), + next_table_id: TableId::from(1), snapshot_sequence_number: SnapshotSequenceNumber::new(2), wal_file_sequence_number: WalFileSequenceNumber::new(2), catalog_sequence_number: SequenceNumber::default(), @@ -571,7 +577,8 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(0), - next_db_id: DbId::from(0), + next_db_id: DbId::from(1), + next_table_id: TableId::from(1), snapshot_sequence_number: SnapshotSequenceNumber::new(0), wal_file_sequence_number: WalFileSequenceNumber::new(0), catalog_sequence_number: SequenceNumber::default(), @@ -598,7 +605,8 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(id), - next_db_id: DbId::from(0), + next_db_id: DbId::from(1), + next_table_id: TableId::from(1), snapshot_sequence_number: SnapshotSequenceNumber::new(id), wal_file_sequence_number: WalFileSequenceNumber::new(id), catalog_sequence_number: SequenceNumber::new(id as u32), @@ -634,8 +642,8 @@ mod tests { ); info_file.add_parquet_file( - "foo".into(), - "bar".into(), + DbId::from(0), + TableId::from(0), crate::ParquetFile { // Use a number that will be bigger than what's created in the // PersistedSnapshot automatically @@ -717,6 +725,7 @@ mod tests { "db_one", 0, "table_one", + 0, Utc::now(), WalFileSequenceNumber::new(1), ); @@ -754,10 +763,12 @@ mod tests { // write raw json to catalog let catalog_json = r#" { - "databases": {}, + "databases": [], "sequence": 0, "host_id": "test_host", - "instance_id": "24b1e1bf-b301-4101-affa-e3d668fe7d20" + "instance_id": "24b1e1bf-b301-4101-affa-e3d668fe7d20", + "db_map": [], + "table_map": [] } "#; let local_disk = diff --git 
a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index d07e19abef..ea53b05550 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -23,6 +23,7 @@ use datafusion::common::DataFusionError; use datafusion::datasource::object_store::ObjectStoreUrl; use datafusion::logical_expr::Expr; use influxdb3_catalog::catalog::Catalog; +use influxdb3_id::{DbId, TableId}; use influxdb3_wal::object_store::WalObjectStore; use influxdb3_wal::CatalogOp::CreateLastCache; use influxdb3_wal::{ @@ -145,6 +146,11 @@ impl WriteBufferImpl { .first() .map(|s| s.next_db_id.set_next_id()) .unwrap_or(()); + // Set the next table id to use when adding a new database + persisted_snapshots + .first() + .map(|s| s.next_table_id.set_next_id()) + .unwrap_or(()); // Set the next file id to use when persisting ParquetFiles NEXT_FILE_ID.store( persisted_snapshots @@ -292,21 +298,32 @@ impl WriteBufferImpl { projection: Option<&Vec<usize>>, ctx: &dyn Session, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { - let db_schema = self + let db_id = self .catalog - .db_schema(database_name) + .db_name_to_id(database_name.into()) .ok_or_else(|| DataFusionError::Execution(format!("db {} not found", database_name)))?; - let table_schema = { - let table = db_schema.tables.get(table_name).ok_or_else(|| { + let db_schema = self + .catalog + .db_schema(&db_id) + .expect("Already checked db exists"); + + let table_id = self + .catalog + .table_name_to_id(db_id, table_name.into()) + .ok_or_else(|| { DataFusionError::Execution(format!( "table {} not found in db {}", table_name, database_name )) })?; - table.schema.clone() - }; + let table_schema = db_schema + .tables + .get(&table_id) + .expect("Already checked id exists") + .schema + .clone(); let mut chunks = self.buffer.get_table_chunks( Arc::clone(&db_schema), @@ -315,7 +332,7 @@ impl WriteBufferImpl { projection, ctx, )?; - let parquet_files = self.persisted_files.get_files(database_name, table_name); + let parquet_files = self.persisted_files.get_files(db_schema.id, table_id); let mut chunk_order = chunks.len() as i64; @@ -412,8 +429,8 @@ impl Bufferer for WriteBufferImpl { self.catalog() } - fn parquet_files(&self, db_name: &str, table_name: &str) -> Vec<ParquetFile> { - self.buffer.persisted_parquet_files(db_name, table_name) + fn parquet_files(&self, db_id: DbId, table_id: TableId) -> Vec<ParquetFile> { + self.buffer.persisted_parquet_files(db_id, table_id) } fn watch_persisted_snapshots(&self) -> Receiver<Option<PersistedSnapshot>> { @@ -448,8 +465,8 @@ impl LastCacheManager for WriteBufferImpl { #[allow(clippy::too_many_arguments)] async fn create_last_cache( &self, - db_name: &str, - tbl_name: &str, + db_id: DbId, + table_id: TableId, cache_name: Option<&str>, count: Option<usize>, ttl: Option<Duration>, @@ -458,16 +475,21 @@ impl LastCacheManager for WriteBufferImpl { ) -> Result<Option<LastCacheDefinition>, Error> { let cache_name = cache_name.map(Into::into); let catalog = self.catalog(); - let db_schema = catalog.db_schema(db_name).ok_or(Error::DbDoesNotExist)?; + let db_schema = catalog.db_schema(&db_id).ok_or(Error::DbDoesNotExist)?; let schema = db_schema - .get_table(tbl_name) + .get_table(table_id) .ok_or(Error::TableDoesNotExist)? 
.schema() .clone(); if let Some(info) = self.last_cache.create_cache(CreateCacheArguments { - db_name: db_name.to_string(), - tbl_name: tbl_name.to_string(), + db_id, + db_name: catalog.db_id_to_name(db_id).expect("db exists").to_string(), + table_id, + table_name: catalog + .table_id_to_name(db_id, table_id) + .expect("table exists") + .to_string(), schema, cache_name, count, @@ -475,7 +497,7 @@ impl LastCacheManager for WriteBufferImpl { key_columns, value_columns, })? { - self.catalog.add_last_cache(db_name, tbl_name, info.clone()); + self.catalog.add_last_cache(db_id, table_id, info.clone()); let add_cache_catalog_batch = WalOp::Catalog(CatalogBatch { time_ns: self.time_provider.now().timestamp_nanos(), database_id: db_schema.id, @@ -492,24 +514,27 @@ impl LastCacheManager for WriteBufferImpl { async fn delete_last_cache( &self, - db_name: &str, - tbl_name: &str, + db_id: DbId, + tbl_id: TableId, cache_name: &str, ) -> crate::Result<(), self::Error> { let catalog = self.catalog(); - self.last_cache - .delete_cache(db_name, tbl_name, cache_name)?; - catalog.delete_last_cache(db_name, tbl_name, cache_name); + self.last_cache.delete_cache(db_id, tbl_id, cache_name)?; + catalog.delete_last_cache(db_id, tbl_id, cache_name); // NOTE: if this fails then the cache will be gone from the running server, but will be // resurrected on server restart. self.wal .write_ops(vec![WalOp::Catalog(CatalogBatch { time_ns: self.time_provider.now().timestamp_nanos(), - database_id: catalog.db_schema(db_name).expect("db exists").id, - database_name: db_name.into(), + database_id: db_id, + database_name: catalog.db_id_to_name(db_id).expect("database exists"), ops: vec![CatalogOp::DeleteLastCache(LastCacheDelete { - table: tbl_name.into(), + table_id: tbl_id, + table_name: catalog + .table_id_to_name(db_id, tbl_id) + .expect("table exists") + .to_string(), name: cache_name.into(), })], })]) @@ -561,11 +586,13 @@ mod tests { Precision::Nanosecond, ); - let db = catalog.db_schema("foo").unwrap(); + let db = catalog.db_schema(&DbId::from(0)).unwrap(); assert_eq!(db.tables.len(), 2); - assert_eq!(db.tables.get("cpu").unwrap().num_columns(), 3); - assert_eq!(db.tables.get("foo").unwrap().num_columns(), 2); + // cpu table + assert_eq!(db.tables.get(&TableId::from(0)).unwrap().num_columns(), 3); + // foo table + assert_eq!(db.tables.get(&TableId::from(1)).unwrap().num_columns(), 2); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -576,11 +603,11 @@ mod tests { let (object_store, parquet_cache) = test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider)); let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); - let catalog = persister.load_or_create_catalog().await.unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new(persister.load_or_create_catalog().await.unwrap()); + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let write_buffer = WriteBufferImpl::new( Arc::clone(&persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&time_provider), crate::test_help::make_exec(), @@ -650,11 +677,11 @@ mod tests { assert_batches_eq!(&expected, &actual); // now load a new buffer from object storage - let catalog = persister.load_or_create_catalog().await.unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new(persister.load_or_create_catalog().await.unwrap()); + let last_cache 
= LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let write_buffer = WriteBufferImpl::new( Arc::clone(&persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&time_provider), crate::test_help::make_exec(), @@ -688,7 +715,9 @@ mod tests { ) .await; let db_name = "db"; + let db_id = DbId::from(0); let tbl_name = "table"; + let tbl_id = TableId::from(0); let cache_name = "cache"; // Write some data to the current segment and update the catalog: wbuf.write_lp( @@ -701,16 +730,16 @@ mod tests { .await .unwrap(); // Create a last cache: - wbuf.create_last_cache(db_name, tbl_name, Some(cache_name), None, None, None, None) + wbuf.create_last_cache(db_id, tbl_id, Some(cache_name), None, None, None, None) .await .unwrap(); // load a new write buffer to ensure its durable - let catalog = wbuf.persister.load_or_create_catalog().await.unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new(wbuf.persister.load_or_create_catalog().await.unwrap()); + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let wbuf = WriteBufferImpl::new( Arc::clone(&wbuf.persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&wbuf.time_provider), Arc::clone(&wbuf.buffer.executor), @@ -744,11 +773,11 @@ mod tests { .unwrap(); // and do another replay and verification - let catalog = wbuf.persister.load_or_create_catalog().await.unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new(wbuf.persister.load_or_create_catalog().await.unwrap()); + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let wbuf = WriteBufferImpl::new( Arc::clone(&wbuf.persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&wbuf.time_provider), Arc::clone(&wbuf.buffer.executor), @@ -791,21 +820,21 @@ mod tests { ]; let actual = wbuf .last_cache_provider() - .get_cache_record_batches(db_name, tbl_name, None, &[]) + .get_cache_record_batches(db_id, tbl_id, None, &[]) .unwrap() .unwrap(); assert_batches_eq!(&expected, &actual); // Delete the last cache: - wbuf.delete_last_cache(db_name, tbl_name, cache_name) + wbuf.delete_last_cache(db_id, tbl_id, cache_name) .await .unwrap(); // do another reload and verify it's gone - let catalog = wbuf.persister.load_or_create_catalog().await.unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new(wbuf.persister.load_or_create_catalog().await.unwrap()); + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let wbuf = WriteBufferImpl::new( Arc::clone(&wbuf.persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&wbuf.time_provider), Arc::clone(&wbuf.buffer.executor), @@ -951,15 +980,17 @@ mod tests { let actual = get_table_batches(&write_buffer, "foo", "cpu", &session_context).await; assert_batches_sorted_eq!(&expected, &actual); // and now replay in a new write buffer and attempt to write - let catalog = write_buffer - .persister - .load_or_create_catalog() - .await - .unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new( + write_buffer + .persister + .load_or_create_catalog() + .await + .unwrap(), + ); + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let write_buffer = WriteBufferImpl::new( 
Arc::clone(&write_buffer.persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&write_buffer.time_provider), Arc::clone(&write_buffer.buffer.executor), @@ -1595,7 +1626,9 @@ mod tests { ) .await; let db_name = "my_corp"; + let db_id = DbId::from(0); let tbl_name = "temp"; + let tbl_id = TableId::from(0); // make some writes to generate a snapshot: do_writes( @@ -1641,7 +1674,7 @@ mod tests { verify_snapshot_count(1, &wbuf.persister).await; // get the path for the created parquet file: - let persisted_files = wbuf.persisted_files().get_files(db_name, tbl_name); + let persisted_files = wbuf.persisted_files().get_files(db_id, tbl_id); assert_eq!(1, persisted_files.len()); let path = ObjPath::from(persisted_files[0].path.as_str()); @@ -1699,7 +1732,9 @@ mod tests { ) .await; let db_name = "my_corp"; + let db_id = DbId::from(0); let tbl_name = "temp"; + let tbl_id = TableId::from(0); // make some writes to generate a snapshot: do_writes( @@ -1745,7 +1780,7 @@ mod tests { verify_snapshot_count(1, &wbuf.persister).await; // get the path for the created parquet file: - let persisted_files = wbuf.persisted_files().get_files(db_name, tbl_name); + let persisted_files = wbuf.persisted_files().get_files(db_id, tbl_id); assert_eq!(1, persisted_files.len()); let path = ObjPath::from(persisted_files[0].path.as_str()); @@ -1884,11 +1919,11 @@ mod tests { (object_store, None) }; let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); - let catalog = persister.load_or_create_catalog().await.unwrap(); - let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); + let catalog = Arc::new(persister.load_or_create_catalog().await.unwrap()); + let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog)).unwrap(); let wbuf = WriteBufferImpl::new( Arc::clone(&persister), - Arc::new(catalog), + catalog, Arc::new(last_cache), Arc::clone(&time_provider), crate::test_help::make_exec(), diff --git a/influxdb3_write/src/write_buffer/persisted_files.rs b/influxdb3_write/src/write_buffer/persisted_files.rs index f6d7e69082..ea742de97c 100644 --- a/influxdb3_write/src/write_buffer/persisted_files.rs +++ b/influxdb3_write/src/write_buffer/persisted_files.rs @@ -4,11 +4,12 @@ use crate::{ParquetFile, PersistedSnapshot}; use hashbrown::HashMap; +use influxdb3_id::DbId; +use influxdb3_id::TableId; use parking_lot::RwLock; -use std::sync::Arc; -type DatabaseToTables = HashMap<Arc<str>, TableToFiles>; -type TableToFiles = HashMap<Arc<str>, Vec<ParquetFile>>; +type DatabaseToTables = HashMap<DbId, TableToFiles>; +type TableToFiles = HashMap<TableId, Vec<ParquetFile>>; #[derive(Debug, Default)] pub struct PersistedFiles { @@ -25,10 +26,10 @@ impl PersistedFiles { } /// Add a file to the list of persisted files - pub fn add_file(&self, db_name: &str, table_name: &str, file: ParquetFile) { + pub fn add_file(&self, db_id: DbId, table_id: TableId, file: ParquetFile) { let mut inner = self.inner.write(); - let tables = inner.files.entry_ref(db_name).or_default(); - let table_files = tables.entry_ref(table_name).or_default(); + let tables = inner.files.entry(db_id).or_default(); + let table_files = tables.entry(table_id).or_default(); table_files.push(file); } @@ -39,13 +40,13 @@ impl PersistedFiles { } /// Get the list of files for a given database and table, always return in descending order of min_time - pub fn get_files(&self, db_name: &str, table_name: &str) -> Vec<ParquetFile> { + pub fn get_files(&self, db_id: DbId, table_id: TableId) -> 
Vec<ParquetFile> { let mut files = { let inner = self.inner.read(); inner .files - .get(db_name) - .and_then(|tables| tables.get(table_name)) + .get(&db_id) + .and_then(|tables| tables.get(&table_id)) .cloned() .unwrap_or_default() }; @@ -121,21 +122,21 @@ fn as_mb(bytes: u64) -> f64 { fn update_persisted_files_with_snapshot( initial_load: bool, persisted_snapshot: PersistedSnapshot, - db_to_tables: &mut HashMap<Arc<str>, HashMap<Arc<str>, Vec<ParquetFile>>>, + db_to_tables: &mut HashMap<DbId, HashMap<TableId, Vec<ParquetFile>>>, ) -> u64 { let mut file_count = 0; persisted_snapshot .databases .into_iter() - .for_each(|(db_name, tables)| { - let db_tables: &mut HashMap<Arc<str>, Vec<ParquetFile>> = - db_to_tables.entry(db_name).or_default(); + .for_each(|(db_id, tables)| { + let db_tables: &mut HashMap<TableId, Vec<ParquetFile>> = + db_to_tables.entry(db_id).or_default(); tables .tables .into_iter() - .for_each(|(table_name, mut new_parquet_files)| { - let table_files = db_tables.entry(table_name).or_default(); + .for_each(|(table_id, mut new_parquet_files)| { + let table_files = db_tables.entry(table_id).or_default(); if initial_load { file_count += new_parquet_files.len() as u64; table_files.append(&mut new_parquet_files); @@ -202,10 +203,10 @@ mod tests { .last() .unwrap() .databases - .get("db-1") + .get(&DbId::from(0)) .unwrap() .tables - .get("table-1") + .get(&TableId::from(0)) .unwrap() .last() .cloned() @@ -260,7 +261,7 @@ mod tests { // TODO: Check why `add_parquet_file` method does not check if file is // already present. This is checked when trying to add a new PersistedSnapshot // as part of snapshotting process. - new_snapshot.add_parquet_file(Arc::from("db-1"), Arc::from("table-1"), file); + new_snapshot.add_parquet_file(DbId::from(0), TableId::from(0), file); }); new_snapshot } diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs index 09dc56364d..903c7125b2 100644 --- a/influxdb3_write/src/write_buffer/queryable_buffer.rs +++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs @@ -8,15 +8,14 @@ use crate::write_buffer::table_buffer::TableBuffer; use crate::{ParquetFile, ParquetFileId, PersistedSnapshot}; use arrow::record_batch::RecordBatch; use async_trait::async_trait; -use data_types::{ - ChunkId, ChunkOrder, PartitionKey, TableId, TimestampMinMax, TransitionPartitionId, -}; +use data_types::{ChunkId, ChunkOrder, PartitionKey, TimestampMinMax, TransitionPartitionId}; use datafusion::catalog::Session; use datafusion::common::DataFusionError; use datafusion::logical_expr::Expr; use datafusion_util::stream_from_batches; use hashbrown::HashMap; use influxdb3_catalog::catalog::{Catalog, DatabaseSchema}; +use influxdb3_id::{DbId, TableId}; use influxdb3_wal::{CatalogOp, SnapshotDetails, WalContents, WalFileNotifier, WalOp, WriteBatch}; use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges}; use iox_query::exec::Executor; @@ -81,20 +80,24 @@ impl QueryableBuffer { _projection: Option<&Vec<usize>>, _ctx: &dyn Session, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { + let table_id = self + .catalog + .table_name_to_id(db_schema.id, table_name.into()) + .ok_or_else(|| DataFusionError::Execution(format!("table {} not found", table_name)))?; let table = db_schema .tables - .get(table_name) - .ok_or_else(|| DataFusionError::Execution(format!("table {} not found", table_name)))?; + .get(&table_id) + .expect("Checked table already exists"); let schema = table.schema.clone(); let 
arrow_schema = schema.as_arrow(); let buffer = self.buffer.read(); - let Some(db_buffer) = buffer.db_to_table.get(db_schema.name.as_ref()) else { + let Some(db_buffer) = buffer.db_to_table.get(&db_schema.id) else { return Ok(vec![]); }; - let Some(table_buffer) = db_buffer.get(table_name) else { + let Some(table_buffer) = db_buffer.get(&table_id) else { return Ok(vec![]); }; @@ -115,7 +118,7 @@ impl QueryableBuffer { schema: schema.clone(), stats: Arc::new(chunk_stats), partition_id: TransitionPartitionId::new( - TableId::new(0), + data_types::TableId::new(0), &PartitionKey::from(gen_time.to_string()), ), sort_key: None, @@ -150,23 +153,25 @@ impl QueryableBuffer { let mut persisting_chunks = vec![]; let catalog = Arc::clone(&buffer.catalog); - for (database_name, table_map) in buffer.db_to_table.iter_mut() { - for (table_name, table_buffer) in table_map.iter_mut() { + for (database_id, table_map) in buffer.db_to_table.iter_mut() { + for (table_id, table_buffer) in table_map.iter_mut() { let snapshot_chunks = table_buffer.snapshot(snapshot_details.end_time_marker); for chunk in snapshot_chunks { + let table_name = catalog + .table_id_to_name(*database_id, *table_id) + .expect("table exists"); + let db_name = catalog.db_id_to_name(*database_id).expect("db_exists"); let persist_job = PersistJob { - database_name: Arc::clone(database_name), - table_name: Arc::clone(table_name), + database_id: *database_id, + table_id: *table_id, + table_name: Arc::clone(&table_name), chunk_time: chunk.chunk_time, path: ParquetFilePath::new_with_chunk_time( - database_name.as_ref(), - catalog - .db_schema(database_name) - .expect("db exists") - .id - .as_u32(), + db_name.as_ref(), + database_id.as_u32(), table_name.as_ref(), + table_id.as_u32(), chunk.chunk_time, write.wal_file_number, ), @@ -242,8 +247,8 @@ impl QueryableBuffer { let mut cache_notifiers = vec![]; for persist_job in persist_jobs { let path = persist_job.path.to_string(); - let database_name = Arc::clone(&persist_job.database_name); - let table_name = Arc::clone(&persist_job.table_name); + let database_id = persist_job.database_id; + let table_id = persist_job.table_id; let chunk_time = persist_job.chunk_time; let min_time = persist_job.timestamp_min_max.min; let max_time = persist_job.timestamp_min_max.max; @@ -257,8 +262,8 @@ impl QueryableBuffer { .await; cache_notifiers.push(cache_notifier); persisted_snapshot.add_parquet_file( - database_name, - table_name, + database_id, + table_id, ParquetFile { id: ParquetFileId::new(), path, @@ -312,8 +317,8 @@ impl QueryableBuffer { receiver } - pub fn persisted_parquet_files(&self, db_name: &str, table_name: &str) -> Vec<ParquetFile> { - self.persisted_files.get_files(db_name, table_name) + pub fn persisted_parquet_files(&self, db_id: DbId, table_id: TableId) -> Vec<ParquetFile> { + self.persisted_files.get_files(db_id, table_id) } pub fn persisted_snapshot_notify_rx( @@ -345,11 +350,11 @@ impl WalFileNotifier for QueryableBuffer { #[derive(Debug)] pub struct BufferState { - pub db_to_table: HashMap<Arc<str>, TableNameToBufferMap>, + pub db_to_table: HashMap<DbId, TableIdToBufferMap>, catalog: Arc<Catalog>, } -type TableNameToBufferMap = HashMap<Arc<str>, TableBuffer>; +type TableIdToBufferMap = HashMap<TableId, TableBuffer>; impl BufferState { pub fn new(catalog: Arc<Catalog>) -> Self { @@ -370,17 +375,17 @@ impl BufferState { let db_schema = self .catalog - .db_schema(&catalog_batch.database_name) + .db_schema(&catalog_batch.database_id) .expect("database should exist"); for op in catalog_batch.ops 
{ match op { CatalogOp::CreateLastCache(definition) => { let table_schema = db_schema - .get_table_schema(&definition.table) + .get_table_schema(definition.table_id) .expect("table should exist"); last_cache_provider.create_cache_from_definition( - db_schema.name.as_ref(), + db_schema.id, table_schema, &definition, ); @@ -388,8 +393,8 @@ impl BufferState { CatalogOp::DeleteLastCache(cache) => { // we can ignore it if this doesn't exist for any reason let _ = last_cache_provider.delete_cache( - db_schema.name.as_ref(), - &cache.table, + db_schema.id, + cache.table_id, &cache.name, ); } @@ -406,30 +411,23 @@ impl BufferState { fn add_write_batch(&mut self, write_batch: WriteBatch) { let db_schema = self .catalog - .db_schema(&write_batch.database_name) + .db_schema(&write_batch.database_id) .expect("database should exist"); - let database_buffer = self - .db_to_table - .entry(write_batch.database_name) - .or_default(); - - for (table_name, table_chunks) in write_batch.table_chunks { - let table_buffer = database_buffer - .entry_ref(table_name.as_ref()) - .or_insert_with(|| { - let table_schema = db_schema - .get_table(table_name.as_ref()) - .expect("table should exist"); - let sort_key = table_schema - .schema - .primary_key() - .iter() - .map(|c| c.to_string()) - .collect::<Vec<_>>(); - let index_columns = table_schema.index_columns(); - - TableBuffer::new(&index_columns, SortKey::from(sort_key)) - }); + let database_buffer = self.db_to_table.entry(write_batch.database_id).or_default(); + + for (table_id, table_chunks) in write_batch.table_chunks { + let table_buffer = database_buffer.entry(table_id).or_insert_with(|| { + let table_schema = db_schema.get_table(table_id).expect("table should exist"); + let sort_key = table_schema + .schema + .primary_key() + .iter() + .map(|c| c.to_string()) + .collect::<Vec<_>>(); + let index_columns = table_schema.index_columns(); + + TableBuffer::new(&index_columns, SortKey::from(sort_key)) + }); for (chunk_time, chunk) in table_chunks.chunk_time_to_chunk { table_buffer.buffer_chunk(chunk_time, chunk.rows); } @@ -439,7 +437,8 @@ impl BufferState { #[derive(Debug)] struct PersistJob { - database_name: Arc<str>, + database_id: DbId, + table_id: TableId, table_name: Arc<str>, chunk_time: i64, path: ParquetFilePath, @@ -459,10 +458,10 @@ async fn sort_dedupe_persist( // iox_query let row_count = persist_job.batch.num_rows(); info!( - "Persisting {} rows for db {} and table {} and chunk {} to file {}", + "Persisting {} rows for db id {} and table id {} and chunk {} to file {}", row_count, - persist_job.database_name, - persist_job.table_name, + persist_job.database_id, + persist_job.table_id, persist_job.chunk_time, persist_job.path.to_string() ); @@ -479,7 +478,7 @@ async fn sort_dedupe_persist( schema: persist_job.schema.clone(), stats: Arc::new(chunk_stats), partition_id: TransitionPartitionId::new( - TableId::new(0), + data_types::TableId::new(0), &PartitionKey::from(format!("{}", persist_job.chunk_time)), ), sort_key: Some(persist_job.sort_key.clone()), @@ -491,7 +490,7 @@ async fn sort_dedupe_persist( let logical_plan = ReorgPlanner::new() .compact_plan( - TableId::new(0), + data_types::TableId::new(0), persist_job.table_name, &persist_job.schema, chunks, diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap index 
4fb9f2de38..714d7dd0c7 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap @@ -3,12 +3,12 @@ source: influxdb3_write/src/write_buffer/mod.rs expression: catalog_json --- { - "databases": { - "db": { + "databases": [ + { "id": 0, "name": "db", - "tables": { - "table": { + "tables": [ + { "cols": { "f1": { "influx_type": "field", @@ -49,16 +49,31 @@ expression: catalog_json "n": 1, "name": "cache", "table": "table", + "table_id": 0, "ttl": 14400, "vals": null } ], - "name": "table" + "table_id": 0, + "table_name": "table" } - } + ] } - }, + ], + "db_map": [ + { + "db_id": 0, + "name": "db" + } + ], "host_id": "test_host", "instance_id": "[uuid]", - "sequence": 3 + "sequence": 3, + "table_map": [ + { + "db_id": 0, + "name": "table", + "table_id": 0 + } + ] } diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap index fe7816c5d9..707318c64e 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap @@ -3,12 +3,12 @@ source: influxdb3_write/src/write_buffer/mod.rs expression: catalog_json --- { - "databases": { - "db": { + "databases": [ + { "id": 0, "name": "db", - "tables": { - "table": { + "tables": [ + { "cols": { "f1": { "influx_type": "field", @@ -44,16 +44,31 @@ expression: catalog_json "n": 1, "name": "cache", "table": "table", + "table_id": 0, "ttl": 14400, "vals": null } ], - "name": "table" + "table_id": 0, + "table_name": "table" } - } + ] } - }, + ], + "db_map": [ + { + "db_id": 0, + "name": "db" + } + ], "host_id": "test_host", "instance_id": "[uuid]", - "sequence": 2 + "sequence": 2, + "table_map": [ + { + "db_id": 0, + "name": "table", + "table_id": 0 + } + ] } diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap index 6916b0364d..d7aa539bb6 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap @@ -3,12 +3,12 @@ source: influxdb3_write/src/write_buffer/mod.rs expression: catalog_json --- { - "databases": { - "db": { + "databases": [ + { "id": 0, "name": "db", - "tables": { - "table": { + "tables": [ + { "cols": { "f1": { "influx_type": "field", @@ -41,12 +41,26 @@ expression: catalog_json } } }, - "name": "table" + "table_id": 0, + "table_name": "table" } - } + ] } - }, + ], + "db_map": [ + { + "db_id": 0, + "name": "db" + } + ], "host_id": "test_host", "instance_id": "[uuid]", - "sequence": 4 + "sequence": 4, + "table_map": [ + { + "db_id": 0, + "name": "table", + "table_id": 0 + } + ] } diff --git a/influxdb3_write/src/write_buffer/validator.rs 
b/influxdb3_write/src/write_buffer/validator.rs index d99e11c0b2..0697d82efa 100644 --- a/influxdb3_write/src/write_buffer/validator.rs +++ b/influxdb3_write/src/write_buffer/validator.rs @@ -1,12 +1,15 @@ use std::{borrow::Cow, sync::Arc}; use crate::{write_buffer::Result, Precision, WriteLineError}; +use bimap::BiHashMap; use data_types::{NamespaceName, Timestamp}; use hashbrown::HashMap; use influxdb3_catalog::catalog::{ influx_column_type_from_field_value, Catalog, DatabaseSchema, TableDefinition, }; +use influxdb3_id::DbId; +use influxdb3_id::TableId; use influxdb3_wal::{ CatalogBatch, CatalogOp, Field, FieldAdditions, FieldData, FieldDataType, FieldDefinition, Gen1Duration, Row, TableChunks, WriteBatch, @@ -86,8 +89,15 @@ impl WriteValidator<WithCatalog> { line_number: line_idx + 1, error_message: e.to_string(), }) - .and_then(|l| validate_v3_line(&mut schema, line_idx, l, lp_lines.peek().unwrap())) - { + .and_then(|l| { + validate_v3_line( + &self.state.catalog, + &mut schema, + line_idx, + l, + lp_lines.peek().unwrap(), + ) + }) { Ok(line) => line, Err(e) => { if !accept_partial { @@ -159,7 +169,7 @@ impl WriteValidator<WithCatalog> { line_number: line_idx + 1, error_message: e.to_string(), }) - .and_then(|l| validate_v1_line(&mut schema, line_idx, l)) + .and_then(|l| validate_v1_line(&self.state.catalog, &mut schema, line_idx, l)) { Ok(line) => line, Err(e) => { @@ -216,6 +226,7 @@ impl WriteValidator<WithCatalog> { /// This errors if the write is being performed against a v1 table, i.e., one that does not have /// a series key. fn validate_v3_line<'a>( + catalog: &Catalog, db_schema: &mut Cow<'_, DatabaseSchema>, line_number: usize, line: v3::ParsedLine<'a>, @@ -223,7 +234,11 @@ fn validate_v3_line<'a>( ) -> Result<(v3::ParsedLine<'a>, Option<CatalogOp>), WriteLineError> { let mut catalog_op = None; let table_name = line.series.measurement.as_str(); - if let Some(table_def) = db_schema.get_table(table_name) { + if let Some(table_def) = catalog + .table_name_to_id(db_schema.id, table_name.into()) + .and_then(|table_id| db_schema.get_table(table_id)) + { + let table_id = table_def.table_id; if !table_def.is_v3() { return Err(WriteLineError { original_line: raw_line.to_string(), @@ -243,7 +258,7 @@ fn validate_v3_line<'a>( error_message: format!( "write to table {table_name} had the incorrect series key, \ expected: [{expected}], received: [{received}]", - table_name = table_def.name, + table_name = table_def.table_name, expected = s.join(", "), received = l.join(", "), ), @@ -258,7 +273,7 @@ fn validate_v3_line<'a>( error_message: format!( "write to table {table_name} was missing a series key, the series key \ contains [{key_members}]", - table_name = table_def.name, + table_name = table_def.table_name, key_members = s.join(", "), ), }); @@ -303,7 +318,8 @@ fn validate_v3_line<'a>( // have been parsed and validated. 
if !columns.is_empty() { let database_name = Arc::clone(&db_schema.name); - let t = db_schema.to_mut().tables.get_mut(table_name).unwrap(); + let database_id = db_schema.id; + let t = db_schema.to_mut().tables.get_mut(&table_id).unwrap(); let mut fields = Vec::with_capacity(columns.len()); for (name, influx_type) in &columns { @@ -313,8 +329,10 @@ fn validate_v3_line<'a>( }); } catalog_op = Some(CatalogOp::AddFields(FieldAdditions { + database_id, database_name, - table_name: Arc::clone(&t.name), + table_id: t.table_id, + table_name: Arc::clone(&t.table_name), field_definitions: fields, })); @@ -325,6 +343,7 @@ fn validate_v3_line<'a>( })?; } } else { + let table_id = TableId::new(); let mut columns = Vec::new(); let mut key = Vec::new(); if let Some(series_key) = &line.series.series_key { @@ -352,14 +371,21 @@ fn validate_v3_line<'a>( }); } - let table = TableDefinition::new(Arc::clone(&table_name), columns, Some(key.clone())) - .map_err(|e| WriteLineError { - original_line: raw_line.to_string(), - line_number: line_number + 1, - error_message: e.to_string(), - })?; + let table = TableDefinition::new( + table_id, + Arc::clone(&table_name), + columns, + Some(key.clone()), + ) + .map_err(|e| WriteLineError { + original_line: raw_line.to_string(), + line_number: line_number + 1, + error_message: e.to_string(), + })?; let table_definition_op = CatalogOp::CreateTable(influxdb3_wal::TableDefinition { + table_id, + database_id: db_schema.id, database_name: Arc::clone(&db_schema.name), table_name: Arc::clone(&table_name), field_definitions: fields, @@ -367,12 +393,24 @@ fn validate_v3_line<'a>( }); catalog_op = Some(table_definition_op); + // We have to add the mapping here or else each line might create a new + // table and table_id before the CatalogOp is applied + catalog + .inner() + .write() + .table_map + .entry(db_schema.id) + .and_modify(|map| { + map.insert(table_id, Arc::clone(&table_name)); + }) + .or_insert_with(|| { + let mut map = BiHashMap::new(); + map.insert(table_id, Arc::clone(&table_name)); + map + }); + assert!( - db_schema - .to_mut() - .tables - .insert(table_name, table) - .is_none(), + db_schema.to_mut().tables.insert(table_id, table).is_none(), "attempted to overwrite existing table" ) } @@ -388,13 +426,17 @@ fn validate_v3_line<'a>( /// An error will also be produced if the write, which is for the v1 data model, is targetting /// a v3 table. fn validate_v1_line<'a>( + catalog: &Catalog, db_schema: &mut Cow<'_, DatabaseSchema>, line_number: usize, line: ParsedLine<'a>, ) -> Result<(ParsedLine<'a>, Option<CatalogOp>), WriteLineError> { let mut catalog_op = None; let table_name = line.series.measurement.as_str(); - if let Some(table_def) = db_schema.get_table(table_name) { + if let Some(table_def) = catalog + .table_name_to_id(db_schema.id, table_name.into()) + .and_then(|table_id| db_schema.get_table(table_id)) + { if table_def.is_v3() { return Err(WriteLineError { original_line: line.to_string(), @@ -443,7 +485,9 @@ fn validate_v1_line<'a>( // have been parsed and validated. 
if !columns.is_empty() { let database_name = Arc::clone(&db_schema.name); - let table_name = Arc::clone(&table_def.name); + let database_id = db_schema.id; + let table_name: Arc<str> = Arc::clone(&table_def.table_name); + let table_id = table_def.table_id; let mut fields = Vec::with_capacity(columns.len()); for (name, influx_type) in &columns { @@ -454,11 +498,7 @@ fn validate_v1_line<'a>( } // unwrap is safe due to the surrounding if let condition: - let t = db_schema - .to_mut() - .tables - .get_mut(table_name.as_ref()) - .unwrap(); + let t = db_schema.to_mut().tables.get_mut(&table_id).unwrap(); t.add_columns(columns).map_err(|e| WriteLineError { original_line: line.to_string(), line_number: line_number + 1, @@ -467,11 +507,14 @@ fn validate_v1_line<'a>( catalog_op = Some(CatalogOp::AddFields(FieldAdditions { database_name, + database_id, + table_id, table_name, field_definitions: fields, })); } } else { + let table_id = TableId::new(); // This is a new table, so build up its columns: let mut columns = Vec::new(); if let Some(tag_set) = &line.series.tag_set { @@ -498,13 +541,32 @@ fn validate_v1_line<'a>( }); } catalog_op = Some(CatalogOp::CreateTable(influxdb3_wal::TableDefinition { + table_id, + database_id: db_schema.id, database_name: Arc::clone(&db_schema.name), table_name: Arc::clone(&table_name), field_definitions: fields, key: None, })); + // We have to add the mapping here or else each line might create a new + // table and table_id before the CatalogOp is applied + catalog + .inner() + .write() + .table_map + .entry(db_schema.id) + .and_modify(|map| { + map.insert(table_id, Arc::clone(&table_name)); + }) + .or_insert_with(|| { + let mut map = BiHashMap::new(); + map.insert(table_id, Arc::clone(&table_name)); + map + }); + let table = TableDefinition::new( + table_id, Arc::clone(&table_name), columns, Option::<Vec<String>>::None, @@ -512,11 +574,7 @@ fn validate_v1_line<'a>( .unwrap(); assert!( - db_schema - .to_mut() - .tables - .insert(table_name, table) - .is_none(), + db_schema.to_mut().tables.insert(table_id, table).is_none(), "attempted to overwrite existing table" ); } @@ -570,6 +628,8 @@ impl<'lp> WriteValidator<LinesParsed<'lp, v3::ParsedLine<'lp>>> { .unwrap_or(0); convert_v3_parsed_line( + &self.state.catalog.catalog, + self.state.catalog.db_schema.id, line, &mut table_chunks, ingest_time, @@ -596,8 +656,10 @@ impl<'lp> WriteValidator<LinesParsed<'lp, v3::ParsedLine<'lp>>> { } fn convert_v3_parsed_line( + catalog: &Catalog, + db_id: DbId, line: v3::ParsedLine<'_>, - table_chunk_map: &mut HashMap<Arc<str>, TableChunks>, + table_chunk_map: &mut HashMap<TableId, TableChunks>, ingest_time: Time, gen1_duration: Gen1Duration, precision: Precision, @@ -637,7 +699,10 @@ fn convert_v3_parsed_line( // Add the row into the correct chunk in the table let chunk_time = gen1_duration.chunk_time_for_timestamp(Timestamp::new(time_value_nanos)); let table_name: Arc<str> = line.series.measurement.to_string().into(); - let table_chunks = table_chunk_map.entry(Arc::clone(&table_name)).or_default(); + let table_id = catalog + .table_name_to_id(db_id, Arc::clone(&table_name)) + .expect("table should exist by this point"); + let table_chunks = table_chunk_map.entry(table_id).or_default(); table_chunks.push_row( chunk_time, Row { @@ -670,6 +735,8 @@ impl<'lp> WriteValidator<LinesParsed<'lp, ParsedLine<'lp>>> { tag_count += line.series.tag_set.as_ref().map(|t| t.len()).unwrap_or(0); convert_v1_parsed_line( + &self.state.catalog.catalog, + self.state.catalog.db_schema.id, line, &mut 
table_chunks, ingest_time, @@ -696,8 +763,10 @@ impl<'lp> WriteValidator<LinesParsed<'lp, ParsedLine<'lp>>> { } fn convert_v1_parsed_line( + catalog: &Catalog, + db_id: DbId, line: ParsedLine<'_>, - table_chunk_map: &mut HashMap<Arc<str>, TableChunks>, + table_chunk_map: &mut HashMap<TableId, TableChunks>, ingest_time: Time, gen1_duration: Gen1Duration, precision: Precision, @@ -747,7 +816,10 @@ fn convert_v1_parsed_line( }); let table_name: Arc<str> = line.series.measurement.to_string().into(); - let table_chunks = table_chunk_map.entry(table_name).or_default(); + let table_id = catalog + .table_name_to_id(db_id, Arc::clone(&table_name)) + .expect("table should exist by this point"); + let table_chunks = table_chunk_map.entry(table_id).or_default(); table_chunks.push_row( chunk_time, Row { @@ -782,6 +854,7 @@ mod tests { use crate::{catalog::Catalog, write_buffer::Error, Precision}; use data_types::NamespaceName; + use influxdb3_id::TableId; use influxdb3_wal::Gen1Duration; use iox_time::Time; @@ -807,7 +880,12 @@ mod tests { assert!(result.errors.is_empty()); assert_eq!(result.valid_data.database_name.as_ref(), namespace.as_str()); - let batch = result.valid_data.table_chunks.get("cpu").unwrap(); + // cpu table + let batch = result + .valid_data + .table_chunks + .get(&TableId::from(0)) + .unwrap(); assert_eq!(batch.row_count(), 1); Ok(())
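The preceding diff replaces the name-keyed maps (`Arc<str>`) in `PersistedSnapshot` and `PersistedFiles` with maps keyed by `DbId` and `TableId`. The following is a minimal, self-contained sketch of that id-keyed layout; the `DbId`, `TableId`, and `ParquetFile` types here are simplified stand-ins for the real `influxdb3_id` / `influxdb3_write` types, not the actual implementation.

```rust
use std::collections::HashMap;

// Simplified stand-ins for influxdb3_id::DbId / TableId and the ParquetFile metadata.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DbId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct TableId(u32);
#[derive(Clone, Debug)]
struct ParquetFile {
    path: String,
}

#[derive(Default)]
struct PersistedFiles {
    // db id -> table id -> files, replacing the previous name-keyed maps.
    files: HashMap<DbId, HashMap<TableId, Vec<ParquetFile>>>,
}

impl PersistedFiles {
    fn add_file(&mut self, db_id: DbId, table_id: TableId, file: ParquetFile) {
        self.files
            .entry(db_id)
            .or_default()
            .entry(table_id)
            .or_default()
            .push(file);
    }

    fn get_files(&self, db_id: DbId, table_id: TableId) -> Vec<ParquetFile> {
        self.files
            .get(&db_id)
            .and_then(|tables| tables.get(&table_id))
            .cloned()
            .unwrap_or_default()
    }
}

fn main() {
    let mut persisted = PersistedFiles::default();
    persisted.add_file(
        DbId(0),
        TableId(0),
        ParquetFile {
            path: "dbs/my_db-0/my_table-0/2038-01-19/03-14/0.parquet".into(),
        },
    );
    assert_eq!(persisted.get_files(DbId(0), TableId(0)).len(), 1);
    assert!(persisted.get_files(DbId(0), TableId(1)).is_empty());
}
```

The nested `HashMap<DbId, HashMap<TableId, Vec<ParquetFile>>>` mirrors the `DatabaseToTables` / `TableToFiles` aliases introduced in `persisted_files.rs` above.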
3f6bb3e330a8b6ba14dec6d30903ecca9d2c4620
Stuart Carnie
2023-01-16 09:00:41
Parse IANA timezones in an InfluxQL TZ clause (#6585)
* feat: Parse IANA timezone strings to chrono_tz::Tz * feat: Visitors can customise the return error type. This avoids having to remap errors from `&'static str` to the caller's error type, and will be used in a future PR for time range expressions. * chore: Run cargo hakari tasks
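The "customise the return error type" point above can be sketched with an associated `Error` type on the visitor trait. The trait and struct names below are hypothetical and only illustrate the idea, not the actual `influxdb_influxql_parser` visitor API.

```rust
// Hedged sketch: a visitor trait whose implementor picks its own error type,
// so callers no longer need to remap from `&'static str`.
trait Visitor {
    type Error;
    fn visit_node(&mut self, node: &str) -> Result<(), Self::Error>;
}

// Hypothetical implementor using its own error type end to end.
struct Collector {
    seen: Vec<String>,
}

impl Visitor for Collector {
    type Error = std::io::Error;

    fn visit_node(&mut self, node: &str) -> Result<(), Self::Error> {
        self.seen.push(node.to_string());
        Ok(())
    }
}

fn main() {
    let mut v = Collector { seen: Vec::new() };
    v.visit_node("SELECT").unwrap();
    assert_eq!(v.seen, vec!["SELECT".to_string()]);
}
```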
Co-authored-by: CircleCI[bot] <[email protected]>
feat: Parse IANA timezones in an InfluxQL TZ clause (#6585) * feat: Parse IANA timezone strings to chrono_tz::Tz * feat: Visitors can customise the return error type. This avoids having to remap errors from `&'static str` to the caller's error type, and will be used in a future PR for time range expressions. * chore: Run cargo hakari tasks Co-authored-by: CircleCI[bot] <[email protected]>
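The parsing behaviour this commit introduces can be illustrated directly with the `chrono-tz` crate that the diff below adds as a dependency: `chrono_tz::Tz` implements `FromStr`, which is what the new `timezone_clause` parser relies on via `s.parse().map(TimeZoneClause)`. A small sketch, assuming `chrono-tz = "0.8"` in `Cargo.toml`:

```rust
use chrono_tz::Tz;

fn main() {
    // A valid IANA name parses to a concrete `Tz`, matching the new test
    // for TZ('Australia/Hobart') in select.rs.
    let tz: Tz = "Australia/Hobart".parse().expect("known IANA timezone");
    assert_eq!(tz, chrono_tz::Australia::Hobart);
    assert_eq!("UTC".parse::<Tz>().unwrap(), chrono_tz::UTC);

    // An unknown name fails to parse; the TZ clause parser reports this
    // as "unable to find timezone".
    assert!("Not/AZone".parse::<Tz>().is_err());
}
```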
diff --git a/Cargo.lock b/Cargo.lock index ab6d928b3f..b240fb3738 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -723,6 +723,28 @@ dependencies = [ "scanlex", ] +[[package]] +name = "chrono-tz" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa48fa079165080f11d7753fd0bc175b7d391f276b965fe4b55bfad67856e463" +dependencies = [ + "chrono", + "chrono-tz-build", + "phf", +] + +[[package]] +name = "chrono-tz-build" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9998fb9f7e9b2111641485bf8beb32f92945f97f92a3d061f744cfef335f751" +dependencies = [ + "parse-zoneinfo", + "phf", + "phf_codegen", +] + [[package]] name = "ciborium" version = "0.2.0" @@ -2331,6 +2353,8 @@ name = "influxdb_influxql_parser" version = "0.1.0" dependencies = [ "assert_matches", + "chrono", + "chrono-tz", "insta", "nom", "once_cell", @@ -3887,6 +3911,15 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "parse-zoneinfo" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c705f256449c60da65e11ff6626e0c16a0a0b96aaa348de61376b249bc340f41" +dependencies = [ + "regex", +] + [[package]] name = "paste" version = "1.0.11" @@ -3999,6 +4032,44 @@ dependencies = [ "indexmap", ] +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56ac890c5e3ca598bbdeaa99964edb5b0258a583a9eb6ef4e89fc85d9224770" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1181c94580fa345f50f19d738aaa39c0ed30a600d95cb2d3e23f94266f14fbf" +dependencies = [ + "phf_shared", + "rand", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -6529,10 +6600,12 @@ dependencies = [ "once_cell", "parking_lot 0.12.1", "parquet", + "phf_shared", "predicates", "prost 0.11.6", "prost-types 0.11.6", "rand", + "rand_core", "regex", "regex-automata", "regex-syntax", diff --git a/influxdb_influxql_parser/Cargo.toml b/influxdb_influxql_parser/Cargo.toml index c1a4505294..e155ef0eec 100644 --- a/influxdb_influxql_parser/Cargo.toml +++ b/influxdb_influxql_parser/Cargo.toml @@ -8,7 +8,9 @@ license.workspace = true [dependencies] # In alphabetical order nom = { version = "7", default-features = false, features = ["std"] } once_cell = "1" -workspace-hack = { path = "../workspace-hack"} +chrono = { version = "0.4", default-features = false } +chrono-tz = { version = "0.8" } +workspace-hack = { path = "../workspace-hack" } [dev-dependencies] # In alphabetical order test_helpers = { path = "../test_helpers" } diff --git a/influxdb_influxql_parser/src/select.rs b/influxdb_influxql_parser/src/select.rs index 13604cd0e2..a0f462e77e 100644 --- a/influxdb_influxql_parser/src/select.rs +++ b/influxdb_influxql_parser/src/select.rs @@ -13,13 +13,13 @@ use crate::expression::arithmetic::{ }; use crate::expression::conditional::is_valid_now_call; use 
crate::identifier::{identifier, Identifier}; -use crate::internal::{expect, verify, ParseResult}; +use crate::impl_tuple_clause; +use crate::internal::{expect, map_fail, verify, ParseResult}; use crate::keywords::keyword; use crate::literal::{duration, literal, number, unsigned_integer, Literal, Number}; use crate::parameter::parameter; use crate::select::MeasurementSelection::Subquery; use crate::string::{regex, single_quoted_string, Regex}; -use crate::{impl_tuple_clause, write_escaped}; use nom::branch::alt; use nom::bytes::complete::tag; use nom::character::complete::char; @@ -630,17 +630,15 @@ fn soffset_clause(i: &str) -> ParseResult<&str, SOffsetClause> { )(i) } -/// Represents the value of the time zone string of a `TZ` clause. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TimeZoneClause(pub(crate) String); +/// Represents an IANA time zone parsed from the `TZ` clause. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct TimeZoneClause(pub(crate) chrono_tz::Tz); -impl_tuple_clause!(TimeZoneClause, String); +impl_tuple_clause!(TimeZoneClause, chrono_tz::Tz); impl Display for TimeZoneClause { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.write_str("TZ('")?; - write_escaped!(f, self.0, '\n' => "\\n", '\\' => "\\\\", '\'' => "\\'", '"' => "\\\""); - f.write_str("')") + write!(f, "TZ('{}')", self.0) } } @@ -649,6 +647,34 @@ impl Display for TimeZoneClause { /// ```text /// timezone_clause ::= "TZ" "(" single_quoted_string ")" /// ``` +/// +/// ## NOTE +/// +/// There are some differences with how the IANA timezone string +/// is parsed to a [chrono_tz::Tz] in Rust vs a [`time.Location`][location] via +/// Go's [`time.LoadLocation`][load_location] +/// function, which is used by the canonical Go InfluxQL parser. +/// +/// It isn't expected that these differences matter for parsing, however, +/// the notable differences are: +/// +/// * Specifying the location name `Local` returns `time.Local`, which represents +/// the system's local time zone. As a result, a user could specify a `TZ` clause +/// as `TZ('Local')` to use the local time zone of the server running InfluxDB. +/// +/// * on macOS, IANA name lookups are case-insensitive, whereas the Rust implementation +/// is case-sensitive. However, this is purely a result of the Go implementation, +/// which loads the zoneinfo files from the filesystem. macOS uses a case-insensitive +/// file system by default. When using a case-sensitive file system, name lookups are +/// also case-sensitive. +/// +/// * Go's implementation (by default) loads the timezone database from the local file system +/// vs Rust's implementation, where the database is statically compiled into the binary. Changes +/// to the IANA database on disk will allow an existing binary to load new timezones. 
+/// +/// [location]: https://github.com/influxdata/influxql/blob/7e7d61973256ffeef4b99edd0a89f18a9e52fa2d/parser.go#L2384 +/// [load_location]: https://pkg.go.dev/time#LoadLocation +/// fn timezone_clause(i: &str) -> ParseResult<&str, TimeZoneClause> { preceded( keyword("TZ"), @@ -656,7 +682,12 @@ fn timezone_clause(i: &str) -> ParseResult<&str, TimeZoneClause> { preceded(ws0, char('(')), expect( "invalid TZ clause, expected string", - preceded(ws0, map(single_quoted_string, TimeZoneClause)), + preceded( + ws0, + map_fail("unable to find timezone", single_quoted_string, |s| { + s.parse().map(TimeZoneClause) + }), + ), ), preceded(ws0, char(')')), ), @@ -1189,13 +1220,17 @@ mod test { #[test] fn test_timezone_clause() { let (_, got) = timezone_clause("TZ('Australia/Hobart')").unwrap(); - assert_eq!(*got, "Australia/Hobart"); + assert_eq!(*got, chrono_tz::Australia::Hobart); + + let (_, got) = timezone_clause("TZ('UTC')").unwrap(); + assert_eq!(*got, chrono_tz::UTC); // Fallible cases assert_expect_error!( timezone_clause("TZ(foo)"), "invalid TZ clause, expected string" ); + assert_expect_error!(timezone_clause("TZ('Foo')"), "unable to find timezone"); } #[test] diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap index 715115ee4b..926231af6e 100644 --- a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit__test__select_statement-6.snap @@ -2,8 +2,8 @@ source: influxdb_influxql_parser/src/visit.rs expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)" --- -- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" -- "pre_visit_select_statement: SelectStatement 
{ fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" +- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" +- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), 
data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" - "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" - "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" - "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" @@ -86,8 +86,8 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE - "post_visit_slimit_clause: SLimitClause(3)" - "pre_visit_soffset_clause: SOffsetClause(4)" - "post_visit_soffset_clause: SOffsetClause(4)" -- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" -- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" -- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" -- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: 
[Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" +- "pre_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" +- "post_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" +- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" +- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, 
series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" diff --git a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap index 330e5dc006..d4975bfeb1 100644 --- a/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap +++ b/influxdb_influxql_parser/src/snapshots/influxdb_influxql_parser__visit_mut__test__select_statement-6.snap @@ -2,8 +2,8 @@ source: influxdb_influxql_parser/src/visit_mut.rs expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE host = \"node1\")\n WHERE region =~ /west/ AND value > 5\n GROUP BY TIME(5m), host\n FILL(previous)\n ORDER BY TIME DESC\n LIMIT 1 OFFSET 2\n SLIMIT 3 SOFFSET 4\n TZ('Australia/Hobart')\n \"#)" --- -- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" -- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: 
Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" +- "pre_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" +- "pre_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, 
op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" - "pre_visit_select_field_list: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }" - "pre_visit_select_field: Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }" - "pre_visit_expr: VarRef { name: Identifier(\"value\"), data_type: None }" @@ -86,8 +86,8 @@ expression: "visit_statement!(r#\"SELECT value FROM (SELECT usage FROM cpu WHERE - "post_visit_slimit_clause: SLimitClause(3)" - "pre_visit_soffset_clause: SOffsetClause(4)" - "post_visit_soffset_clause: SOffsetClause(4)" -- "pre_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" -- "post_visit_timezone_clause: TimeZoneClause(\"Australia/Hobart\")" -- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) }" -- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] 
}, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(\"Australia/Hobart\")) })" +- "pre_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" +- "post_visit_timezone_clause: TimeZoneClause(Australia/Hobart)" +- "post_visit_select_statement: SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) }" +- "post_visit_statement: Select(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"value\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Subquery(SelectStatement { fields: ZeroOrMore { contents: [Field { expr: VarRef { name: Identifier(\"usage\"), data_type: None }, alias: None }] }, from: ZeroOrMore { contents: [Name(QualifiedMeasurementName { database: None, retention_policy: None, name: Name(Identifier(\"cpu\")) })] }, condition: Some(WhereClause(Binary { lhs: Expr(VarRef { name: Identifier(\"host\"), data_type: None }), op: Eq, rhs: Expr(VarRef { name: Identifier(\"node1\"), data_type: None }) })), group_by: None, fill: None, order_by: None, limit: None, offset: None, series_limit: None, series_offset: None, timezone: None })] }, condition: Some(WhereClause(Binary { lhs: Binary { lhs: Expr(VarRef { name: Identifier(\"region\"), data_type: None }), op: EqRegex, rhs: Expr(Literal(Regex(Regex(\"west\")))) }, op: And, rhs: Binary { lhs: Expr(VarRef { name: Identifier(\"value\"), data_type: None }), op: Gt, rhs: Expr(Literal(Unsigned(5))) } })), group_by: Some(ZeroOrMore { contents: [Time { interval: 
Literal(Duration(Duration(300000000000))), offset: None }, Tag(Identifier(\"host\"))] }), fill: Some(Previous), order_by: Some(Descending), limit: Some(LimitClause(1)), offset: Some(OffsetClause(2)), series_limit: Some(SLimitClause(3)), series_offset: Some(SOffsetClause(4)), timezone: Some(TimeZoneClause(Australia/Hobart)) })" diff --git a/influxdb_influxql_parser/src/visit.rs b/influxdb_influxql_parser/src/visit.rs index 2e44984bae..ee2ce8dca3 100644 --- a/influxdb_influxql_parser/src/visit.rs +++ b/influxdb_influxql_parser/src/visit.rs @@ -3,14 +3,16 @@ //! # Example //! //! ``` -//! use influxdb_influxql_parser::visit::{Visitable, Visitor, VisitorResult}; +//! use influxdb_influxql_parser::visit::{Visitable, Visitor}; //! use influxdb_influxql_parser::parse_statements; //! use influxdb_influxql_parser::common::WhereClause; //! //! struct MyVisitor; //! //! impl Visitor for MyVisitor { -//! fn post_visit_where_clause(self, n: &WhereClause) -> VisitorResult<Self> { +//! type Error = (); +//! +//! fn post_visit_where_clause(self, n: &WhereClause) -> Result<Self, Self::Error> { //! println!("{}", n); //! Ok(self) //! } @@ -47,9 +49,6 @@ use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; use crate::statement::Statement; -/// The result type for a [`Visitor`]. -pub type VisitorResult<T, E = &'static str> = Result<T, E>; - /// Controls how the visitor recursion should proceed. pub enum Recursion<V: Visitor> { /// Attempt to visit all the children, recursively, of this expression. @@ -63,13 +62,16 @@ pub enum Recursion<V: Visitor> { /// any [`Visitable::accept`], `pre_visit` functions are invoked repeatedly /// until a leaf node is reached or a `pre_visit` function returns [`Recursion::Stop`]. pub trait Visitor: Sized { + /// The type returned in the event of an error traversing the tree. + type Error; + /// Invoked before any children of the InfluxQL statement are visited. - fn pre_visit_statement(self, _n: &Statement) -> VisitorResult<Recursion<Self>> { + fn pre_visit_statement(self, _n: &Statement) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the InfluxQL statement are visited. - fn post_visit_statement(self, _n: &Statement) -> VisitorResult<Self> { + fn post_visit_statement(self, _n: &Statement) -> Result<Self, Self::Error> { Ok(self) } @@ -77,7 +79,7 @@ pub trait Visitor: Sized { fn pre_visit_create_database_statement( self, _n: &CreateDatabaseStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -86,37 +88,46 @@ pub trait Visitor: Sized { fn post_visit_create_database_statement( self, _n: &CreateDatabaseStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `DELETE` statement are visited. - fn pre_visit_delete_statement(self, _n: &DeleteStatement) -> VisitorResult<Recursion<Self>> { + fn pre_visit_delete_statement( + self, + _n: &DeleteStatement, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `DELETE` statement are visited. - fn post_visit_delete_statement(self, _n: &DeleteStatement) -> VisitorResult<Self> { + fn post_visit_delete_statement(self, _n: &DeleteStatement) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `FROM` clause of a `DELETE` statement are visited. 
- fn pre_visit_delete_from_clause(self, _n: &DeleteFromClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_delete_from_clause( + self, + _n: &DeleteFromClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `FROM` clause of a `DELETE` statement are visited. - fn post_visit_delete_from_clause(self, _n: &DeleteFromClause) -> VisitorResult<Self> { + fn post_visit_delete_from_clause(self, _n: &DeleteFromClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the measurement name are visited. - fn pre_visit_measurement_name(self, _n: &MeasurementName) -> VisitorResult<Recursion<Self>> { + fn pre_visit_measurement_name( + self, + _n: &MeasurementName, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the measurement name are visited. - fn post_visit_measurement_name(self, _n: &MeasurementName) -> VisitorResult<Self> { + fn post_visit_measurement_name(self, _n: &MeasurementName) -> Result<Self, Self::Error> { Ok(self) } @@ -124,7 +135,7 @@ pub trait Visitor: Sized { fn pre_visit_drop_measurement_statement( self, _n: &DropMeasurementStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -132,27 +143,33 @@ pub trait Visitor: Sized { fn post_visit_drop_measurement_statement( self, _n: &DropMeasurementStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `EXPLAIN` statement are visited. - fn pre_visit_explain_statement(self, _n: &ExplainStatement) -> VisitorResult<Recursion<Self>> { + fn pre_visit_explain_statement( + self, + _n: &ExplainStatement, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `EXPLAIN` statement are visited. - fn post_visit_explain_statement(self, _n: &ExplainStatement) -> VisitorResult<Self> { + fn post_visit_explain_statement(self, _n: &ExplainStatement) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `SELECT` statement are visited. - fn pre_visit_select_statement(self, _n: &SelectStatement) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_statement( + self, + _n: &SelectStatement, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `SELECT` statement are visited. 
- fn post_visit_select_statement(self, _n: &SelectStatement) -> VisitorResult<Self> { + fn post_visit_select_statement(self, _n: &SelectStatement) -> Result<Self, Self::Error> { Ok(self) } @@ -160,7 +177,7 @@ pub trait Visitor: Sized { fn pre_visit_show_databases_statement( self, _n: &ShowDatabasesStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -168,7 +185,7 @@ pub trait Visitor: Sized { fn post_visit_show_databases_statement( self, _n: &ShowDatabasesStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } @@ -176,7 +193,7 @@ pub trait Visitor: Sized { fn pre_visit_show_measurements_statement( self, _n: &ShowMeasurementsStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -184,7 +201,7 @@ pub trait Visitor: Sized { fn post_visit_show_measurements_statement( self, _n: &ShowMeasurementsStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } @@ -192,7 +209,7 @@ pub trait Visitor: Sized { fn pre_visit_show_retention_policies_statement( self, _n: &ShowRetentionPoliciesStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -200,7 +217,7 @@ pub trait Visitor: Sized { fn post_visit_show_retention_policies_statement( self, _n: &ShowRetentionPoliciesStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } @@ -208,12 +225,15 @@ pub trait Visitor: Sized { fn pre_visit_show_tag_keys_statement( self, _n: &ShowTagKeysStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `SHOW TAG KEYS` statement are visited. - fn post_visit_show_tag_keys_statement(self, _n: &ShowTagKeysStatement) -> VisitorResult<Self> { + fn post_visit_show_tag_keys_statement( + self, + _n: &ShowTagKeysStatement, + ) -> Result<Self, Self::Error> { Ok(self) } @@ -221,7 +241,7 @@ pub trait Visitor: Sized { fn pre_visit_show_tag_values_statement( self, _n: &ShowTagValuesStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -229,7 +249,7 @@ pub trait Visitor: Sized { fn post_visit_show_tag_values_statement( self, _n: &ShowTagValuesStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } @@ -237,7 +257,7 @@ pub trait Visitor: Sized { fn pre_visit_show_field_keys_statement( self, _n: &ShowFieldKeysStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -245,7 +265,7 @@ pub trait Visitor: Sized { fn post_visit_show_field_keys_statement( self, _n: &ShowFieldKeysStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } @@ -253,42 +273,45 @@ pub trait Visitor: Sized { fn pre_visit_conditional_expression( self, _n: &ConditionalExpression, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the conditional expression are visited. - fn post_visit_conditional_expression(self, _n: &ConditionalExpression) -> VisitorResult<Self> { + fn post_visit_conditional_expression( + self, + _n: &ConditionalExpression, + ) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the arithmetic expression are visited. 
- fn pre_visit_expr(self, _n: &Expr) -> VisitorResult<Recursion<Self>> { + fn pre_visit_expr(self, _n: &Expr) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the arithmetic expression are visited. - fn post_visit_expr(self, _n: &Expr) -> VisitorResult<Self> { + fn post_visit_expr(self, _n: &Expr) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any fields of the `SELECT` projection are visited. - fn pre_visit_select_field_list(self, _n: &FieldList) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_field_list(self, _n: &FieldList) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all fields of the `SELECT` projection are visited. - fn post_visit_select_field_list(self, _n: &FieldList) -> VisitorResult<Self> { + fn post_visit_select_field_list(self, _n: &FieldList) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the field of a `SELECT` statement are visited. - fn pre_visit_select_field(self, _n: &Field) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_field(self, _n: &Field) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the field of a `SELECT` statement are visited. - fn post_visit_select_field(self, _n: &Field) -> VisitorResult<Self> { + fn post_visit_select_field(self, _n: &Field) -> Result<Self, Self::Error> { Ok(self) } @@ -296,12 +319,15 @@ pub trait Visitor: Sized { fn pre_visit_select_from_clause( self, _n: &FromMeasurementClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `FROM` clause of a `SELECT` statement are visited. - fn post_visit_select_from_clause(self, _n: &FromMeasurementClause) -> VisitorResult<Self> { + fn post_visit_select_from_clause( + self, + _n: &FromMeasurementClause, + ) -> Result<Self, Self::Error> { Ok(self) } @@ -309,7 +335,7 @@ pub trait Visitor: Sized { fn pre_visit_select_measurement_selection( self, _n: &MeasurementSelection, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -317,47 +343,50 @@ pub trait Visitor: Sized { fn post_visit_select_measurement_selection( self, _n: &MeasurementSelection, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `GROUP BY` clause are visited. - fn pre_visit_group_by_clause(self, _n: &GroupByClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_group_by_clause(self, _n: &GroupByClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `GROUP BY` clause are visited. - fn post_visit_group_by_clause(self, _n: &GroupByClause) -> VisitorResult<Self> { + fn post_visit_group_by_clause(self, _n: &GroupByClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `GROUP BY` dimension expression are visited. - fn pre_visit_select_dimension(self, _n: &Dimension) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_dimension(self, _n: &Dimension) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `GROUP BY` dimension expression are visited. - fn post_visit_select_dimension(self, _n: &Dimension) -> VisitorResult<Self> { + fn post_visit_select_dimension(self, _n: &Dimension) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `WHERE` clause are visited. 
- fn pre_visit_where_clause(self, _n: &WhereClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_where_clause(self, _n: &WhereClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `WHERE` clause are visited. - fn post_visit_where_clause(self, _n: &WhereClause) -> VisitorResult<Self> { + fn post_visit_where_clause(self, _n: &WhereClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `FROM` clause for any `SHOW` statement are visited. - fn pre_visit_show_from_clause(self, _n: &ShowFromClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_show_from_clause( + self, + _n: &ShowFromClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `FROM` clause for any `SHOW` statement are visited. - fn post_visit_show_from_clause(self, _n: &ShowFromClause) -> VisitorResult<Self> { + fn post_visit_show_from_clause(self, _n: &ShowFromClause) -> Result<Self, Self::Error> { Ok(self) } @@ -365,7 +394,7 @@ pub trait Visitor: Sized { fn pre_visit_qualified_measurement_name( self, _n: &QualifiedMeasurementName, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } @@ -373,97 +402,103 @@ pub trait Visitor: Sized { fn post_visit_qualified_measurement_name( self, _n: &QualifiedMeasurementName, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `FILL` clause are visited. - fn pre_visit_fill_clause(self, _n: &FillClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_fill_clause(self, _n: &FillClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `FILL` clause are visited. - fn post_visit_fill_clause(self, _n: &FillClause) -> VisitorResult<Self> { + fn post_visit_fill_clause(self, _n: &FillClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `ORDER BY` clause are visited. - fn pre_visit_order_by_clause(self, _n: &OrderByClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_order_by_clause(self, _n: &OrderByClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `ORDER BY` clause are visited. - fn post_visit_order_by_clause(self, _n: &OrderByClause) -> VisitorResult<Self> { + fn post_visit_order_by_clause(self, _n: &OrderByClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `LIMIT` clause are visited. - fn pre_visit_limit_clause(self, _n: &LimitClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_limit_clause(self, _n: &LimitClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `LIMIT` clause are visited. - fn post_visit_limit_clause(self, _n: &LimitClause) -> VisitorResult<Self> { + fn post_visit_limit_clause(self, _n: &LimitClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `OFFSET` clause are visited. - fn pre_visit_offset_clause(self, _n: &OffsetClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_offset_clause(self, _n: &OffsetClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `OFFSET` clause are visited. 
- fn post_visit_offset_clause(self, _n: &OffsetClause) -> VisitorResult<Self> { + fn post_visit_offset_clause(self, _n: &OffsetClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `SLIMIT` clause are visited. - fn pre_visit_slimit_clause(self, _n: &SLimitClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_slimit_clause(self, _n: &SLimitClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `SLIMIT` clause are visited. - fn post_visit_slimit_clause(self, _n: &SLimitClause) -> VisitorResult<Self> { + fn post_visit_slimit_clause(self, _n: &SLimitClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of the `SOFFSET` clause are visited. - fn pre_visit_soffset_clause(self, _n: &SOffsetClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_soffset_clause(self, _n: &SOffsetClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of the `SOFFSET` clause are visited. - fn post_visit_soffset_clause(self, _n: &SOffsetClause) -> VisitorResult<Self> { + fn post_visit_soffset_clause(self, _n: &SOffsetClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of a `TZ` clause are visited. - fn pre_visit_timezone_clause(self, _n: &TimeZoneClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_timezone_clause( + self, + _n: &TimeZoneClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of a `TZ` clause are visited. - fn post_visit_timezone_clause(self, _n: &TimeZoneClause) -> VisitorResult<Self> { + fn post_visit_timezone_clause(self, _n: &TimeZoneClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of an extended `ON` clause are visited. - fn pre_visit_extended_on_clause(self, _n: &ExtendedOnClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_extended_on_clause( + self, + _n: &ExtendedOnClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of an extended `ON` clause are visited. - fn post_visit_extended_on_clause(self, _n: &ExtendedOnClause) -> VisitorResult<Self> { + fn post_visit_extended_on_clause(self, _n: &ExtendedOnClause) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of an `ON` clause are visited. - fn pre_visit_on_clause(self, _n: &OnClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_on_clause(self, _n: &OnClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of an `ON` clause are visited. - fn post_visit_on_clause(self, _n: &OnClause) -> VisitorResult<Self> { + fn post_visit_on_clause(self, _n: &OnClause) -> Result<Self, Self::Error> { Ok(self) } @@ -471,22 +506,25 @@ pub trait Visitor: Sized { fn pre_visit_with_measurement_clause( self, _n: &WithMeasurementClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of a `WITH MEASUREMENT` clause are visited. - fn post_visit_with_measurement_clause(self, _n: &WithMeasurementClause) -> VisitorResult<Self> { + fn post_visit_with_measurement_clause( + self, + _n: &WithMeasurementClause, + ) -> Result<Self, Self::Error> { Ok(self) } /// Invoked before any children of a `WITH KEY` clause are visited. 
- fn pre_visit_with_key_clause(self, _n: &WithKeyClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_with_key_clause(self, _n: &WithKeyClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self)) } /// Invoked after all children of a `WITH KEY` clause are visited. - fn post_visit_with_key_clause(self, _n: &WithKeyClause) -> VisitorResult<Self> { + fn post_visit_with_key_clause(self, _n: &WithKeyClause) -> Result<Self, Self::Error> { Ok(self) } } @@ -494,11 +532,11 @@ pub trait Visitor: Sized { /// Trait for types that can be visited by [`Visitor`] pub trait Visitable: Sized { /// accept a visitor, calling `visit` on all children of this - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V>; + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error>; } impl Visitable for Statement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -523,7 +561,7 @@ impl Visitable for Statement { } impl Visitable for CreateDatabaseStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_create_database_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -534,7 +572,7 @@ impl Visitable for CreateDatabaseStatement { } impl Visitable for DeleteStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_delete_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -558,7 +596,7 @@ impl Visitable for DeleteStatement { } impl Visitable for WhereClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_where_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -571,7 +609,7 @@ impl Visitable for WhereClause { } impl Visitable for DeleteFromClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_delete_from_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -584,7 +622,7 @@ impl Visitable for DeleteFromClause { } impl Visitable for MeasurementName { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_measurement_name(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -595,7 +633,7 @@ impl Visitable for MeasurementName { } impl Visitable for DropMeasurementStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_drop_measurement_statement(self)? 
{ Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -606,7 +644,7 @@ impl Visitable for DropMeasurementStatement { } impl Visitable for ExplainStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_explain_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -619,7 +657,7 @@ impl Visitable for ExplainStatement { } impl Visitable for SelectStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_select_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -688,7 +726,7 @@ impl Visitable for SelectStatement { } impl Visitable for TimeZoneClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_timezone_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -699,7 +737,7 @@ impl Visitable for TimeZoneClause { } impl Visitable for LimitClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_limit_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -710,7 +748,7 @@ impl Visitable for LimitClause { } impl Visitable for OffsetClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_offset_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -721,7 +759,7 @@ impl Visitable for OffsetClause { } impl Visitable for SLimitClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_slimit_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -732,7 +770,7 @@ impl Visitable for SLimitClause { } impl Visitable for SOffsetClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_soffset_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -743,7 +781,7 @@ impl Visitable for SOffsetClause { } impl Visitable for FillClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_fill_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -754,7 +792,7 @@ impl Visitable for FillClause { } impl Visitable for OrderByClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_order_by_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -765,7 +803,7 @@ impl Visitable for OrderByClause { } impl Visitable for GroupByClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_group_by_clause(self)? 
{ Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -778,7 +816,7 @@ impl Visitable for GroupByClause { } impl Visitable for ShowMeasurementsStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_measurements_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -819,7 +857,7 @@ impl Visitable for ShowMeasurementsStatement { } impl Visitable for ExtendedOnClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_extended_on_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -830,7 +868,7 @@ impl Visitable for ExtendedOnClause { } impl Visitable for WithMeasurementClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_with_measurement_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -846,7 +884,7 @@ impl Visitable for WithMeasurementClause { } impl Visitable for ShowRetentionPoliciesStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_retention_policies_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -863,7 +901,7 @@ impl Visitable for ShowRetentionPoliciesStatement { } impl Visitable for ShowFromClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_from_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -876,7 +914,7 @@ impl Visitable for ShowFromClause { } impl Visitable for QualifiedMeasurementName { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_qualified_measurement_name(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -889,7 +927,7 @@ impl Visitable for QualifiedMeasurementName { } impl Visitable for ShowTagKeysStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_tag_keys_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -930,7 +968,7 @@ impl Visitable for ShowTagKeysStatement { } impl Visitable for ShowTagValuesStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_tag_values_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -973,7 +1011,7 @@ impl Visitable for ShowTagValuesStatement { } impl Visitable for ShowFieldKeysStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_field_keys_statement(self)? 
{ Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1008,7 +1046,7 @@ impl Visitable for ShowFieldKeysStatement { } impl Visitable for FieldList { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_select_field_list(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1021,7 +1059,7 @@ impl Visitable for FieldList { } impl Visitable for Field { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_select_field(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1034,7 +1072,7 @@ impl Visitable for Field { } impl Visitable for FromMeasurementClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_select_from_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1047,7 +1085,7 @@ impl Visitable for FromMeasurementClause { } impl Visitable for MeasurementSelection { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_select_measurement_selection(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1063,7 +1101,7 @@ impl Visitable for MeasurementSelection { } impl Visitable for Dimension { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_select_dimension(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1086,7 +1124,7 @@ impl Visitable for Dimension { } impl Visitable for WithKeyClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_with_key_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1097,7 +1135,7 @@ impl Visitable for WithKeyClause { } impl Visitable for ShowDatabasesStatement { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_show_databases_statement(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1107,7 +1145,7 @@ impl Visitable for ShowDatabasesStatement { } impl Visitable for ConditionalExpression { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_conditional_expression(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1127,7 +1165,7 @@ impl Visitable for ConditionalExpression { } impl Visitable for Expr { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_expr(self)? 
{ Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1156,7 +1194,7 @@ impl Visitable for Expr { } impl Visitable for OnClause { - fn accept<V: Visitor>(&self, visitor: V) -> VisitorResult<V> { + fn accept<V: Visitor>(&self, visitor: V) -> Result<V, V::Error> { let visitor = match visitor.pre_visit_on_clause(self)? { Continue(visitor) => visitor, Stop(visitor) => return Ok(visitor), @@ -1169,7 +1207,7 @@ impl Visitable for OnClause { #[cfg(test)] mod test { use super::Recursion::Continue; - use super::{Recursion, Visitable, Visitor, VisitorResult}; + use super::{Recursion, Visitable, Visitor}; use crate::common::{ LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, WhereClause, @@ -1216,106 +1254,117 @@ mod test { } impl Visitor for TestVisitor { - fn pre_visit_statement(self, n: &Statement) -> VisitorResult<Recursion<Self>> { + type Error = (); + + fn pre_visit_statement(self, n: &Statement) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("statement", n))) } - fn post_visit_statement(self, n: &Statement) -> VisitorResult<Self> { + fn post_visit_statement(self, n: &Statement) -> Result<Self, Self::Error> { Ok(self.push_post("statement", n)) } - fn pre_visit_delete_statement(self, n: &DeleteStatement) -> VisitorResult<Recursion<Self>> { + fn pre_visit_delete_statement( + self, + n: &DeleteStatement, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("delete_statement", n))) } - fn post_visit_delete_statement(self, n: &DeleteStatement) -> VisitorResult<Self> { + fn post_visit_delete_statement(self, n: &DeleteStatement) -> Result<Self, Self::Error> { Ok(self.push_post("delete_statement", n)) } fn pre_visit_delete_from_clause( self, n: &DeleteFromClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("delete_from", n))) } - fn post_visit_delete_from_clause(self, n: &DeleteFromClause) -> VisitorResult<Self> { + fn post_visit_delete_from_clause(self, n: &DeleteFromClause) -> Result<Self, Self::Error> { Ok(self.push_post("delete_from", n)) } - fn pre_visit_measurement_name(self, n: &MeasurementName) -> VisitorResult<Recursion<Self>> { + fn pre_visit_measurement_name( + self, + n: &MeasurementName, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("measurement_name", n))) } - fn post_visit_measurement_name(self, n: &MeasurementName) -> VisitorResult<Self> { + fn post_visit_measurement_name(self, n: &MeasurementName) -> Result<Self, Self::Error> { Ok(self.push_post("measurement_name", n)) } fn pre_visit_drop_measurement_statement( self, n: &DropMeasurementStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("drop_measurement_statement", n))) } fn post_visit_drop_measurement_statement( self, n: &DropMeasurementStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("drop_measurement_statement", n)) } fn pre_visit_explain_statement( self, n: &ExplainStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("explain_statement", n))) } - fn post_visit_explain_statement(self, n: &ExplainStatement) -> VisitorResult<Self> { + fn post_visit_explain_statement(self, n: &ExplainStatement) -> Result<Self, Self::Error> { Ok(self.push_post("explain_statement", n)) } - fn pre_visit_select_statement(self, n: &SelectStatement) -> VisitorResult<Recursion<Self>> { + fn 
pre_visit_select_statement( + self, + n: &SelectStatement, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("select_statement", n))) } - fn post_visit_select_statement(self, n: &SelectStatement) -> VisitorResult<Self> { + fn post_visit_select_statement(self, n: &SelectStatement) -> Result<Self, Self::Error> { Ok(self.push_post("select_statement", n)) } fn pre_visit_show_databases_statement( self, n: &ShowDatabasesStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("show_databases_statement", n))) } fn post_visit_show_databases_statement( self, n: &ShowDatabasesStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("show_databases_statement", n)) } fn pre_visit_show_measurements_statement( self, n: &ShowMeasurementsStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("show_measurements_statement", n))) } fn post_visit_show_measurements_statement( self, n: &ShowMeasurementsStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("show_measurements_statement", n)) } fn pre_visit_show_retention_policies_statement( self, n: &ShowRetentionPoliciesStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue( self.push_pre("show_retention_policies_statement", n), )) @@ -1324,255 +1373,279 @@ mod test { fn post_visit_show_retention_policies_statement( self, n: &ShowRetentionPoliciesStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("show_retention_policies_statement", n)) } fn pre_visit_show_tag_keys_statement( self, n: &ShowTagKeysStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("show_tag_keys_statement", n))) } fn post_visit_show_tag_keys_statement( self, n: &ShowTagKeysStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("show_tag_keys_statement", n)) } fn pre_visit_show_tag_values_statement( self, n: &ShowTagValuesStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("show_tag_values_statement", n))) } fn post_visit_show_tag_values_statement( self, n: &ShowTagValuesStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("show_tag_values_statement", n)) } fn pre_visit_show_field_keys_statement( self, n: &ShowFieldKeysStatement, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("show_field_keys_statement", n))) } fn post_visit_show_field_keys_statement( self, n: &ShowFieldKeysStatement, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("show_field_keys_statement", n)) } fn pre_visit_conditional_expression( self, n: &ConditionalExpression, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("conditional_expression", n))) } fn post_visit_conditional_expression( self, n: &ConditionalExpression, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("conditional_expression", n)) } - fn pre_visit_expr(self, n: &Expr) -> VisitorResult<Recursion<Self>> { + fn pre_visit_expr(self, n: &Expr) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("expr", n))) } - fn 
post_visit_expr(self, n: &Expr) -> VisitorResult<Self> { + fn post_visit_expr(self, n: &Expr) -> Result<Self, Self::Error> { Ok(self.push_post("expr", n)) } - fn pre_visit_select_field_list(self, n: &FieldList) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_field_list( + self, + n: &FieldList, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("select_field_list", n))) } - fn post_visit_select_field_list(self, n: &FieldList) -> VisitorResult<Self> { + fn post_visit_select_field_list(self, n: &FieldList) -> Result<Self, Self::Error> { Ok(self.push_post("select_field_list", n)) } - fn pre_visit_select_field(self, n: &Field) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_field(self, n: &Field) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("select_field", n))) } - fn post_visit_select_field(self, n: &Field) -> VisitorResult<Self> { + fn post_visit_select_field(self, n: &Field) -> Result<Self, Self::Error> { Ok(self.push_post("select_field", n)) } fn pre_visit_select_from_clause( self, n: &FromMeasurementClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("select_from_clause", n))) } - fn post_visit_select_from_clause(self, n: &FromMeasurementClause) -> VisitorResult<Self> { + fn post_visit_select_from_clause( + self, + n: &FromMeasurementClause, + ) -> Result<Self, Self::Error> { Ok(self.push_post("select_from_clause", n)) } fn pre_visit_select_measurement_selection( self, n: &MeasurementSelection, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("select_measurement_selection", n))) } fn post_visit_select_measurement_selection( self, n: &MeasurementSelection, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("select_measurement_selection", n)) } - fn pre_visit_group_by_clause(self, n: &GroupByClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_group_by_clause( + self, + n: &GroupByClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("group_by_clause", n))) } - fn post_visit_group_by_clause(self, n: &GroupByClause) -> VisitorResult<Self> { + fn post_visit_group_by_clause(self, n: &GroupByClause) -> Result<Self, Self::Error> { Ok(self.push_post("group_by_clause", n)) } - fn pre_visit_select_dimension(self, n: &Dimension) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_dimension(self, n: &Dimension) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("select_dimension", n))) } - fn post_visit_select_dimension(self, n: &Dimension) -> VisitorResult<Self> { + fn post_visit_select_dimension(self, n: &Dimension) -> Result<Self, Self::Error> { Ok(self.push_post("select_dimension", n)) } - fn pre_visit_where_clause(self, n: &WhereClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_where_clause(self, n: &WhereClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("where_clause", n))) } - fn post_visit_where_clause(self, n: &WhereClause) -> VisitorResult<Self> { + fn post_visit_where_clause(self, n: &WhereClause) -> Result<Self, Self::Error> { Ok(self.push_post("where_clause", n)) } - fn pre_visit_show_from_clause(self, n: &ShowFromClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_show_from_clause( + self, + n: &ShowFromClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("show_from_clause", n))) } - fn post_visit_show_from_clause(self, n: &ShowFromClause) -> 
VisitorResult<Self> { + fn post_visit_show_from_clause(self, n: &ShowFromClause) -> Result<Self, Self::Error> { Ok(self.push_post("show_from_clause", n)) } fn pre_visit_qualified_measurement_name( self, n: &QualifiedMeasurementName, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("qualified_measurement_name", n))) } fn post_visit_qualified_measurement_name( self, n: &QualifiedMeasurementName, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("qualified_measurement_name", n)) } - fn pre_visit_fill_clause(self, n: &FillClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_fill_clause(self, n: &FillClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("fill_clause", n))) } - fn post_visit_fill_clause(self, n: &FillClause) -> VisitorResult<Self> { + fn post_visit_fill_clause(self, n: &FillClause) -> Result<Self, Self::Error> { Ok(self.push_post("fill_clause", n)) } - fn pre_visit_order_by_clause(self, n: &OrderByClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_order_by_clause( + self, + n: &OrderByClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("order_by_clause", n))) } - fn post_visit_order_by_clause(self, n: &OrderByClause) -> VisitorResult<Self> { + fn post_visit_order_by_clause(self, n: &OrderByClause) -> Result<Self, Self::Error> { Ok(self.push_post("order_by_clause", n)) } - fn pre_visit_limit_clause(self, n: &LimitClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_limit_clause(self, n: &LimitClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("limit_clause", n))) } - fn post_visit_limit_clause(self, n: &LimitClause) -> VisitorResult<Self> { + fn post_visit_limit_clause(self, n: &LimitClause) -> Result<Self, Self::Error> { Ok(self.push_post("limit_clause", n)) } - fn pre_visit_offset_clause(self, n: &OffsetClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_offset_clause(self, n: &OffsetClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("offset_clause", n))) } - fn post_visit_offset_clause(self, n: &OffsetClause) -> VisitorResult<Self> { + fn post_visit_offset_clause(self, n: &OffsetClause) -> Result<Self, Self::Error> { Ok(self.push_post("offset_clause", n)) } - fn pre_visit_slimit_clause(self, n: &SLimitClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_slimit_clause(self, n: &SLimitClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("slimit_clause", n))) } - fn post_visit_slimit_clause(self, n: &SLimitClause) -> VisitorResult<Self> { + fn post_visit_slimit_clause(self, n: &SLimitClause) -> Result<Self, Self::Error> { Ok(self.push_post("slimit_clause", n)) } - fn pre_visit_soffset_clause(self, n: &SOffsetClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_soffset_clause( + self, + n: &SOffsetClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("soffset_clause", n))) } - fn post_visit_soffset_clause(self, n: &SOffsetClause) -> VisitorResult<Self> { + fn post_visit_soffset_clause(self, n: &SOffsetClause) -> Result<Self, Self::Error> { Ok(self.push_post("soffset_clause", n)) } - fn pre_visit_timezone_clause(self, n: &TimeZoneClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_timezone_clause( + self, + n: &TimeZoneClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("timezone_clause", n))) } - fn post_visit_timezone_clause(self, n: &TimeZoneClause) -> 
VisitorResult<Self> { + fn post_visit_timezone_clause(self, n: &TimeZoneClause) -> Result<Self, Self::Error> { Ok(self.push_post("timezone_clause", n)) } fn pre_visit_extended_on_clause( self, n: &ExtendedOnClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("extended_on_clause", n))) } - fn post_visit_extended_on_clause(self, n: &ExtendedOnClause) -> VisitorResult<Self> { + fn post_visit_extended_on_clause(self, n: &ExtendedOnClause) -> Result<Self, Self::Error> { Ok(self.push_post("extended_on_clause", n)) } - fn pre_visit_on_clause(self, n: &OnClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_on_clause(self, n: &OnClause) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("on_clause", n))) } - fn post_visit_on_clause(self, n: &OnClause) -> VisitorResult<Self> { + fn post_visit_on_clause(self, n: &OnClause) -> Result<Self, Self::Error> { Ok(self.push_pre("on_clause", n)) } fn pre_visit_with_measurement_clause( self, n: &WithMeasurementClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("with_measurement_clause", n))) } fn post_visit_with_measurement_clause( self, n: &WithMeasurementClause, - ) -> VisitorResult<Self> { + ) -> Result<Self, Self::Error> { Ok(self.push_post("with_measurement_clause", n)) } - fn pre_visit_with_key_clause(self, n: &WithKeyClause) -> VisitorResult<Recursion<Self>> { + fn pre_visit_with_key_clause( + self, + n: &WithKeyClause, + ) -> Result<Recursion<Self>, Self::Error> { Ok(Continue(self.push_pre("with_key_clause", n))) } - fn post_visit_with_key_clause(self, n: &WithKeyClause) -> VisitorResult<Self> { + fn post_visit_with_key_clause(self, n: &WithKeyClause) -> Result<Self, Self::Error> { Ok(self.push_post("with_key_clause", n)) } } diff --git a/influxdb_influxql_parser/src/visit_mut.rs b/influxdb_influxql_parser/src/visit_mut.rs index 728a182c25..eeaa6fe8d5 100644 --- a/influxdb_influxql_parser/src/visit_mut.rs +++ b/influxdb_influxql_parser/src/visit_mut.rs @@ -3,14 +3,16 @@ //! # Example //! //! ``` -//! use influxdb_influxql_parser::visit_mut::{VisitableMut, VisitorMut, VisitorResult}; +//! use influxdb_influxql_parser::visit_mut::{VisitableMut, VisitorMut}; //! use influxdb_influxql_parser::parse_statements; //! use influxdb_influxql_parser::common::WhereClause; //! //! struct MyVisitor; //! //! impl VisitorMut for MyVisitor { -//! fn post_visit_where_clause(&mut self, n: &mut WhereClause) -> VisitorResult<()> { +//! type Error = (); +//! +//! fn post_visit_where_clause(&mut self, n: &mut WhereClause) -> Result<(), Self::Error> { //! println!("{}", n); //! Ok(()) //! } @@ -47,9 +49,6 @@ use crate::show_tag_values::{ShowTagValuesStatement, WithKeyClause}; use crate::simple_from_clause::{DeleteFromClause, ShowFromClause}; use crate::statement::Statement; -/// The result type for a [`VisitorMut`]. -pub type VisitorResult<T, E = &'static str> = Result<T, E>; - /// Controls how the visitor recursion should proceed. #[derive(Clone, Copy)] pub enum Recursion { @@ -64,13 +63,16 @@ pub enum Recursion { /// any [`VisitableMut::accept`], `pre_visit` functions are invoked repeatedly /// until a leaf node is reached or a `pre_visit` function returns [`Recursion::Stop`]. pub trait VisitorMut: Sized { + /// The type returned in the event of an error traversing the tree. + type Error; + /// Invoked before any children of the InfluxQL statement are visited. 
- fn pre_visit_statement(&mut self, _n: &mut Statement) -> VisitorResult<Recursion> { + fn pre_visit_statement(&mut self, _n: &mut Statement) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the InfluxQL statement are visited. - fn post_visit_statement(&mut self, _n: &mut Statement) -> VisitorResult<()> { + fn post_visit_statement(&mut self, _n: &mut Statement) -> Result<(), Self::Error> { Ok(()) } @@ -78,7 +80,7 @@ pub trait VisitorMut: Sized { fn pre_visit_create_database_statement( &mut self, _n: &mut CreateDatabaseStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -87,17 +89,20 @@ pub trait VisitorMut: Sized { fn post_visit_create_database_statement( &mut self, _n: &mut CreateDatabaseStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `DELETE` statement are visited. - fn pre_visit_delete_statement(&mut self, _n: &mut DeleteStatement) -> VisitorResult<Recursion> { + fn pre_visit_delete_statement( + &mut self, + _n: &mut DeleteStatement, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `DELETE` statement are visited. - fn post_visit_delete_statement(&mut self, _n: &mut DeleteStatement) -> VisitorResult<()> { + fn post_visit_delete_statement(&mut self, _n: &mut DeleteStatement) -> Result<(), Self::Error> { Ok(()) } @@ -105,22 +110,28 @@ pub trait VisitorMut: Sized { fn pre_visit_delete_from_clause( &mut self, _n: &mut DeleteFromClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `FROM` clause of a `DELETE` statement are visited. - fn post_visit_delete_from_clause(&mut self, _n: &mut DeleteFromClause) -> VisitorResult<()> { + fn post_visit_delete_from_clause( + &mut self, + _n: &mut DeleteFromClause, + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the measurement name are visited. - fn pre_visit_measurement_name(&mut self, _n: &mut MeasurementName) -> VisitorResult<Recursion> { + fn pre_visit_measurement_name( + &mut self, + _n: &mut MeasurementName, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the measurement name are visited. - fn post_visit_measurement_name(&mut self, _n: &mut MeasurementName) -> VisitorResult<()> { + fn post_visit_measurement_name(&mut self, _n: &mut MeasurementName) -> Result<(), Self::Error> { Ok(()) } @@ -128,7 +139,7 @@ pub trait VisitorMut: Sized { fn pre_visit_drop_measurement_statement( &mut self, _n: &mut DropMeasurementStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -136,7 +147,7 @@ pub trait VisitorMut: Sized { fn post_visit_drop_measurement_statement( &mut self, _n: &mut DropMeasurementStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -144,22 +155,28 @@ pub trait VisitorMut: Sized { fn pre_visit_explain_statement( &mut self, _n: &mut ExplainStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `EXPLAIN` statement are visited. - fn post_visit_explain_statement(&mut self, _n: &mut ExplainStatement) -> VisitorResult<()> { + fn post_visit_explain_statement( + &mut self, + _n: &mut ExplainStatement, + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `SELECT` statement are visited. 
- fn pre_visit_select_statement(&mut self, _n: &mut SelectStatement) -> VisitorResult<Recursion> { + fn pre_visit_select_statement( + &mut self, + _n: &mut SelectStatement, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `SELECT` statement are visited. - fn post_visit_select_statement(&mut self, _n: &mut SelectStatement) -> VisitorResult<()> { + fn post_visit_select_statement(&mut self, _n: &mut SelectStatement) -> Result<(), Self::Error> { Ok(()) } @@ -167,7 +184,7 @@ pub trait VisitorMut: Sized { fn pre_visit_show_databases_statement( &mut self, _n: &mut ShowDatabasesStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -175,7 +192,7 @@ pub trait VisitorMut: Sized { fn post_visit_show_databases_statement( &mut self, _n: &mut ShowDatabasesStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -183,7 +200,7 @@ pub trait VisitorMut: Sized { fn pre_visit_show_measurements_statement( &mut self, _n: &mut ShowMeasurementsStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -191,7 +208,7 @@ pub trait VisitorMut: Sized { fn post_visit_show_measurements_statement( &mut self, _n: &mut ShowMeasurementsStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -199,7 +216,7 @@ pub trait VisitorMut: Sized { fn pre_visit_show_retention_policies_statement( &mut self, _n: &mut ShowRetentionPoliciesStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -207,7 +224,7 @@ pub trait VisitorMut: Sized { fn post_visit_show_retention_policies_statement( &mut self, _n: &mut ShowRetentionPoliciesStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -215,7 +232,7 @@ pub trait VisitorMut: Sized { fn pre_visit_show_tag_keys_statement( &mut self, _n: &mut ShowTagKeysStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -223,7 +240,7 @@ pub trait VisitorMut: Sized { fn post_visit_show_tag_keys_statement( &mut self, _n: &mut ShowTagKeysStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -231,7 +248,7 @@ pub trait VisitorMut: Sized { fn pre_visit_show_tag_values_statement( &mut self, _n: &mut ShowTagValuesStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -239,7 +256,7 @@ pub trait VisitorMut: Sized { fn post_visit_show_tag_values_statement( &mut self, _n: &mut ShowTagValuesStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -247,7 +264,7 @@ pub trait VisitorMut: Sized { fn pre_visit_show_field_keys_statement( &mut self, _n: &mut ShowFieldKeysStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -255,7 +272,7 @@ pub trait VisitorMut: Sized { fn post_visit_show_field_keys_statement( &mut self, _n: &mut ShowFieldKeysStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -263,7 +280,7 @@ pub trait VisitorMut: Sized { fn pre_visit_conditional_expression( &mut self, _n: &mut ConditionalExpression, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -271,37 +288,40 @@ pub trait VisitorMut: Sized { fn post_visit_conditional_expression( &mut self, _n: &mut ConditionalExpression, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the 
arithmetic expression are visited. - fn pre_visit_expr(&mut self, _n: &mut Expr) -> VisitorResult<Recursion> { + fn pre_visit_expr(&mut self, _n: &mut Expr) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the arithmetic expression are visited. - fn post_visit_expr(&mut self, _n: &mut Expr) -> VisitorResult<()> { + fn post_visit_expr(&mut self, _n: &mut Expr) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any fields of the `SELECT` projection are visited. - fn pre_visit_select_field_list(&mut self, _n: &mut FieldList) -> VisitorResult<Recursion> { + fn pre_visit_select_field_list( + &mut self, + _n: &mut FieldList, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all fields of the `SELECT` projection are visited. - fn post_visit_select_field_list(&mut self, _n: &mut FieldList) -> VisitorResult<()> { + fn post_visit_select_field_list(&mut self, _n: &mut FieldList) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the field of a `SELECT` statement are visited. - fn pre_visit_select_field(&mut self, _n: &mut Field) -> VisitorResult<Recursion> { + fn pre_visit_select_field(&mut self, _n: &mut Field) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the field of a `SELECT` statement are visited. - fn post_visit_select_field(&mut self, _n: &mut Field) -> VisitorResult<()> { + fn post_visit_select_field(&mut self, _n: &mut Field) -> Result<(), Self::Error> { Ok(()) } @@ -309,7 +329,7 @@ pub trait VisitorMut: Sized { fn pre_visit_select_from_clause( &mut self, _n: &mut FromMeasurementClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -317,7 +337,7 @@ pub trait VisitorMut: Sized { fn post_visit_select_from_clause( &mut self, _n: &mut FromMeasurementClause, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } @@ -325,7 +345,7 @@ pub trait VisitorMut: Sized { fn pre_visit_select_measurement_selection( &mut self, _n: &mut MeasurementSelection, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -333,47 +353,53 @@ pub trait VisitorMut: Sized { fn post_visit_select_measurement_selection( &mut self, _n: &mut MeasurementSelection, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `GROUP BY` clause are visited. - fn pre_visit_group_by_clause(&mut self, _n: &mut GroupByClause) -> VisitorResult<Recursion> { + fn pre_visit_group_by_clause( + &mut self, + _n: &mut GroupByClause, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `GROUP BY` clause are visited. - fn post_visit_group_by_clause(&mut self, _n: &mut GroupByClause) -> VisitorResult<()> { + fn post_visit_group_by_clause(&mut self, _n: &mut GroupByClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `GROUP BY` dimension expression are visited. - fn pre_visit_select_dimension(&mut self, _n: &mut Dimension) -> VisitorResult<Recursion> { + fn pre_visit_select_dimension(&mut self, _n: &mut Dimension) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `GROUP BY` dimension expression are visited. - fn post_visit_select_dimension(&mut self, _n: &mut Dimension) -> VisitorResult<()> { + fn post_visit_select_dimension(&mut self, _n: &mut Dimension) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `WHERE` clause are visited. 
- fn pre_visit_where_clause(&mut self, _n: &mut WhereClause) -> VisitorResult<Recursion> { + fn pre_visit_where_clause(&mut self, _n: &mut WhereClause) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `WHERE` clause are visited. - fn post_visit_where_clause(&mut self, _n: &mut WhereClause) -> VisitorResult<()> { + fn post_visit_where_clause(&mut self, _n: &mut WhereClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `FROM` clause for any `SHOW` statement are visited. - fn pre_visit_show_from_clause(&mut self, _n: &mut ShowFromClause) -> VisitorResult<Recursion> { + fn pre_visit_show_from_clause( + &mut self, + _n: &mut ShowFromClause, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `FROM` clause for any `SHOW` statement are visited. - fn post_visit_show_from_clause(&mut self, _n: &mut ShowFromClause) -> VisitorResult<()> { + fn post_visit_show_from_clause(&mut self, _n: &mut ShowFromClause) -> Result<(), Self::Error> { Ok(()) } @@ -381,7 +407,7 @@ pub trait VisitorMut: Sized { fn pre_visit_qualified_measurement_name( &mut self, _n: &mut QualifiedMeasurementName, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -389,77 +415,86 @@ pub trait VisitorMut: Sized { fn post_visit_qualified_measurement_name( &mut self, _n: &mut QualifiedMeasurementName, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `FILL` clause are visited. - fn pre_visit_fill_clause(&mut self, _n: &mut FillClause) -> VisitorResult<Recursion> { + fn pre_visit_fill_clause(&mut self, _n: &mut FillClause) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `FILL` clause are visited. - fn post_visit_fill_clause(&mut self, _n: &mut FillClause) -> VisitorResult<()> { + fn post_visit_fill_clause(&mut self, _n: &mut FillClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `ORDER BY` clause are visited. - fn pre_visit_order_by_clause(&mut self, _n: &mut OrderByClause) -> VisitorResult<Recursion> { + fn pre_visit_order_by_clause( + &mut self, + _n: &mut OrderByClause, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `ORDER BY` clause are visited. - fn post_visit_order_by_clause(&mut self, _n: &mut OrderByClause) -> VisitorResult<()> { + fn post_visit_order_by_clause(&mut self, _n: &mut OrderByClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `LIMIT` clause are visited. - fn pre_visit_limit_clause(&mut self, _n: &mut LimitClause) -> VisitorResult<Recursion> { + fn pre_visit_limit_clause(&mut self, _n: &mut LimitClause) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `LIMIT` clause are visited. - fn post_visit_limit_clause(&mut self, _n: &mut LimitClause) -> VisitorResult<()> { + fn post_visit_limit_clause(&mut self, _n: &mut LimitClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `OFFSET` clause are visited. - fn pre_visit_offset_clause(&mut self, _n: &mut OffsetClause) -> VisitorResult<Recursion> { + fn pre_visit_offset_clause(&mut self, _n: &mut OffsetClause) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `OFFSET` clause are visited. 
- fn post_visit_offset_clause(&mut self, _n: &mut OffsetClause) -> VisitorResult<()> { + fn post_visit_offset_clause(&mut self, _n: &mut OffsetClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `SLIMIT` clause are visited. - fn pre_visit_slimit_clause(&mut self, _n: &mut SLimitClause) -> VisitorResult<Recursion> { + fn pre_visit_slimit_clause(&mut self, _n: &mut SLimitClause) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `SLIMIT` clause are visited. - fn post_visit_slimit_clause(&mut self, _n: &mut SLimitClause) -> VisitorResult<()> { + fn post_visit_slimit_clause(&mut self, _n: &mut SLimitClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of the `SOFFSET` clause are visited. - fn pre_visit_soffset_clause(&mut self, _n: &mut SOffsetClause) -> VisitorResult<Recursion> { + fn pre_visit_soffset_clause( + &mut self, + _n: &mut SOffsetClause, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of the `SOFFSET` clause are visited. - fn post_visit_soffset_clause(&mut self, _n: &mut SOffsetClause) -> VisitorResult<()> { + fn post_visit_soffset_clause(&mut self, _n: &mut SOffsetClause) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of a `TZ` clause are visited. - fn pre_visit_timezone_clause(&mut self, _n: &mut TimeZoneClause) -> VisitorResult<Recursion> { + fn pre_visit_timezone_clause( + &mut self, + _n: &mut TimeZoneClause, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of a `TZ` clause are visited. - fn post_visit_timezone_clause(&mut self, _n: &mut TimeZoneClause) -> VisitorResult<()> { + fn post_visit_timezone_clause(&mut self, _n: &mut TimeZoneClause) -> Result<(), Self::Error> { Ok(()) } @@ -467,22 +502,25 @@ pub trait VisitorMut: Sized { fn pre_visit_extended_on_clause( &mut self, _n: &mut ExtendedOnClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of an extended `ON` clause are visited. - fn post_visit_extended_on_clause(&mut self, _n: &mut ExtendedOnClause) -> VisitorResult<()> { + fn post_visit_extended_on_clause( + &mut self, + _n: &mut ExtendedOnClause, + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of an `ON` clause are visited. - fn pre_visit_on_clause(&mut self, _n: &mut OnClause) -> VisitorResult<Recursion> { + fn pre_visit_on_clause(&mut self, _n: &mut OnClause) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of an `ON` clause are visited. - fn post_visit_on_clause(&mut self, _n: &mut OnClause) -> VisitorResult<()> { + fn post_visit_on_clause(&mut self, _n: &mut OnClause) -> Result<(), Self::Error> { Ok(()) } @@ -490,7 +528,7 @@ pub trait VisitorMut: Sized { fn pre_visit_with_measurement_clause( &mut self, _n: &mut WithMeasurementClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { Ok(Continue) } @@ -498,17 +536,20 @@ pub trait VisitorMut: Sized { fn post_visit_with_measurement_clause( &mut self, _n: &mut WithMeasurementClause, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { Ok(()) } /// Invoked before any children of a `WITH KEY` clause are visited. 
- fn pre_visit_with_key_clause(&mut self, _n: &mut WithKeyClause) -> VisitorResult<Recursion> { + fn pre_visit_with_key_clause( + &mut self, + _n: &mut WithKeyClause, + ) -> Result<Recursion, Self::Error> { Ok(Continue) } /// Invoked after all children of a `WITH KEY` clause are visited. - fn post_visit_with_key_clause(&mut self, _n: &mut WithKeyClause) -> VisitorResult<()> { + fn post_visit_with_key_clause(&mut self, _n: &mut WithKeyClause) -> Result<(), Self::Error> { Ok(()) } } @@ -516,11 +557,11 @@ pub trait VisitorMut: Sized { /// Trait for types that can be visited by [`VisitorMut`] pub trait VisitableMut: Sized { /// accept a visitor, calling `visit` on all children of this - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()>; + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error>; } impl VisitableMut for Statement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_statement(self)? { return Ok(()); }; @@ -544,7 +585,7 @@ impl VisitableMut for Statement { } impl VisitableMut for CreateDatabaseStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_create_database_statement(self)? { return Ok(()); }; @@ -554,7 +595,7 @@ impl VisitableMut for CreateDatabaseStatement { } impl VisitableMut for DeleteStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_delete_statement(self)? { return Ok(()); }; @@ -575,7 +616,7 @@ impl VisitableMut for DeleteStatement { } impl VisitableMut for WhereClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_where_clause(self)? { return Ok(()); }; @@ -587,7 +628,7 @@ impl VisitableMut for WhereClause { } impl VisitableMut for DeleteFromClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_delete_from_clause(self)? { return Ok(()); }; @@ -601,7 +642,7 @@ impl VisitableMut for DeleteFromClause { } impl VisitableMut for MeasurementName { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_measurement_name(self)? { return Ok(()); }; @@ -611,7 +652,7 @@ impl VisitableMut for MeasurementName { } impl VisitableMut for DropMeasurementStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_drop_measurement_statement(self)? { return Ok(()); }; @@ -621,7 +662,7 @@ impl VisitableMut for DropMeasurementStatement { } impl VisitableMut for ExplainStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_explain_statement(self)? 
{ return Ok(()); }; @@ -633,7 +674,7 @@ impl VisitableMut for ExplainStatement { } impl VisitableMut for SelectStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_select_statement(self)? { return Ok(()); }; @@ -683,7 +724,7 @@ impl VisitableMut for SelectStatement { } impl VisitableMut for TimeZoneClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_timezone_clause(self)? { return Ok(()); }; @@ -693,7 +734,7 @@ impl VisitableMut for TimeZoneClause { } impl VisitableMut for LimitClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_limit_clause(self)? { return Ok(()); }; @@ -703,7 +744,7 @@ impl VisitableMut for LimitClause { } impl VisitableMut for OffsetClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_offset_clause(self)? { return Ok(()); }; @@ -713,7 +754,7 @@ impl VisitableMut for OffsetClause { } impl VisitableMut for SLimitClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_slimit_clause(self)? { return Ok(()); }; @@ -723,7 +764,7 @@ impl VisitableMut for SLimitClause { } impl VisitableMut for SOffsetClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_soffset_clause(self)? { return Ok(()); }; @@ -733,7 +774,7 @@ impl VisitableMut for SOffsetClause { } impl VisitableMut for FillClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_fill_clause(self)? { return Ok(()); }; @@ -743,7 +784,7 @@ impl VisitableMut for FillClause { } impl VisitableMut for OrderByClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_order_by_clause(self)? { return Ok(()); }; @@ -753,7 +794,7 @@ impl VisitableMut for OrderByClause { } impl VisitableMut for GroupByClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_group_by_clause(self)? { return Ok(()); }; @@ -767,7 +808,7 @@ impl VisitableMut for GroupByClause { } impl VisitableMut for ShowMeasurementsStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_measurements_statement(self)? 
{ return Ok(()); }; @@ -797,7 +838,7 @@ impl VisitableMut for ShowMeasurementsStatement { } impl VisitableMut for ExtendedOnClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_extended_on_clause(self)? { return Ok(()); }; @@ -807,7 +848,7 @@ impl VisitableMut for ExtendedOnClause { } impl VisitableMut for WithMeasurementClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_with_measurement_clause(self)? { return Ok(()); }; @@ -822,7 +863,7 @@ impl VisitableMut for WithMeasurementClause { } impl VisitableMut for ShowRetentionPoliciesStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_retention_policies_statement(self)? { return Ok(()); }; @@ -836,7 +877,7 @@ impl VisitableMut for ShowRetentionPoliciesStatement { } impl VisitableMut for ShowFromClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_from_clause(self)? { return Ok(()); }; @@ -850,7 +891,7 @@ impl VisitableMut for ShowFromClause { } impl VisitableMut for QualifiedMeasurementName { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_qualified_measurement_name(self)? { return Ok(()); }; @@ -862,7 +903,7 @@ impl VisitableMut for QualifiedMeasurementName { } impl VisitableMut for ShowTagKeysStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_tag_keys_statement(self)? { return Ok(()); }; @@ -892,7 +933,7 @@ impl VisitableMut for ShowTagKeysStatement { } impl VisitableMut for ShowTagValuesStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_tag_values_statement(self)? { return Ok(()); }; @@ -924,7 +965,7 @@ impl VisitableMut for ShowTagValuesStatement { } impl VisitableMut for ShowFieldKeysStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_field_keys_statement(self)? { return Ok(()); }; @@ -950,7 +991,7 @@ impl VisitableMut for ShowFieldKeysStatement { } impl VisitableMut for FieldList { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_select_field_list(self)? { return Ok(()); }; @@ -964,7 +1005,7 @@ impl VisitableMut for FieldList { } impl VisitableMut for Field { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_select_field(self)? 
{ return Ok(()); }; @@ -976,7 +1017,7 @@ impl VisitableMut for Field { } impl VisitableMut for FromMeasurementClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_select_from_clause(self)? { return Ok(()); }; @@ -990,7 +1031,7 @@ impl VisitableMut for FromMeasurementClause { } impl VisitableMut for MeasurementSelection { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_select_measurement_selection(self)? { return Ok(()); }; @@ -1005,7 +1046,7 @@ impl VisitableMut for MeasurementSelection { } impl VisitableMut for Dimension { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_select_dimension(self)? { return Ok(()); }; @@ -1025,7 +1066,7 @@ impl VisitableMut for Dimension { } impl VisitableMut for WithKeyClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_with_key_clause(self)? { return Ok(()); }; @@ -1035,7 +1076,7 @@ impl VisitableMut for WithKeyClause { } impl VisitableMut for ShowDatabasesStatement { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_show_databases_statement(self)? { return Ok(()); }; @@ -1044,7 +1085,7 @@ impl VisitableMut for ShowDatabasesStatement { } impl VisitableMut for ConditionalExpression { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_conditional_expression(self)? { return Ok(()); }; @@ -1063,7 +1104,7 @@ impl VisitableMut for ConditionalExpression { } impl VisitableMut for Expr { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_expr(self)? { return Ok(()); }; @@ -1091,7 +1132,7 @@ impl VisitableMut for Expr { } impl VisitableMut for OnClause { - fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> VisitorResult<()> { + fn accept<V: VisitorMut>(&mut self, visitor: &mut V) -> Result<(), V::Error> { if let Stop = visitor.pre_visit_on_clause(self)? 
{ return Ok(()); }; @@ -1103,7 +1144,7 @@ impl VisitableMut for OnClause { #[cfg(test)] mod test { use super::Recursion::Continue; - use super::{Recursion, VisitableMut, VisitorMut, VisitorResult}; + use super::{Recursion, VisitableMut, VisitorMut}; use crate::common::{ LimitClause, MeasurementName, OffsetClause, OrderByClause, QualifiedMeasurementName, WhereClause, @@ -1147,12 +1188,14 @@ mod test { } impl VisitorMut for TestVisitor { - fn pre_visit_statement(&mut self, n: &mut Statement) -> VisitorResult<Recursion> { + type Error = (); + + fn pre_visit_statement(&mut self, n: &mut Statement) -> Result<Recursion, Self::Error> { self.push_pre("statement", n); Ok(Continue) } - fn post_visit_statement(&mut self, n: &mut Statement) -> VisitorResult<()> { + fn post_visit_statement(&mut self, n: &mut Statement) -> Result<(), Self::Error> { self.push_post("statement", n); Ok(()) } @@ -1160,12 +1203,15 @@ mod test { fn pre_visit_delete_statement( &mut self, n: &mut DeleteStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("delete_statement", n); Ok(Continue) } - fn post_visit_delete_statement(&mut self, n: &mut DeleteStatement) -> VisitorResult<()> { + fn post_visit_delete_statement( + &mut self, + n: &mut DeleteStatement, + ) -> Result<(), Self::Error> { self.push_post("delete_statement", n); Ok(()) } @@ -1173,12 +1219,15 @@ mod test { fn pre_visit_delete_from_clause( &mut self, n: &mut DeleteFromClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("delete_from", n); Ok(Continue) } - fn post_visit_delete_from_clause(&mut self, n: &mut DeleteFromClause) -> VisitorResult<()> { + fn post_visit_delete_from_clause( + &mut self, + n: &mut DeleteFromClause, + ) -> Result<(), Self::Error> { self.push_post("delete_from", n); Ok(()) } @@ -1186,12 +1235,15 @@ mod test { fn pre_visit_measurement_name( &mut self, n: &mut MeasurementName, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("measurement_name", n); Ok(Continue) } - fn post_visit_measurement_name(&mut self, n: &mut MeasurementName) -> VisitorResult<()> { + fn post_visit_measurement_name( + &mut self, + n: &mut MeasurementName, + ) -> Result<(), Self::Error> { self.push_post("measurement_name", n); Ok(()) } @@ -1199,7 +1251,7 @@ mod test { fn pre_visit_drop_measurement_statement( &mut self, n: &mut DropMeasurementStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("drop_measurement_statement", n); Ok(Continue) } @@ -1207,7 +1259,7 @@ mod test { fn post_visit_drop_measurement_statement( &mut self, n: &mut DropMeasurementStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("drop_measurement_statement", n); Ok(()) } @@ -1215,12 +1267,15 @@ mod test { fn pre_visit_explain_statement( &mut self, n: &mut ExplainStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("explain_statement", n); Ok(Continue) } - fn post_visit_explain_statement(&mut self, n: &mut ExplainStatement) -> VisitorResult<()> { + fn post_visit_explain_statement( + &mut self, + n: &mut ExplainStatement, + ) -> Result<(), Self::Error> { self.push_post("explain_statement", n); Ok(()) } @@ -1228,12 +1283,15 @@ mod test { fn pre_visit_select_statement( &mut self, n: &mut SelectStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("select_statement", n); Ok(Continue) } - fn 
post_visit_select_statement(&mut self, n: &mut SelectStatement) -> VisitorResult<()> { + fn post_visit_select_statement( + &mut self, + n: &mut SelectStatement, + ) -> Result<(), Self::Error> { self.push_post("select_statement", n); Ok(()) } @@ -1241,7 +1299,7 @@ mod test { fn pre_visit_show_databases_statement( &mut self, n: &mut ShowDatabasesStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_databases_statement", n); Ok(Continue) } @@ -1249,7 +1307,7 @@ mod test { fn post_visit_show_databases_statement( &mut self, n: &mut ShowDatabasesStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("show_databases_statement", n); Ok(()) } @@ -1257,7 +1315,7 @@ mod test { fn pre_visit_show_measurements_statement( &mut self, n: &mut ShowMeasurementsStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_measurements_statement", n); Ok(Continue) } @@ -1265,7 +1323,7 @@ mod test { fn post_visit_show_measurements_statement( &mut self, n: &mut ShowMeasurementsStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("show_measurements_statement", n); Ok(()) } @@ -1273,7 +1331,7 @@ mod test { fn pre_visit_show_retention_policies_statement( &mut self, n: &mut ShowRetentionPoliciesStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_retention_policies_statement", n); Ok(Continue) } @@ -1281,7 +1339,7 @@ mod test { fn post_visit_show_retention_policies_statement( &mut self, n: &mut ShowRetentionPoliciesStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("show_retention_policies_statement", n); Ok(()) } @@ -1289,7 +1347,7 @@ mod test { fn pre_visit_show_tag_keys_statement( &mut self, n: &mut ShowTagKeysStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_tag_keys_statement", n); Ok(Continue) } @@ -1297,7 +1355,7 @@ mod test { fn post_visit_show_tag_keys_statement( &mut self, n: &mut ShowTagKeysStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("show_tag_keys_statement", n); Ok(()) } @@ -1305,7 +1363,7 @@ mod test { fn pre_visit_show_tag_values_statement( &mut self, n: &mut ShowTagValuesStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_tag_values_statement", n); Ok(Continue) } @@ -1313,7 +1371,7 @@ mod test { fn post_visit_show_tag_values_statement( &mut self, n: &mut ShowTagValuesStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("show_tag_values_statement", n); Ok(()) } @@ -1321,7 +1379,7 @@ mod test { fn pre_visit_show_field_keys_statement( &mut self, n: &mut ShowFieldKeysStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_field_keys_statement", n); Ok(Continue) } @@ -1329,7 +1387,7 @@ mod test { fn post_visit_show_field_keys_statement( &mut self, n: &mut ShowFieldKeysStatement, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("show_field_keys_statement", n); Ok(()) } @@ -1337,7 +1395,7 @@ mod test { fn pre_visit_conditional_expression( &mut self, n: &mut ConditionalExpression, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("conditional_expression", n); Ok(Continue) } @@ -1345,37 +1403,40 @@ mod test { fn post_visit_conditional_expression( &mut self, n: &mut 
ConditionalExpression, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("conditional_expression", n); Ok(()) } - fn pre_visit_expr(&mut self, n: &mut Expr) -> VisitorResult<Recursion> { + fn pre_visit_expr(&mut self, n: &mut Expr) -> Result<Recursion, Self::Error> { self.push_pre("expr", n); Ok(Continue) } - fn post_visit_expr(&mut self, n: &mut Expr) -> VisitorResult<()> { + fn post_visit_expr(&mut self, n: &mut Expr) -> Result<(), Self::Error> { self.push_post("expr", n); Ok(()) } - fn pre_visit_select_field_list(&mut self, n: &mut FieldList) -> VisitorResult<Recursion> { + fn pre_visit_select_field_list( + &mut self, + n: &mut FieldList, + ) -> Result<Recursion, Self::Error> { self.push_pre("select_field_list", n); Ok(Continue) } - fn post_visit_select_field_list(&mut self, n: &mut FieldList) -> VisitorResult<()> { + fn post_visit_select_field_list(&mut self, n: &mut FieldList) -> Result<(), Self::Error> { self.push_post("select_field_list", n); Ok(()) } - fn pre_visit_select_field(&mut self, n: &mut Field) -> VisitorResult<Recursion> { + fn pre_visit_select_field(&mut self, n: &mut Field) -> Result<Recursion, Self::Error> { self.push_pre("select_field", n); Ok(Continue) } - fn post_visit_select_field(&mut self, n: &mut Field) -> VisitorResult<()> { + fn post_visit_select_field(&mut self, n: &mut Field) -> Result<(), Self::Error> { self.push_post("select_field", n); Ok(()) } @@ -1383,7 +1444,7 @@ mod test { fn pre_visit_select_from_clause( &mut self, n: &mut FromMeasurementClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("select_from_clause", n); Ok(Continue) } @@ -1391,7 +1452,7 @@ mod test { fn post_visit_select_from_clause( &mut self, n: &mut FromMeasurementClause, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("select_from_clause", n); Ok(()) } @@ -1399,7 +1460,7 @@ mod test { fn pre_visit_select_measurement_selection( &mut self, n: &mut MeasurementSelection, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("select_measurement_selection", n); Ok(Continue) } @@ -1407,37 +1468,46 @@ mod test { fn post_visit_select_measurement_selection( &mut self, n: &mut MeasurementSelection, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("select_measurement_selection", n); Ok(()) } - fn pre_visit_group_by_clause(&mut self, n: &mut GroupByClause) -> VisitorResult<Recursion> { + fn pre_visit_group_by_clause( + &mut self, + n: &mut GroupByClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("group_by_clause", n); Ok(Continue) } - fn post_visit_group_by_clause(&mut self, n: &mut GroupByClause) -> VisitorResult<()> { + fn post_visit_group_by_clause(&mut self, n: &mut GroupByClause) -> Result<(), Self::Error> { self.push_post("group_by_clause", n); Ok(()) } - fn pre_visit_select_dimension(&mut self, n: &mut Dimension) -> VisitorResult<Recursion> { + fn pre_visit_select_dimension( + &mut self, + n: &mut Dimension, + ) -> Result<Recursion, Self::Error> { self.push_pre("select_dimension", n); Ok(Continue) } - fn post_visit_select_dimension(&mut self, n: &mut Dimension) -> VisitorResult<()> { + fn post_visit_select_dimension(&mut self, n: &mut Dimension) -> Result<(), Self::Error> { self.push_post("select_dimension", n); Ok(()) } - fn pre_visit_where_clause(&mut self, n: &mut WhereClause) -> VisitorResult<Recursion> { + fn pre_visit_where_clause( + &mut self, + n: &mut WhereClause, + ) -> Result<Recursion, Self::Error> { 
self.push_pre("where_clause", n); Ok(Continue) } - fn post_visit_where_clause(&mut self, n: &mut WhereClause) -> VisitorResult<()> { + fn post_visit_where_clause(&mut self, n: &mut WhereClause) -> Result<(), Self::Error> { self.push_post("where_clause", n); Ok(()) } @@ -1445,12 +1515,15 @@ mod test { fn pre_visit_show_from_clause( &mut self, n: &mut ShowFromClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("show_from_clause", n); Ok(Continue) } - fn post_visit_show_from_clause(&mut self, n: &mut ShowFromClause) -> VisitorResult<()> { + fn post_visit_show_from_clause( + &mut self, + n: &mut ShowFromClause, + ) -> Result<(), Self::Error> { self.push_post("show_from_clause", n); Ok(()) } @@ -1458,7 +1531,7 @@ mod test { fn pre_visit_qualified_measurement_name( &mut self, n: &mut QualifiedMeasurementName, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("qualified_measurement_name", n); Ok(Continue) } @@ -1466,67 +1539,82 @@ mod test { fn post_visit_qualified_measurement_name( &mut self, n: &mut QualifiedMeasurementName, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("qualified_measurement_name", n); Ok(()) } - fn pre_visit_fill_clause(&mut self, n: &mut FillClause) -> VisitorResult<Recursion> { + fn pre_visit_fill_clause(&mut self, n: &mut FillClause) -> Result<Recursion, Self::Error> { self.push_pre("fill_clause", n); Ok(Continue) } - fn post_visit_fill_clause(&mut self, n: &mut FillClause) -> VisitorResult<()> { + fn post_visit_fill_clause(&mut self, n: &mut FillClause) -> Result<(), Self::Error> { self.push_post("fill_clause", n); Ok(()) } - fn pre_visit_order_by_clause(&mut self, n: &mut OrderByClause) -> VisitorResult<Recursion> { + fn pre_visit_order_by_clause( + &mut self, + n: &mut OrderByClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("order_by_clause", n); Ok(Continue) } - fn post_visit_order_by_clause(&mut self, n: &mut OrderByClause) -> VisitorResult<()> { + fn post_visit_order_by_clause(&mut self, n: &mut OrderByClause) -> Result<(), Self::Error> { self.push_post("order_by_clause", n); Ok(()) } - fn pre_visit_limit_clause(&mut self, n: &mut LimitClause) -> VisitorResult<Recursion> { + fn pre_visit_limit_clause( + &mut self, + n: &mut LimitClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("limit_clause", n); Ok(Continue) } - fn post_visit_limit_clause(&mut self, n: &mut LimitClause) -> VisitorResult<()> { + fn post_visit_limit_clause(&mut self, n: &mut LimitClause) -> Result<(), Self::Error> { self.push_post("limit_clause", n); Ok(()) } - fn pre_visit_offset_clause(&mut self, n: &mut OffsetClause) -> VisitorResult<Recursion> { + fn pre_visit_offset_clause( + &mut self, + n: &mut OffsetClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("offset_clause", n); Ok(Continue) } - fn post_visit_offset_clause(&mut self, n: &mut OffsetClause) -> VisitorResult<()> { + fn post_visit_offset_clause(&mut self, n: &mut OffsetClause) -> Result<(), Self::Error> { self.push_post("offset_clause", n); Ok(()) } - fn pre_visit_slimit_clause(&mut self, n: &mut SLimitClause) -> VisitorResult<Recursion> { + fn pre_visit_slimit_clause( + &mut self, + n: &mut SLimitClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("slimit_clause", n); Ok(Continue) } - fn post_visit_slimit_clause(&mut self, n: &mut SLimitClause) -> VisitorResult<()> { + fn post_visit_slimit_clause(&mut self, n: &mut SLimitClause) -> Result<(), Self::Error> { 
self.push_post("slimit_clause", n); Ok(()) } - fn pre_visit_soffset_clause(&mut self, n: &mut SOffsetClause) -> VisitorResult<Recursion> { + fn pre_visit_soffset_clause( + &mut self, + n: &mut SOffsetClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("soffset_clause", n); Ok(Continue) } - fn post_visit_soffset_clause(&mut self, n: &mut SOffsetClause) -> VisitorResult<()> { + fn post_visit_soffset_clause(&mut self, n: &mut SOffsetClause) -> Result<(), Self::Error> { self.push_post("soffset_clause", n); Ok(()) } @@ -1534,12 +1622,15 @@ mod test { fn pre_visit_timezone_clause( &mut self, n: &mut TimeZoneClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("timezone_clause", n); Ok(Continue) } - fn post_visit_timezone_clause(&mut self, n: &mut TimeZoneClause) -> VisitorResult<()> { + fn post_visit_timezone_clause( + &mut self, + n: &mut TimeZoneClause, + ) -> Result<(), Self::Error> { self.push_post("timezone_clause", n); Ok(()) } @@ -1547,22 +1638,25 @@ mod test { fn pre_visit_extended_on_clause( &mut self, n: &mut ExtendedOnClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("extended_on_clause", n); Ok(Continue) } - fn post_visit_extended_on_clause(&mut self, n: &mut ExtendedOnClause) -> VisitorResult<()> { + fn post_visit_extended_on_clause( + &mut self, + n: &mut ExtendedOnClause, + ) -> Result<(), Self::Error> { self.push_post("extended_on_clause", n); Ok(()) } - fn pre_visit_on_clause(&mut self, n: &mut OnClause) -> VisitorResult<Recursion> { + fn pre_visit_on_clause(&mut self, n: &mut OnClause) -> Result<Recursion, Self::Error> { self.push_pre("on_clause", n); Ok(Continue) } - fn post_visit_on_clause(&mut self, n: &mut OnClause) -> VisitorResult<()> { + fn post_visit_on_clause(&mut self, n: &mut OnClause) -> Result<(), Self::Error> { self.push_pre("on_clause", n); Ok(()) } @@ -1570,7 +1664,7 @@ mod test { fn pre_visit_with_measurement_clause( &mut self, n: &mut WithMeasurementClause, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { self.push_pre("with_measurement_clause", n); Ok(Continue) } @@ -1578,17 +1672,20 @@ mod test { fn post_visit_with_measurement_clause( &mut self, n: &mut WithMeasurementClause, - ) -> VisitorResult<()> { + ) -> Result<(), Self::Error> { self.push_post("with_measurement_clause", n); Ok(()) } - fn pre_visit_with_key_clause(&mut self, n: &mut WithKeyClause) -> VisitorResult<Recursion> { + fn pre_visit_with_key_clause( + &mut self, + n: &mut WithKeyClause, + ) -> Result<Recursion, Self::Error> { self.push_pre("with_key_clause", n); Ok(Continue) } - fn post_visit_with_key_clause(&mut self, n: &mut WithKeyClause) -> VisitorResult<()> { + fn post_visit_with_key_clause(&mut self, n: &mut WithKeyClause) -> Result<(), Self::Error> { self.push_post("with_key_clause", n); Ok(()) } @@ -1703,10 +1800,12 @@ mod test { struct AddLimit; impl VisitorMut for AddLimit { + type Error = (); + fn pre_visit_select_statement( &mut self, n: &mut SelectStatement, - ) -> VisitorResult<Recursion> { + ) -> Result<Recursion, Self::Error> { n.limit = Some(LimitClause(10)); Ok(Continue) } diff --git a/iox_query/src/plan/influxql/field.rs b/iox_query/src/plan/influxql/field.rs index d579cda212..bed619559d 100644 --- a/iox_query/src/plan/influxql/field.rs +++ b/iox_query/src/plan/influxql/field.rs @@ -1,6 +1,6 @@ use influxdb_influxql_parser::expression::Expr; use influxdb_influxql_parser::select::{Field, SelectStatement}; -use influxdb_influxql_parser::visit::{Recursion, 
Visitable, Visitor, VisitorResult}; +use influxdb_influxql_parser::visit::{Recursion, Visitable, Visitor}; use std::ops::Deref; /// Returns the name of the field. @@ -59,7 +59,9 @@ pub(crate) fn field_by_name(select: &SelectStatement, name: &str) -> Option<Fiel struct BinaryExprNameVisitor<'a>(&'a mut Vec<String>); impl<'a> Visitor for BinaryExprNameVisitor<'a> { - fn pre_visit_expr(self, n: &Expr) -> VisitorResult<Recursion<Self>> { + type Error = (); + + fn pre_visit_expr(self, n: &Expr) -> Result<Recursion<Self>, Self::Error> { match n { Expr::Call { name, .. } => self.0.push(name.clone()), Expr::VarRef { name, .. } => self.0.push(name.to_string()), diff --git a/iox_query/src/plan/influxql/rewriter.rs b/iox_query/src/plan/influxql/rewriter.rs index 8b344b9af5..0665e1ed89 100644 --- a/iox_query/src/plan/influxql/rewriter.rs +++ b/iox_query/src/plan/influxql/rewriter.rs @@ -14,7 +14,6 @@ use influxdb_influxql_parser::select::{ SelectStatement, }; use influxdb_influxql_parser::string::Regex; -use influxdb_influxql_parser::visit::{Recursion, Visitable, Visitor, VisitorResult}; use itertools::Itertools; use predicate::rpc_predicate::QueryNamespaceMeta; use query_functions::clean_non_meta_escapes; @@ -151,10 +150,14 @@ fn from_field_and_dimensions( /// has any wildcards or regular expressions in the projection list /// and `GROUP BY` clause respectively. fn has_wildcards(stmt: &SelectStatement) -> (bool, bool) { + use influxdb_influxql_parser::visit::{Recursion, Visitable, Visitor}; + struct HasWildcardsVisitor(bool, bool); impl Visitor for HasWildcardsVisitor { - fn pre_visit_expr(self, n: &Expr) -> VisitorResult<Recursion<Self>> { + type Error = DataFusionError; + + fn pre_visit_expr(self, n: &Expr) -> Result<Recursion<Self>> { Ok( if matches!(n, Expr::Wildcard(_) | Expr::Literal(Literal::Regex(_))) { Recursion::Stop(Self(true, self.1)) @@ -167,12 +170,12 @@ fn has_wildcards(stmt: &SelectStatement) -> (bool, bool) { fn pre_visit_select_from_clause( self, _n: &FromMeasurementClause, - ) -> VisitorResult<Recursion<Self>> { + ) -> Result<Recursion<Self>> { // Don't traverse FROM and potential subqueries Ok(Recursion::Stop(self)) } - fn pre_visit_select_dimension(self, n: &Dimension) -> VisitorResult<Recursion<Self>> { + fn pre_visit_select_dimension(self, n: &Dimension) -> Result<Recursion<Self>> { Ok(if matches!(n, Dimension::Wildcard | Dimension::Regex(_)) { Recursion::Stop(Self(self.0, true)) } else { diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 9c98801415..e62683edfe 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -57,10 +57,12 @@ object_store = { git = "https://github.com/apache/arrow-rs.git", rev = "f5c165ac once_cell = { version = "1", features = ["alloc", "parking_lot", "parking_lot_core", "race", "std"] } parking_lot = { version = "0.12", features = ["arc_lock"] } parquet = { version = "29", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } +phf_shared = { version = "0.11", features = ["std"] } predicates = { version = "2", features = ["diff", "difflib", "float-cmp", "normalize-line-endings", "regex"] } prost = { version = "0.11", features = ["prost-derive", "std"] } prost-types = { version = "0.11", features = ["std"] } rand = { version = "0.8", features = ["alloc", "getrandom", "libc", "rand_chacha", "small_rng", "std", "std_rng"] } +rand_core = { version = 
"0.6", default-features = false, features = ["alloc", "getrandom", "std"] } regex = { version = "1", features = ["aho-corasick", "memchr", "perf", "perf-cache", "perf-dfa", "perf-inline", "perf-literal", "std", "unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } regex-automata = { version = "0.1", features = ["regex-syntax", "std"] } regex-syntax = { version = "0.6", features = ["unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } @@ -121,9 +123,11 @@ nom = { version = "7", features = ["alloc", "std"] } num-traits = { version = "0.2", features = ["i128", "libm", "std"] } once_cell = { version = "1", features = ["alloc", "parking_lot", "parking_lot_core", "race", "std"] } parking_lot = { version = "0.12", features = ["arc_lock"] } +phf_shared = { version = "0.11", features = ["std"] } prost = { version = "0.11", features = ["prost-derive", "std"] } prost-types = { version = "0.11", features = ["std"] } rand = { version = "0.8", features = ["alloc", "getrandom", "libc", "rand_chacha", "small_rng", "std", "std_rng"] } +rand_core = { version = "0.6", default-features = false, features = ["alloc", "getrandom", "std"] } regex = { version = "1", features = ["aho-corasick", "memchr", "perf", "perf-cache", "perf-dfa", "perf-inline", "perf-literal", "std", "unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } regex-syntax = { version = "0.6", features = ["unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } ring = { version = "0.16", features = ["alloc", "dev_urandom_fallback", "once_cell", "std"] }
4184a331eae1c283648bb556d389e6fae60ce57b
Trevor Hilton
2024-09-27 11:59:17
parquet cache with less locking (#25389)
Closes #25382 Closes #25383 This refactors the parquet cache to use less locking by switching from using the `clru` crate to a hand-rolled cache implementation. The new cache still acts as an LRU, but it uses atomics to track hit-time per entry, and handles pruning in a separate background task that is decoupled from insertions into and gets from the cache. The `Cache` type uses a [`DashMap`](https://docs.rs/dashmap/latest/dashmap/struct.DashMap.html) internally to store cache entries. This should help reduce lock contention, and also has the added benefit of not requiring mutability to insert into _or_ get from the map. The cache maps an `object_store::Path` to a `CacheEntry`. On a hit, an entry has its `hit_time` (an `AtomicI64`) updated to the current time. During a prune operation, entries that have the oldest hit times will be removed from the cache. See the `Cache::prune` method for details. The cache is set up with a memory _capacity_ and a _prune percent_. The cache tracks the memory used as entries are added, based on their _size_; when a prune is invoked in the background and the cache has exceeded its capacity, it will prune `prune_percent * cache.len()` entries from the cache. Two tests were added: * `cache_evicts_lru_when_full` to check LRU behaviour of the cache * `cache_hit_while_fetching` to check that a cache entry hit while a request is in flight to fetch that entry will not result in extra calls to the underlying object store
null
refactor: parquet cache with less locking (#25389) Closes #25382 Closes #25383 This refactors the parquet cache to use less locking by switching from using the `clru` crate to a hand-rolled cache implementation. The new cache still acts as an LRU, but it uses atomics to track hit-time per entry, and handles pruning in a separate background task that is decoupled from insertions into and gets from the cache. The `Cache` type uses a [`DashMap`](https://docs.rs/dashmap/latest/dashmap/struct.DashMap.html) internally to store cache entries. This should help reduce lock contention, and also has the added benefit of not requiring mutability to insert into _or_ get from the map. The cache maps an `object_store::Path` to a `CacheEntry`. On a hit, an entry has its `hit_time` (an `AtomicI64`) updated to the current time. During a prune operation, entries that have the oldest hit times will be removed from the cache. See the `Cache::prune` method for details. The cache is set up with a memory _capacity_ and a _prune percent_. The cache tracks the memory used as entries are added, based on their _size_; when a prune is invoked in the background and the cache has exceeded its capacity, it will prune `prune_percent * cache.len()` entries from the cache. Two tests were added: * `cache_evicts_lru_when_full` to check LRU behaviour of the cache * `cache_hit_while_fetching` to check that a cache entry hit while a request is in flight to fetch that entry will not result in extra calls to the underlying object store
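To make the approach described above concrete, here is a minimal, hedged sketch of a `DashMap`-backed cache that records a per-entry hit time in an `AtomicI64` and prunes the least recently hit entries. It is not the code from the diff below: the struct and field names, the `now_ns` parameter (standing in for the commit's `TimeProvider`), and the `Vec` sort (standing in for the commit's `BinaryHeap`) are illustrative simplifications.

```rust
// Illustrative sketch only: a DashMap-backed cache with per-entry hit times
// and percentage-based pruning. Names and the `now_ns` parameter are
// placeholders, not the types from the commit.
use std::sync::atomic::{AtomicI64, AtomicUsize, Ordering};

use dashmap::DashMap;

struct Entry {
    data: Vec<u8>,
    /// Nanosecond timestamp of the last hit, updated on every get.
    hit_time: AtomicI64,
}

struct Cache {
    capacity: usize,              // max bytes the cache should hold
    used: AtomicUsize,            // bytes currently held
    prune_percent: f64,           // fraction of entries dropped per prune
    map: DashMap<String, Entry>,
}

impl Cache {
    fn new(capacity: usize, prune_percent: f64) -> Self {
        Self {
            capacity,
            used: AtomicUsize::new(0),
            prune_percent,
            map: DashMap::new(),
        }
    }

    /// Insert without needing `&mut self`; DashMap handles the sharded locking.
    fn insert(&self, key: String, data: Vec<u8>, now_ns: i64) {
        self.used.fetch_add(data.len(), Ordering::SeqCst);
        self.map
            .insert(key, Entry { data, hit_time: AtomicI64::new(now_ns) });
    }

    /// A hit only stores a new timestamp; no exclusive lock on the whole map.
    fn get(&self, key: &str, now_ns: i64) -> Option<Vec<u8>> {
        let entry = self.map.get(key)?;
        entry.hit_time.store(now_ns, Ordering::SeqCst);
        Some(entry.data.clone())
    }

    /// Drop the oldest-hit `prune_percent * len` entries once over capacity.
    fn prune(&self) {
        if self.used.load(Ordering::SeqCst) < self.capacity {
            return;
        }
        let n = (self.map.len() as f64 * self.prune_percent).floor() as usize;
        // Snapshot (hit_time, key, size) for every entry, oldest first.
        let mut by_age: Vec<(i64, String, usize)> = self
            .map
            .iter()
            .map(|e| {
                (
                    e.value().hit_time.load(Ordering::SeqCst),
                    e.key().clone(),
                    e.value().data.len(),
                )
            })
            .collect();
        by_age.sort_by_key(|(t, _, _)| *t);
        for (_, key, size) in by_age.into_iter().take(n) {
            self.map.remove(&key);
            self.used.fetch_sub(size, Ordering::SeqCst);
        }
    }
}
```

The point of the design is that both `insert` and `get` take `&self`, so readers and writers never contend for a single cache-wide lock; only `prune`, run on a background interval, walks the map.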
diff --git a/Cargo.lock b/Cargo.lock index f41eed1208..f7ba4f732a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -966,12 +966,6 @@ dependencies = [ "workspace-hack", ] -[[package]] -name = "clru" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbd0f76e066e64fdc5631e3bb46381254deab9ef1158292f27c8c57e3bf3fe59" - [[package]] name = "colorchoice" version = "1.0.2" @@ -2822,15 +2816,16 @@ dependencies = [ name = "influxdb3_write" version = "0.1.0" dependencies = [ + "anyhow", "arrow", "arrow_util", "async-trait", "byteorder", "bytes", "chrono", - "clru", "crc32fast", "crossbeam-channel", + "dashmap", "data_types", "datafusion", "datafusion_util", diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index abfb9a6a19..63877620a3 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -260,10 +260,17 @@ pub async fn command(config: Config) -> Result<()> { let object_store: Arc<DynObjectStore> = make_object_store(&config.object_store_config).map_err(Error::ObjectStoreParsing)?; - // TODO(trevor): make this configurable/optional: + let time_provider = Arc::new(SystemProvider::new()); + + // TODO(trevor): make the cache capacity and prune percent configurable/optional: let cache_capacity = 1024 * 1024 * 1024; - let (object_store, parquet_cache) = - create_cached_obj_store_and_oracle(object_store, cache_capacity); + let prune_percent = 0.1; + let (object_store, parquet_cache) = create_cached_obj_store_and_oracle( + object_store, + Arc::clone(&time_provider) as _, + cache_capacity, + prune_percent, + ); let trace_exporter = config.tracing_config.build()?; @@ -322,7 +329,6 @@ pub async fn command(config: Config) -> Result<()> { snapshot_size: config.wal_snapshot_size, }; - let time_provider = Arc::new(SystemProvider::new()); let catalog = persister .load_or_create_catalog() .await diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index 2735d2dcbd..140c6b47fa 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -745,7 +745,9 @@ mod tests { let common_state = crate::CommonServerState::new(Arc::clone(&metrics), None, trace_header_parser).unwrap(); let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new()); - let (object_store, parquet_cache) = test_cached_obj_store_and_oracle(object_store); + let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(start_time))); + let (object_store, parquet_cache) = + test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider) as _); let parquet_store = ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3")); let exec = Arc::new(Executor::new_with_config_and_executor( @@ -761,7 +763,6 @@ mod tests { DedicatedExecutor::new_testing(), )); let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); - let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(start_time))); let dummy_host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("dummy-instance-id"); diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index 58a6feecee..d238f48f50 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -630,9 +630,10 @@ mod tests { // Set up QueryExecutor let object_store: Arc<dyn ObjectStore> = Arc::new(LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap()); - let (object_store, parquet_cache) = 
test_cached_obj_store_and_oracle(object_store); - let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let (object_store, parquet_cache) = + test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider) as _); + let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let executor = make_exec(Arc::clone(&object_store)); let host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("instance-id"); diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml index 0c11c21581..9fb21822ff 100644 --- a/influxdb3_write/Cargo.toml +++ b/influxdb3_write/Cargo.toml @@ -24,14 +24,15 @@ influxdb3_id = { path = "../influxdb3_id" } influxdb3_wal = { path = "../influxdb3_wal" } # crates.io dependencies +anyhow.workspace = true arrow.workspace = true async-trait.workspace = true byteorder.workspace = true bytes.workspace = true chrono.workspace = true -clru.workspace = true crc32fast.workspace = true crossbeam-channel.workspace = true +dashmap.workspace = true datafusion.workspace = true futures.workspace = true futures-util.workspace = true diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs index f8d8478ff2..b40f0137f8 100644 --- a/influxdb3_write/src/last_cache/mod.rs +++ b/influxdb3_write/src/last_cache/mod.rs @@ -1579,13 +1579,15 @@ mod tests { use influxdb3_id::DbId; use influxdb3_wal::{LastCacheDefinition, WalConfig}; use insta::assert_json_snapshot; - use iox_time::{MockProvider, Time}; + use iox_time::{MockProvider, Time, TimeProvider}; async fn setup_write_buffer() -> WriteBufferImpl { let obj_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); - let (obj_store, parquet_cache) = test_cached_obj_store_and_oracle(obj_store); + let time_provider: Arc<dyn TimeProvider> = + Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let (obj_store, parquet_cache) = + test_cached_obj_store_and_oracle(obj_store, Arc::clone(&time_provider)); let persister = Arc::new(Persister::new(obj_store, "test_host")); - let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); let host_id = Arc::from("dummy-host-id"); let instance_id = Arc::from("dummy-instance-id"); WriteBufferImpl::new( diff --git a/influxdb3_write/src/parquet_cache/mod.rs b/influxdb3_write/src/parquet_cache/mod.rs index 23687f8da0..5cbe481dee 100644 --- a/influxdb3_write/src/parquet_cache/mod.rs +++ b/influxdb3_write/src/parquet_cache/mod.rs @@ -1,24 +1,41 @@ //! 
An in-memory cache of Parquet files that are persisted to object storage use std::{ - fmt::Debug, hash::RandomState, num::NonZeroUsize, ops::Range, sync::Arc, time::Duration, + collections::BinaryHeap, + fmt::Debug, + ops::Range, + sync::{ + atomic::{AtomicI64, AtomicUsize, Ordering}, + Arc, + }, + time::Duration, }; +use anyhow::bail; use async_trait::async_trait; use bytes::Bytes; -use clru::{CLruCache, CLruCacheConfig, WeightScale}; -use futures::{StreamExt, TryStreamExt}; -use futures_util::stream::BoxStream; +use dashmap::{DashMap, Entry}; +use futures::{ + future::{BoxFuture, Shared}, + stream::BoxStream, + FutureExt, StreamExt, TryStreamExt, +}; +use iox_time::TimeProvider; use object_store::{ path::Path, Error, GetOptions, GetResult, GetResultPayload, ListResult, MultipartUpload, ObjectMeta, ObjectStore, PutMultipartOpts, PutOptions, PutPayload, PutResult, - Result as ObjectStoreResult, }; -use observability_deps::tracing::{error, info}; +use observability_deps::tracing::{error, info, warn}; use tokio::sync::{ mpsc::{channel, Receiver, Sender}, - oneshot, Mutex, + oneshot, }; +/// Shared future type for cache values that are being fetched +type SharedCacheValueFuture = Shared<BoxFuture<'static, Result<Arc<CacheValue>, DynError>>>; + +/// Dynamic error type that can be cloned +type DynError = Arc<dyn std::error::Error + Send + Sync>; + /// A request to fetch an item at the given `path` from an object store /// /// Contains a notifier to notify the caller that registers the cache request when the item @@ -84,9 +101,16 @@ impl ParquetCacheOracle for MemCacheOracle { /// that returns them as their `Arc<dyn _>` equivalent. pub fn create_cached_obj_store_and_oracle( object_store: Arc<dyn ObjectStore>, + time_provider: Arc<dyn TimeProvider>, cache_capacity: usize, + prune_percent: f64, ) -> (Arc<dyn ObjectStore>, Arc<dyn ParquetCacheOracle>) { - let store = Arc::new(MemCachedObjectStore::new(object_store, cache_capacity)); + let store = Arc::new(MemCachedObjectStore::new( + object_store, + cache_capacity, + time_provider, + prune_percent, + )); let oracle = Arc::new(MemCacheOracle::new(Arc::clone(&store))); (store, oracle) } @@ -94,11 +118,12 @@ pub fn create_cached_obj_store_and_oracle( /// Create a test cached object store with a cache capacity of 1GB pub fn test_cached_obj_store_and_oracle( object_store: Arc<dyn ObjectStore>, + time_provider: Arc<dyn TimeProvider>, ) -> (Arc<dyn ObjectStore>, Arc<dyn ParquetCacheOracle>) { - create_cached_obj_store_and_oracle(object_store, 1024 * 1024 * 1024) + create_cached_obj_store_and_oracle(object_store, time_provider, 1024 * 1024 * 1024, 0.1) } -/// An entry in the cache, containing the actual bytes as well as object store metadata +/// A value in the cache, containing the actual bytes as well as object store metadata #[derive(Debug)] struct CacheValue { data: Bytes, @@ -108,59 +133,263 @@ struct CacheValue { impl CacheValue { /// Get the size of the cache value's memory footprint in bytes fn size(&self) -> usize { - // TODO(trevor): could also calculate the size of the metadata... 
- self.data.len() + let Self { + data, + meta: + ObjectMeta { + location, + last_modified: _, + size: _, + e_tag, + version, + }, + } = self; + + data.len() + + location.as_ref().len() + + e_tag.as_ref().map(|s| s.capacity()).unwrap_or_default() + + version.as_ref().map(|s| s.capacity()).unwrap_or_default() + } + + /// Fetch the value from an object store + async fn fetch(store: Arc<dyn ObjectStore>, path: Path) -> object_store::Result<Self> { + let res = store.get(&path).await?; + let meta = res.meta.clone(); + let data = res.bytes().await?; + Ok(Self { data, meta }) } } -/// The state of a cache entry +/// Holds the state and hit time for an entry in the cache #[derive(Debug)] -enum CacheEntry { +struct CacheEntry { + state: CacheEntryState, + /// The nano-second timestamp of when this value was last hit + hit_time: AtomicI64, +} + +impl CacheEntry { + /// Get the approximate memory footprint of this entry in bytes + fn size(&self) -> usize { + self.state.size() + std::mem::size_of::<AtomicI64>() + } + + fn is_fetching(&self) -> bool { + matches!(self.state, CacheEntryState::Fetching(_)) + } + + fn is_success(&self) -> bool { + matches!(self.state, CacheEntryState::Success(_)) + } +} + +/// The state of a cache entry +/// +/// This implements `Clone` so that a reference to the entry in the `Cache` does not need to be +/// held for long. +#[derive(Debug, Clone)] +enum CacheEntryState { /// The cache entry is being fetched from object store - Fetching, + Fetching(SharedCacheValueFuture), /// The cache entry was successfully fetched and is stored in the cache as a [`CacheValue`] Success(Arc<CacheValue>), - /// The request to the object store failed - Failed, - /// The cache entry was deleted - Deleted, - /// The object is too large for the cache - TooLarge, } -impl CacheEntry { - /// Get the size of thje cache entry in bytes +impl CacheEntryState { + /// Get the approximate size of the cache entry in bytes fn size(&self) -> usize { match self { - CacheEntry::Fetching => 0, - CacheEntry::Success(v) => v.size(), - CacheEntry::Failed => 0, - CacheEntry::Deleted => 0, - CacheEntry::TooLarge => 0, + CacheEntryState::Fetching(_) => 0, + CacheEntryState::Success(v) => v.size(), } } - fn is_fetching(&self) -> bool { - matches!(self, CacheEntry::Fetching) + /// Get the value in this state, or wait for it if it is still fetching + /// + /// This takes `self` as it is meant to be used on an entry's state that has been cloned. + async fn value(self) -> object_store::Result<Arc<CacheValue>> { + match self { + CacheEntryState::Fetching(fut) => fut.await.map_err(|e| Error::Generic { + store: STORE_NAME, + source: Box::new(e), + }), + CacheEntryState::Success(v) => Ok(v), + } } +} - fn is_success(&self) -> bool { - matches!(self, CacheEntry::Success(_)) +/// A cache for storing objects from object storage by their [`Path`] +/// +/// This acts as a Least-Recently-Used (LRU) cache that allows for concurrent reads and writes. See +/// the [`Cache::prune`] method for implementation of how the cache entries are pruned. Pruning must +/// be invoked externally, e.g., on an interval. 
+#[derive(Debug)] +struct Cache { + /// The maximum amount of memory this cache should occupy in bytes + capacity: usize, + /// The current amount of memory being used by the cache in bytes + used: AtomicUsize, + /// What percentage of the total number of cache entries will be pruned during a pruning operation + prune_percent: f64, + /// The map storing cache entries + map: DashMap<Path, CacheEntry>, + /// Provides timestamps for updating the hit time of each cache entry + time_provider: Arc<dyn TimeProvider>, +} + +impl Cache { + /// Create a new cache with a given capacity and prune percent + fn new(capacity: usize, prune_percent: f64, time_provider: Arc<dyn TimeProvider>) -> Self { + Self { + capacity, + used: AtomicUsize::new(0), + prune_percent, + map: DashMap::new(), + time_provider, + } } - fn keep(&self) -> bool { - self.is_fetching() || self.is_success() + /// Get an entry in the cache or `None` if there is not an entry + /// + /// This updates the hit time of the entry and returns a cloned copy of the entry state so that + /// the reference into the map is dropped + fn get(&self, path: &Path) -> Option<CacheEntryState> { + let entry = self.map.get(path)?; + if entry.is_success() { + entry + .hit_time + .store(self.time_provider.now().timestamp_nanos(), Ordering::SeqCst); + } + Some(entry.state.clone()) + } + + /// Check if an entry in the cache is in process of being fetched or if it was already fetched + /// successfully + /// + /// This does not update the hit time of the entry + fn path_already_fetched(&self, path: &Path) -> bool { + self.map.get(path).is_some() + } + + /// Insert a `Fetching` entry to the cache along with the shared future for polling the value + /// being fetched + fn set_fetching(&self, path: &Path, fut: SharedCacheValueFuture) { + let entry = CacheEntry { + state: CacheEntryState::Fetching(fut), + hit_time: AtomicI64::new(self.time_provider.now().timestamp_nanos()), + }; + let additional = entry.size(); + self.map.insert(path.clone(), entry); + self.used.fetch_add(additional, Ordering::SeqCst); + } + + /// Update a `Fetching` entry to a `Success` entry in the cache + fn set_success(&self, path: &Path, value: Arc<CacheValue>) -> Result<(), anyhow::Error> { + match self.map.entry(path.clone()) { + Entry::Occupied(mut o) => { + let entry = o.get_mut(); + if !entry.is_fetching() { + // NOTE(trevor): the only other state is Success, so bailing here just + // means that we leave the entry alone, and since objects in the store are + // treated as immutable, this should be okay. + bail!("attempted to store value in non-fetching cache entry"); + } + entry.state = CacheEntryState::Success(value); + entry + .hit_time + .store(self.time_provider.now().timestamp_nanos(), Ordering::SeqCst); + // TODO(trevor): what if size is greater than cache capacity? 
+ let additional = entry.size(); + self.used.fetch_add(additional, Ordering::SeqCst); + Ok(()) + } + Entry::Vacant(_) => bail!("attempted to set success state on an empty cache entry"), + } + } + + /// Remove an entry from the cache, as well as its associated size from the used capacity + fn remove(&self, path: &Path) { + let Some((_, entry)) = self.map.remove(path) else { + return; + }; + self.used.fetch_sub(entry.state.size(), Ordering::SeqCst); + } + + /// Prune least recently hit entries from the cache + /// + /// This is a no-op if the `used` amount on the cache is not >= its `capacity` + fn prune(&self) { + let used = self.used.load(Ordering::SeqCst); + if used < self.capacity { + return; + } + let n_to_prune = (self.map.len() as f64 * self.prune_percent).floor() as usize; + // use a BinaryHeap to determine the cut-off time, at which, entries that were + // last hit before that time will be pruned: + let mut prune_heap = BinaryHeap::with_capacity(n_to_prune); + + for map_ref in self.map.iter() { + let hit_time = map_ref.value().hit_time.load(Ordering::SeqCst); + let size = map_ref.value().size(); + let path = map_ref.key().as_ref(); + if prune_heap.len() < n_to_prune { + // if the heap isn't full yet, throw this item on: + prune_heap.push(PruneHeapItem { + hit_time, + path_ref: path.into(), + size, + }); + } else if hit_time < prune_heap.peek().map(|item| item.hit_time).unwrap() { + // otherwise, the heap is at its capacity, so only push if the hit_time + // in question is older than the top of the heap (after pop'ing the top + // of the heap to make room) + prune_heap.pop(); + prune_heap.push(PruneHeapItem { + path_ref: path.into(), + hit_time, + size, + }); + } + } + + // track the total size of entries that get freed: + let mut freed = 0; + // drop entries with hit times before the cut-off: + for item in prune_heap { + self.map.remove(&Path::from(item.path_ref.as_ref())); + freed += item.size; + } + // update used mem size with freed amount: + self.used.fetch_sub(freed, Ordering::SeqCst); } } -/// Implements the [`WeightScale`] trait to determine a [`CacheEntry`]'s size on insertion to -/// the cache -#[derive(Debug)] -struct CacheEntryScale; +/// An item that stores what is needed for pruning [`CacheEntry`]s +#[derive(Debug, Eq)] +struct PruneHeapItem { + /// Reference to the entry's `Path` key + path_ref: Arc<str>, + /// Entry's hit time for comparison and heap insertion + hit_time: i64, + /// Entry size used to calculate the amount of memory freed after a prune + size: usize, +} + +impl PartialEq for PruneHeapItem { + fn eq(&self, other: &Self) -> bool { + self.hit_time.eq(&other.hit_time) + } +} -impl WeightScale<Path, CacheEntry> for CacheEntryScale { - fn weight(&self, key: &Path, value: &CacheEntry) -> usize { - key.as_ref().len() + value.size() +impl PartialOrd for PruneHeapItem { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.hit_time.cmp(&other.hit_time)) + } +} + +impl Ord for PruneHeapItem { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.hit_time.cmp(&other.hit_time) } } @@ -168,63 +397,26 @@ impl WeightScale<Path, CacheEntry> for CacheEntryScale { const STORE_NAME: &str = "mem_cached_object_store"; /// An object store with an associated cache that can serve GET-style requests using the cache -/// -/// The least-recently used (LRU) entries will be evicted when new entries are inserted, if the -/// new entry would exceed the cache's memory capacity #[derive(Debug)] pub struct MemCachedObjectStore { /// An inner object 
store for which items will be cached inner: Arc<dyn ObjectStore>, - /// A weighted LRU cache for storing the objects associated with a given path in memory - // NOTE(trevor): this uses a mutex as the CLruCache type needs &mut self for its get method, so - // we always need an exclusive lock on the cache. If this creates a performance bottleneck then - // we will need to look for alternatives. - // - // A Tokio mutex is used to prevent blocking the thread while waiting for a lock, and so that - // the lock can be held accross await points. - cache: Arc<Mutex<CLruCache<Path, CacheEntry, RandomState, CacheEntryScale>>>, + cache: Arc<Cache>, } impl MemCachedObjectStore { - /// Create a new [`MemCachedObjectStore`] with the given memory capacity - fn new(inner: Arc<dyn ObjectStore>, memory_capacity: usize) -> Self { - let cache = CLruCache::with_config( - CLruCacheConfig::new(NonZeroUsize::new(memory_capacity).unwrap()) - .with_scale(CacheEntryScale), - ); + /// Create a new [`MemCachedObjectStore`] + fn new( + inner: Arc<dyn ObjectStore>, + memory_capacity: usize, + time_provider: Arc<dyn TimeProvider>, + prune_percent: f64, + ) -> Self { Self { inner, - cache: Arc::new(Mutex::new(cache)), + cache: Arc::new(Cache::new(memory_capacity, prune_percent, time_provider)), } } - - /// Get an entry in the cache if it contains a successful fetch result, or `None` otherwise - /// - /// This requires `&mut self` as the underlying method on the cache requires a mutable reference - /// in order to update the recency of the entry in the cache - async fn get_cache_value(&self, path: &Path) -> Option<Arc<CacheValue>> { - self.cache - .lock() - .await - .get(path) - .and_then(|entry| match entry { - CacheEntry::Fetching - | CacheEntry::Failed - | CacheEntry::Deleted - | CacheEntry::TooLarge => None, - CacheEntry::Success(v) => Some(Arc::clone(v)), - }) - } - - /// Set the state of a cache entry to `Deleted`, since we cannot remove elements from the - /// cache directly. - async fn delete_cache_value(&self, path: &Path) { - let _ = self - .cache - .lock() - .await - .put_with_weight(path.clone(), CacheEntry::Deleted); - } } impl std::fmt::Display for MemCachedObjectStore { @@ -242,7 +434,7 @@ impl std::fmt::Display for MemCachedObjectStore { /// from the inner store. #[async_trait] impl ObjectStore for MemCachedObjectStore { - async fn put(&self, location: &Path, bytes: PutPayload) -> ObjectStoreResult<PutResult> { + async fn put(&self, location: &Path, bytes: PutPayload) -> object_store::Result<PutResult> { self.inner.put(location, bytes).await } @@ -251,11 +443,14 @@ impl ObjectStore for MemCachedObjectStore { location: &Path, bytes: PutPayload, opts: PutOptions, - ) -> ObjectStoreResult<PutResult> { + ) -> object_store::Result<PutResult> { self.inner.put_opts(location, bytes, opts).await } - async fn put_multipart(&self, location: &Path) -> ObjectStoreResult<Box<dyn MultipartUpload>> { + async fn put_multipart( + &self, + location: &Path, + ) -> object_store::Result<Box<dyn MultipartUpload>> { self.inner.put_multipart(location).await } @@ -263,14 +458,15 @@ impl ObjectStore for MemCachedObjectStore { &self, location: &Path, opts: PutMultipartOpts, - ) -> ObjectStoreResult<Box<dyn MultipartUpload>> { + ) -> object_store::Result<Box<dyn MultipartUpload>> { self.inner.put_multipart_opts(location, opts).await } /// Get an object from the object store. If this object is cached, then it will not make a request /// to the inner object store. 
- async fn get(&self, location: &Path) -> ObjectStoreResult<GetResult> { - if let Some(v) = self.get_cache_value(location).await { + async fn get(&self, location: &Path) -> object_store::Result<GetResult> { + if let Some(state) = self.cache.get(location) { + let v = state.value().await?; Ok(GetResult { payload: GetResultPayload::Stream( futures::stream::iter([Ok(v.data.clone())]).boxed(), @@ -284,13 +480,17 @@ impl ObjectStore for MemCachedObjectStore { } } - async fn get_opts(&self, location: &Path, options: GetOptions) -> ObjectStoreResult<GetResult> { + async fn get_opts( + &self, + location: &Path, + options: GetOptions, + ) -> object_store::Result<GetResult> { // NOTE(trevor): this could probably be supported through the cache if we need it via the // ObjectMeta stored in the cache. For now this is conservative: self.inner.get_opts(location, options).await } - async fn get_range(&self, location: &Path, range: Range<usize>) -> ObjectStoreResult<Bytes> { + async fn get_range(&self, location: &Path, range: Range<usize>) -> object_store::Result<Bytes> { Ok(self .get_ranges(location, &[range]) .await? @@ -305,8 +505,9 @@ impl ObjectStore for MemCachedObjectStore { &self, location: &Path, ranges: &[Range<usize>], - ) -> ObjectStoreResult<Vec<Bytes>> { - if let Some(v) = self.get_cache_value(location).await { + ) -> object_store::Result<Vec<Bytes>> { + if let Some(state) = self.cache.get(location) { + let v = state.value().await?; ranges .iter() .map(|range| { @@ -339,8 +540,9 @@ impl ObjectStore for MemCachedObjectStore { } } - async fn head(&self, location: &Path) -> ObjectStoreResult<ObjectMeta> { - if let Some(v) = self.get_cache_value(location).await { + async fn head(&self, location: &Path) -> object_store::Result<ObjectMeta> { + if let Some(state) = self.cache.get(location) { + let v = state.value().await?; Ok(v.meta.clone()) } else { self.inner.head(location).await @@ -348,22 +550,22 @@ impl ObjectStore for MemCachedObjectStore { } /// Delete an object on object store, but also remove it from the cache. 
- async fn delete(&self, location: &Path) -> ObjectStoreResult<()> { + async fn delete(&self, location: &Path) -> object_store::Result<()> { let result = self.inner.delete(location).await?; - self.delete_cache_value(location).await; + self.cache.remove(location); Ok(result) } fn delete_stream<'a>( &'a self, - locations: BoxStream<'a, ObjectStoreResult<Path>>, - ) -> BoxStream<'a, ObjectStoreResult<Path>> { + locations: BoxStream<'a, object_store::Result<Path>>, + ) -> BoxStream<'a, object_store::Result<Path>> { locations .and_then(|_| futures::future::err(Error::NotImplemented)) .boxed() } - fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, ObjectStoreResult<ObjectMeta>> { + fn list(&self, prefix: Option<&Path>) -> BoxStream<'_, object_store::Result<ObjectMeta>> { self.inner.list(prefix) } @@ -371,27 +573,27 @@ impl ObjectStore for MemCachedObjectStore { &self, prefix: Option<&Path>, offset: &Path, - ) -> BoxStream<'_, ObjectStoreResult<ObjectMeta>> { + ) -> BoxStream<'_, object_store::Result<ObjectMeta>> { self.inner.list_with_offset(prefix, offset) } - async fn list_with_delimiter(&self, prefix: Option<&Path>) -> ObjectStoreResult<ListResult> { + async fn list_with_delimiter(&self, prefix: Option<&Path>) -> object_store::Result<ListResult> { self.inner.list_with_delimiter(prefix).await } - async fn copy(&self, from: &Path, to: &Path) -> ObjectStoreResult<()> { + async fn copy(&self, from: &Path, to: &Path) -> object_store::Result<()> { self.inner.copy(from, to).await } - async fn rename(&self, from: &Path, to: &Path) -> ObjectStoreResult<()> { + async fn rename(&self, from: &Path, to: &Path) -> object_store::Result<()> { self.inner.rename(from, to).await } - async fn copy_if_not_exists(&self, from: &Path, to: &Path) -> ObjectStoreResult<()> { + async fn copy_if_not_exists(&self, from: &Path, to: &Path) -> object_store::Result<()> { self.inner.copy_if_not_exists(from, to).await } - async fn rename_if_not_exists(&self, from: &Path, to: &Path) -> ObjectStoreResult<()> { + async fn rename_if_not_exists(&self, from: &Path, to: &Path) -> object_store::Result<()> { self.inner.rename_if_not_exists(from, to).await } } @@ -409,61 +611,41 @@ fn background_cache_request_handler( ) -> tokio::task::JoinHandle<()> { tokio::spawn(async move { while let Some(CacheRequest { path, notifier }) = rx.recv().await { - // clone the path before acquiring the lock: - let path_cloned = path.clone(); - // Check that the cache does not already contain an entry for the provide path, or that - // it is not already in the process of fetching the given path: - let mut cache_lock = mem_store.cache.lock().await; - if cache_lock - .get(&path) - .is_some_and(|entry| entry.is_fetching() || entry.is_success()) - { + // We assume that objects on object store are immutable, so we can skip objects that + // we have already fetched: + if mem_store.cache.path_already_fetched(&path) { continue; } + // Create a future that will go and fetch the cache value from the store: + let path_cloned = path.clone(); + let store_cloned = Arc::clone(&mem_store.inner); + let fut = async move { + CacheValue::fetch(store_cloned, path_cloned) + .await + .map(Arc::new) + .map_err(|e| Arc::new(e) as _) + } + .boxed() + .shared(); // Put a `Fetching` state in the entry to prevent concurrent requests to the same path: - let _ = cache_lock.put_with_weight(path_cloned, CacheEntry::Fetching); - // Drop the lock before spawning the task below - drop(cache_lock); + mem_store.cache.set_fetching(&path, fut.clone()); let mem_store_captured = 
Arc::clone(&mem_store); tokio::spawn(async move { - let cache_insertion_result = match mem_store_captured.inner.get(&path).await { - Ok(result) => { - let meta = result.meta.clone(); - match result.bytes().await { - Ok(data) => mem_store_captured.cache.lock().await.put_with_weight( - path, - CacheEntry::Success(Arc::new(CacheValue { data, meta })), - ), - Err(error) => { - error!(%error, "failed to retrieve payload from object store get result"); - mem_store_captured - .cache - .lock() - .await - .put_with_weight(path, CacheEntry::Failed) - } - } + match fut.await { + Ok(value) => { + if let Err(error) = mem_store_captured.cache.set_success(&path, value) { + // NOTE(trevor): this would be an error if A) it tried to insert on an already + // successful entry, or B) it tried to insert on an empty entry, in either case + // we do not need to remove the entry to clear a fetching state, as in the + // other failure modes below... + warn!(%error, "failed to set the success state on the cache"); + }; } Err(error) => { error!(%error, "failed to fulfill cache request with object store"); - mem_store_captured - .cache - .lock() - .await - .put_with_weight(path, CacheEntry::Failed) + mem_store_captured.cache.remove(&path); } }; - // If an entry would not fit in the cache at all, the put_with_weight method returns - // it as an Err from above, and we would not have cleared the `Fetching` entry, so - // we need to do that here: - if let Err((k, _)) = cache_insertion_result { - mem_store_captured - .cache - .lock() - .await - .put_with_weight(k, CacheEntry::TooLarge) - .expect("cache capacity is too small"); - } // notify that the cache request has been fulfilled: let _ = notifier.send(()); }); @@ -476,31 +658,32 @@ fn background_cache_request_handler( // TODO(trevor): the interval could be configurable fn background_cache_pruner(mem_store: Arc<MemCachedObjectStore>) -> tokio::task::JoinHandle<()> { tokio::spawn(async move { - let mut interval = tokio::time::interval(Duration::from_secs(60)); + let mut interval = tokio::time::interval(Duration::from_millis(10)); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); loop { interval.tick().await; - - mem_store.cache.lock().await.retain(|_, entry| entry.keep()); + mem_store.cache.prune(); } }) } #[cfg(test)] mod tests { - use std::{ops::Range, sync::Arc}; + use std::{ops::Range, sync::Arc, time::Duration}; use arrow::datatypes::ToByteSlice; use async_trait::async_trait; use bytes::Bytes; use futures::stream::BoxStream; use hashbrown::HashMap; + use iox_time::{MockProvider, Time, TimeProvider}; use object_store::{ memory::InMemory, path::Path, GetOptions, GetResult, ListResult, MultipartUpload, ObjectMeta, ObjectStore, PutMultipartOpts, PutOptions, PutPayload, PutResult, }; use parking_lot::RwLock; use pretty_assertions::assert_eq; + use tokio::sync::Notify; use crate::parquet_cache::{ create_cached_obj_store_and_oracle, test_cached_obj_store_and_oracle, CacheRequest, @@ -526,8 +709,12 @@ mod tests { async fn hit_cache_instead_of_object_store() { // set up the inner test object store and then wrap it with the mem cached store: let inner_store = Arc::new(TestObjectStore::new(Arc::new(InMemory::new()))); - let (cached_store, oracle) = - test_cached_obj_store_and_oracle(Arc::clone(&inner_store) as _); + let time_provider: Arc<dyn TimeProvider> = + Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let (cached_store, oracle) = test_cached_obj_store_and_oracle( + Arc::clone(&inner_store) as _, + Arc::clone(&time_provider), + ); // 
PUT a paylaod into the object store through the outer mem cached store: let path = Path::from("0.parquet"); let payload = b"hello world"; @@ -564,12 +751,19 @@ mod tests { #[tokio::test] async fn cache_evicts_lru_when_full() { let inner_store = Arc::new(TestObjectStore::new(Arc::new(InMemory::new()))); - let cache_capacity_bytes = 32; - let (cached_store, oracle) = - create_cached_obj_store_and_oracle(Arc::clone(&inner_store) as _, cache_capacity_bytes); + let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + // these are magic numbers that will make it so the third entry exceeds the cache capacity: + let cache_capacity_bytes = 60; + let cache_prune_percent = 0.4; + let (cached_store, oracle) = create_cached_obj_store_and_oracle( + Arc::clone(&inner_store) as _, + Arc::clone(&time_provider) as _, + cache_capacity_bytes, + cache_prune_percent, + ); // PUT an entry into the store: - let path_1 = Path::from("0.parquet"); // 9 bytes for path - let payload_1 = b"Janeway"; // 7 bytes for payload + let path_1 = Path::from("0.parquet"); + let payload_1 = b"Janeway"; cached_store .put(&path_1, PutPayload::from_static(payload_1)) .await @@ -583,6 +777,9 @@ mod tests { assert_eq!(1, inner_store.total_get_request_count()); assert_eq!(1, inner_store.get_request_count(&path_1)); + // update time: + time_provider.set(Time::from_timestamp_nanos(1)); + // GET the entry to check its there and was retrieved from cache, i.e., that the request // counts do not change: assert_payload_at_equals!(cached_store, payload_1, path_1); @@ -590,15 +787,18 @@ mod tests { assert_eq!(1, inner_store.get_request_count(&path_1)); // PUT a second entry into the store: - let path_2 = Path::from("1.parquet"); // 9 bytes for path - let payload_2 = b"Paris"; // 5 bytes for payload + let path_2 = Path::from("1.parquet"); + let payload_2 = b"Paris"; cached_store .put(&path_2, PutPayload::from_static(payload_2)) .await .unwrap(); + // update time: + time_provider.set(Time::from_timestamp_nanos(2)); + // cache the second entry and wait for it to complete, this will not evict the first entry - // as both can fit in the cache whose capacity is 32 bytes: + // as both can fit in the cache: let (cache_request, notifier_rx) = CacheRequest::create(path_2.clone()); oracle.register(cache_request); let _ = notifier_rx.await; @@ -607,6 +807,9 @@ mod tests { assert_eq!(1, inner_store.get_request_count(&path_1)); assert_eq!(1, inner_store.get_request_count(&path_2)); + // update time: + time_provider.set(Time::from_timestamp_nanos(3)); + // GET the second entry and assert that it was retrieved from the cache, i.e., that the // request counts do not change: assert_payload_at_equals!(cached_store, payload_2, path_2); @@ -614,22 +817,29 @@ mod tests { assert_eq!(1, inner_store.get_request_count(&path_1)); assert_eq!(1, inner_store.get_request_count(&path_2)); + // update time: + time_provider.set(Time::from_timestamp_nanos(4)); + // GET the first entry again and assert that it was retrieved from the cache as before. 
This - // will also update the LRU so that the first entry (janeway) was used more recently than the - // second entry (paris): + // will also update the hit count so that the first entry (janeway) was used more recently + // than the second entry (paris): assert_payload_at_equals!(cached_store, payload_1, path_1); assert_eq!(2, inner_store.total_get_request_count()); assert_eq!(1, inner_store.get_request_count(&path_1)); // PUT a third entry into the store: - let path_3 = Path::from("2.parquet"); // 9 bytes for the path - let payload_3 = b"Neelix"; // 6 bytes for the payload + let path_3 = Path::from("2.parquet"); + let payload_3 = b"Neelix"; cached_store .put(&path_3, PutPayload::from_static(payload_3)) .await .unwrap(); - // cache the third entry and wait for it to complete, this will evict paris from the cache - // as the LRU entry: + + // update time: + time_provider.set(Time::from_timestamp_nanos(5)); + + // cache the third entry and wait for it to complete, this will push the cache past its + // capacity: let (cache_request, notifier_rx) = CacheRequest::create(path_3.clone()); oracle.register(cache_request); let _ = notifier_rx.await; @@ -639,6 +849,9 @@ mod tests { assert_eq!(1, inner_store.get_request_count(&path_2)); assert_eq!(1, inner_store.get_request_count(&path_3)); + // update time: + time_provider.set(Time::from_timestamp_nanos(6)); + // GET the new entry from the strore, and check that it was served by the cache: assert_payload_at_equals!(cached_store, payload_3, path_3); assert_eq!(3, inner_store.total_get_request_count()); @@ -646,6 +859,9 @@ mod tests { assert_eq!(1, inner_store.get_request_count(&path_2)); assert_eq!(1, inner_store.get_request_count(&path_3)); + // allow some time for pruning: + tokio::time::sleep(Duration::from_millis(100)).await; + // GET paris from the cached store, this will not be served by the cache, because paris was // evicted by neelix: assert_payload_at_equals!(cached_store, payload_2, path_2); @@ -655,12 +871,69 @@ mod tests { assert_eq!(1, inner_store.get_request_count(&path_3)); } + #[tokio::test] + async fn cache_hit_while_fetching() { + // Create a test store with a barrier: + let to_store_notify = Arc::new(Notify::new()); + let from_store_notify = Arc::new(Notify::new()); + let inner_store = Arc::new( + TestObjectStore::new(Arc::new(InMemory::new())) + .with_notifies(Arc::clone(&to_store_notify), Arc::clone(&from_store_notify)), + ); + let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let (cached_store, oracle) = test_cached_obj_store_and_oracle( + Arc::clone(&inner_store) as _, + Arc::clone(&time_provider) as _, + ); + + // PUT an entry into the store: + let path = Path::from("0.parquet"); + let payload = b"Picard"; + cached_store + .put(&path, PutPayload::from_static(payload)) + .await + .unwrap(); + + // cache the entry, but don't wait on it until below in spawned task: + let (cache_request, notifier_rx) = CacheRequest::create(path.clone()); + oracle.register(cache_request); + + // we are in the middle of a get request, i.e., the cache entry is "fetching" + // once this call to notified wakes: + let _ = from_store_notify.notified().await; + + // spawn a thread to wake the in-flight get request initiated by the cache oracle + // after we have started a get request below, such that the get request below hits + // the cache while the entry is still "fetching" state: + let h = tokio::spawn(async move { + to_store_notify.notify_one(); + let _ = notifier_rx.await; + }); + + // make the request to the store, 
which hits the cache in the "fetching" state + // since we haven't made the call to notify the store to continue yet: + assert_payload_at_equals!(cached_store, payload, path); + + // drive the task to completion to ensure that the cache request has been fulfilled: + h.await.unwrap(); + + // there should only have been one request made, i.e., from the cache oracle: + assert_eq!(1, inner_store.total_get_request_count()); + assert_eq!(1, inner_store.get_request_count(&path)); + + // make another request to the store, to be sure that it is in the cache: + assert_payload_at_equals!(cached_store, payload, path); + assert_eq!(1, inner_store.total_get_request_count()); + assert_eq!(1, inner_store.get_request_count(&path)); + } + type RequestCounter = RwLock<HashMap<Path, usize>>; #[derive(Debug)] struct TestObjectStore { inner: Arc<dyn ObjectStore>, get: RequestCounter, + notifies: Option<(Arc<Notify>, Arc<Notify>)>, } impl TestObjectStore { @@ -668,9 +941,15 @@ mod tests { Self { inner, get: Default::default(), + notifies: None, } } + fn with_notifies(mut self, inbound: Arc<Notify>, outbound: Arc<Notify>) -> Self { + self.notifies = Some((inbound, outbound)); + self + } + fn total_get_request_count(&self) -> usize { self.get.read().iter().map(|(_, size)| size).sum() } @@ -686,13 +965,6 @@ mod tests { } } - /// [`MemCachedObjectStore`] implements most [`ObjectStore`] methods as a pass-through, since - /// caching is decided externally. The exception is `delete`, which will have the entry removed - /// from the cache if the delete to the object store was successful. - /// - /// GET-style methods will first check the cache for the object at the given path, before forwarding - /// to the inner [`ObjectStore`]. They do not, however, populate the cache after data has been fetched - /// from the inner store. 
#[async_trait] impl ObjectStore for TestObjectStore { async fn put(&self, location: &Path, bytes: PutPayload) -> object_store::Result<PutResult> { @@ -725,6 +997,10 @@ mod tests { async fn get(&self, location: &Path) -> object_store::Result<GetResult> { *self.get.write().entry(location.clone()).or_insert(0) += 1; + if let Some((inbound, outbound)) = &self.notifies { + outbound.notify_one(); + inbound.notified().await; + } self.inner.get(location).await } diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 09284c935d..9f1a852e01 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -570,12 +570,13 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn writes_data_to_wal_and_is_queryable() { let object_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); - let (object_store, parquet_cache) = test_cached_obj_store_and_oracle(object_store); + let time_provider: Arc<dyn TimeProvider> = + Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let (object_store, parquet_cache) = + test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider)); let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let catalog = persister.load_or_create_catalog().await.unwrap(); let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); - let time_provider: Arc<dyn TimeProvider> = - Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); let write_buffer = WriteBufferImpl::new( Arc::clone(&persister), Arc::new(catalog), @@ -1655,9 +1656,10 @@ mod tests { object_store: Arc<dyn ObjectStore>, wal_config: WalConfig, ) -> (WriteBufferImpl, IOxSessionContext) { - let (object_store, parquet_cache) = test_cached_obj_store_and_oracle(object_store); - let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let time_provider: Arc<dyn TimeProvider> = Arc::new(MockProvider::new(start)); + let (object_store, parquet_cache) = + test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider)); + let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let catalog = persister.load_or_create_catalog().await.unwrap(); let last_cache = LastCacheProvider::new_from_catalog(&catalog.clone_inner()).unwrap(); let wbuf = WriteBufferImpl::new(
99f36f88897c5a3f1fce05881cf891ce4a7f9191
Marco Neumann
2022-10-25 09:16:16
unpatch clap V3 (#5971)
Upstream issue was solved by a new release.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: unpatch clap V3 (#5971) Upstream issue was solved by a new release. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 4d251f3d25..5c325f8962 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -609,8 +609,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.22" -source = "git+https://github.com/crepererum/clap.git?branch=crepererum/issue4418#6ca8aca7ba656f06358440e95ada08019073e1a5" +version = "3.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "bitflags", "clap_lex 0.2.4", @@ -675,7 +676,8 @@ dependencies = [ [[package]] name = "clap_lex" version = "0.2.4" -source = "git+https://github.com/crepererum/clap.git?branch=crepererum/issue4418#6ca8aca7ba656f06358440e95ada08019073e1a5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" dependencies = [ "os_str_bytes", ] @@ -933,7 +935,7 @@ dependencies = [ "atty", "cast", "ciborium", - "clap 3.2.22", + "clap 3.2.23", "criterion-plot", "futures", "itertools", diff --git a/Cargo.toml b/Cargo.toml index 387dd66db0..1a188495e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -132,7 +132,3 @@ opt-level = 3 [profile.dev.package.similar] opt-level = 3 - -[patch.crates-io] -# See https://github.com/clap-rs/clap/issues/4418 -clap3 = { git = "https://github.com/crepererum/clap.git", branch = "crepererum/issue4418", package = "clap" }
0df6c7877c74ee15810c7502429819a95c5fe4f8
Dom Dwyer
2022-11-14 11:13:00
indirect DeferredLoad<TableName> init
Like the NamespaceNameProvider, this commit adds a TableNameProvider to provide decoupled initialisation of a DeferredLoad<TableName> instead of hard-coding in a catalog instance / query code, and plumbs it into position to be used when initialising a TableName.
null
refactor: indirect DeferredLoad<TableName> init Like the NamespaceNameProvider, this commit adds a TableNameProvider to provide decoupled initialisation of a DeferredLoad<TableName> instead of hard-coding in a catalog instance / query code, and plumbs it into position to be used when initialising a TableName.
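As a rough illustration of the indirection this commit describes (not the ingester's actual types), the sketch below shows a provider trait that hands back a deferred name lookup, so the code that constructs table state never embeds a concrete catalog. A boxed closure stands in for the crate's `DeferredLoad`, and `TableId`/`TableName` here are illustrative stand-ins.

```rust
// Sketch of the "name provider" indirection; all names are illustrative.
use std::collections::HashMap;
use std::sync::Arc;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct TableId(i64);

type TableName = String;

/// Constructs a deferred table-name lookup, so callers never hard-code a
/// concrete catalog (or a fixed string) into the hot path.
trait TableNameProvider: Send + Sync {
    fn for_table(&self, id: TableId) -> Box<dyn FnOnce() -> TableName + Send>;
}

/// Production-style provider: captures a shared "catalog" and defers the fetch.
struct CatalogBacked {
    /// Stand-in for `Arc<dyn Catalog>`: a shared table-id -> name lookup.
    catalog: Arc<HashMap<TableId, TableName>>,
}

impl TableNameProvider for CatalogBacked {
    fn for_table(&self, id: TableId) -> Box<dyn FnOnce() -> TableName + Send> {
        let catalog = Arc::clone(&self.catalog);
        // The lookup only happens when the returned closure is invoked,
        // i.e. off the hot path, mirroring what DeferredLoad provides.
        Box::new(move || catalog.get(&id).cloned().expect("unknown table id"))
    }
}

/// Test provider: always resolves to a fixed name, in the spirit of the
/// MockTableNameProvider added in the diff below.
struct FixedName(TableName);

impl TableNameProvider for FixedName {
    fn for_table(&self, _id: TableId) -> Box<dyn FnOnce() -> TableName + Send> {
        let name = self.0.clone();
        Box::new(move || name)
    }
}

fn main() {
    let provider = FixedName("bananas".to_string());
    let deferred = provider.for_table(TableId(1)); // constructed cheaply up front
    // ... later, when the name is actually needed:
    assert_eq!(deferred(), "bananas");
}
```

The mock-style `FixedName` provider plays the same role as the `MockTableNameProvider` in the diff below: tests can build namespace/shard state without a catalog, while production wires in a catalog-backed resolver.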
diff --git a/ingester/src/data.rs b/ingester/src/data.rs index 7137d2cd1e..f58786d7f8 100644 --- a/ingester/src/data.rs +++ b/ingester/src/data.rs @@ -45,6 +45,7 @@ use self::{ namespace::name_resolver::{NamespaceNameProvider, NamespaceNameResolver}, partition::resolver::{CatalogPartitionResolver, PartitionCache, PartitionProvider}, shard::ShardData, + table::name_resolver::{TableNameProvider, TableNameResolver}, }; #[cfg(test)] @@ -204,6 +205,13 @@ impl IngesterData { backoff_config.clone(), )); + // Initialise the deferred table name resolver. + let table_name_provider: Arc<dyn TableNameProvider> = Arc::new(TableNameResolver::new( + TABLE_NAME_PRE_FETCH, + Arc::clone(&catalog), + backoff_config.clone(), + )); + let shards = shards .into_iter() .map(|(id, index)| { @@ -213,6 +221,7 @@ impl IngesterData { index, id, Arc::clone(&namespace_name_provider), + Arc::clone(&table_name_provider), Arc::clone(&partition_provider), Arc::clone(&metrics), ), @@ -356,7 +365,7 @@ impl Persister for IngesterData { // Begin resolving the load-deferred name concurrently if it is not // already available. - let table_name = Arc::clone(&table_data.table_name()); + let table_name = Arc::clone(table_data.table_name()); table_name.prefetch_now(); let partition = table_data.get_partition(partition_id).unwrap_or_else(|| { @@ -381,7 +390,7 @@ impl Persister for IngesterData { assert_eq!(guard.shard_id(), shard_id); assert_eq!(guard.namespace_id(), namespace_id); assert_eq!(guard.table_id(), table_id); - assert!(Arc::ptr_eq(&*guard.table_name(), &table_name)); + assert!(Arc::ptr_eq(guard.table_name(), &table_name)); partition_key = guard.partition_key().clone(); sort_key = guard.sort_key().clone(); @@ -711,7 +720,10 @@ mod tests { use super::*; use crate::{ - data::{namespace::NamespaceData, partition::resolver::CatalogPartitionResolver}, + data::{ + namespace::NamespaceData, partition::resolver::CatalogPartitionResolver, + table::name_resolver::mock::MockTableNameProvider, + }, deferred_load::DeferredLoad, lifecycle::{LifecycleConfig, LifecycleManager}, }; @@ -1515,6 +1527,7 @@ mod tests { let data = NamespaceData::new( namespace.id, DeferredLoad::new(Duration::from_millis(1), async { "foo".into() }), + Arc::new(MockTableNameProvider::new(table.name)), shard.id, partition_provider, &metrics, diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs index 89776f9e91..fe5c613e8f 100644 --- a/ingester/src/data/namespace.rs +++ b/ingester/src/data/namespace.rs @@ -15,7 +15,7 @@ use write_summary::ShardProgress; use super::triggers::TestTriggers; use super::{ partition::resolver::PartitionProvider, - table::{TableData, TableName}, + table::{name_resolver::TableNameProvider, TableData, TableName}, TABLE_NAME_PRE_FETCH, }; use crate::{ @@ -60,7 +60,18 @@ pub(crate) struct NamespaceData { /// The catalog ID of the shard this namespace is being populated from. shard_id: ShardId, + /// A set of tables this [`NamespaceData`] instance has processed + /// [`DmlOperation`]'s for. + /// + /// The [`TableNameProvider`] acts as a [`DeferredLoad`] constructor to + /// resolve the [`TableName`] for new [`TableData`] out of the hot path. + /// + /// [`TableName`]: crate::data::table::TableName tables: ArcMap<TableId, TableData>, + #[allow(unused)] + table_name_resolver: Arc<dyn TableNameProvider>, + /// The count of tables initialised in this Ingester so far, across all + /// shards / namespaces. 
table_count: U64Counter, /// The resolver of `(shard_id, table_id, partition_key)` to @@ -120,6 +131,7 @@ impl NamespaceData { pub(super) fn new( namespace_id: NamespaceId, namespace_name: DeferredLoad<NamespaceName>, + table_name_resolver: Arc<dyn TableNameProvider>, shard_id: ShardId, partition_provider: Arc<dyn PartitionProvider>, metrics: &metric::Registry, @@ -136,6 +148,7 @@ impl NamespaceData { namespace_name, shard_id, tables: Default::default(), + table_name_resolver, table_count, buffering_sequence_number: RwLock::new(None), partition_provider, @@ -309,7 +322,10 @@ mod tests { use metric::{Attributes, Metric}; use crate::{ - data::partition::{resolver::MockPartitionProvider, PartitionData, SortKeyState}, + data::{ + partition::{resolver::MockPartitionProvider, PartitionData, SortKeyState}, + table::name_resolver::mock::MockTableNameProvider, + }, deferred_load, lifecycle::mock_handle::MockLifecycleHandle, test_util::{make_write_op, TEST_TABLE}, @@ -348,6 +364,7 @@ mod tests { let ns = NamespaceData::new( NAMESPACE_ID, DeferredLoad::new(Duration::from_millis(1), async { NAMESPACE_NAME.into() }), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), SHARD_ID, partition_provider, &metrics, diff --git a/ingester/src/data/namespace/name_resolver.rs b/ingester/src/data/namespace/name_resolver.rs index 282bc50714..47bbbf6668 100644 --- a/ingester/src/data/namespace/name_resolver.rs +++ b/ingester/src/data/namespace/name_resolver.rs @@ -8,6 +8,8 @@ use crate::deferred_load::DeferredLoad; use super::NamespaceName; +/// An abstract provider of a [`DeferredLoad`] configured to fetch the +/// [`NamespaceName`] of the specified [`NamespaceId`]. pub(crate) trait NamespaceNameProvider: Send + Sync + std::fmt::Debug { fn for_namespace(&self, id: NamespaceId) -> DeferredLoad<NamespaceName>; } diff --git a/ingester/src/data/partition/resolver/trait.rs b/ingester/src/data/partition/resolver/trait.rs index c2481569a5..00d459729b 100644 --- a/ingester/src/data/partition/resolver/trait.rs +++ b/ingester/src/data/partition/resolver/trait.rs @@ -72,7 +72,7 @@ mod tests { shard_id, namespace_id, table_id, - table_name.clone(), + Arc::clone(&table_name), SortKeyState::Provided(None), None, ); diff --git a/ingester/src/data/shard.rs b/ingester/src/data/shard.rs index cea0c5d589..f8f4c29058 100644 --- a/ingester/src/data/shard.rs +++ b/ingester/src/data/shard.rs @@ -10,6 +10,7 @@ use write_summary::ShardProgress; use super::{ namespace::{name_resolver::NamespaceNameProvider, NamespaceData}, partition::resolver::PartitionProvider, + table::name_resolver::TableNameProvider, DmlApplyAction, }; use crate::{arcmap::ArcMap, lifecycle::LifecycleHandle}; @@ -39,6 +40,12 @@ pub(crate) struct ShardData { /// [`NamespaceName`]: data_types::NamespaceName namespaces: ArcMap<NamespaceId, NamespaceData>, namespace_name_resolver: Arc<dyn NamespaceNameProvider>, + /// The [`TableName`] provider used by [`NamespaceData`] to initialise a + /// [`TableData`]. 
+ /// + /// [`TableName`]: crate::data::table::TableName + /// [`TableData`]: crate::data::table::TableData + table_name_resolver: Arc<dyn TableNameProvider>, metrics: Arc<metric::Registry>, namespace_count: U64Counter, @@ -50,6 +57,7 @@ impl ShardData { shard_index: ShardIndex, shard_id: ShardId, namespace_name_resolver: Arc<dyn NamespaceNameProvider>, + table_name_resolver: Arc<dyn TableNameProvider>, partition_provider: Arc<dyn PartitionProvider>, metrics: Arc<metric::Registry>, ) -> Self { @@ -65,6 +73,7 @@ impl ShardData { shard_id, namespaces: Default::default(), namespace_name_resolver, + table_name_resolver, metrics, partition_provider, namespace_count, @@ -86,6 +95,7 @@ impl ShardData { Arc::new(NamespaceData::new( namespace_id, self.namespace_name_resolver.for_namespace(namespace_id), + Arc::clone(&self.table_name_resolver), self.shard_id, Arc::clone(&self.partition_provider), &self.metrics, @@ -131,7 +141,7 @@ mod tests { data::{ namespace::name_resolver::mock::MockNamespaceNameProvider, partition::{resolver::MockPartitionProvider, PartitionData, SortKeyState}, - table::TableName, + table::{name_resolver::mock::MockTableNameProvider, TableName}, }, deferred_load::DeferredLoad, lifecycle::mock_handle::MockLifecycleHandle, @@ -172,6 +182,7 @@ mod tests { SHARD_INDEX, SHARD_ID, Arc::new(MockNamespaceNameProvider::new(NAMESPACE_NAME)), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), partition_provider, Arc::clone(&metrics), ); diff --git a/ingester/src/data/table.rs b/ingester/src/data/table.rs index c9fde6f822..ad757c79ad 100644 --- a/ingester/src/data/table.rs +++ b/ingester/src/data/table.rs @@ -1,5 +1,7 @@ //! Table level data buffer structures. +pub(crate) mod name_resolver; + use std::sync::Arc; use data_types::{NamespaceId, PartitionId, PartitionKey, SequenceNumber, ShardId, TableId}; diff --git a/ingester/src/data/table/name_resolver.rs b/ingester/src/data/table/name_resolver.rs new file mode 100644 index 0000000000..c3d6dc832c --- /dev/null +++ b/ingester/src/data/table/name_resolver.rs @@ -0,0 +1,140 @@ +use std::{sync::Arc, time::Duration}; + +use backoff::{Backoff, BackoffConfig}; +use data_types::TableId; +use iox_catalog::interface::Catalog; + +use crate::deferred_load::DeferredLoad; + +use super::TableName; + +/// An abstract provider of a [`DeferredLoad`] configured to fetch the +/// [`TableName`] of the specified [`TableId`]. +pub(crate) trait TableNameProvider: Send + Sync + std::fmt::Debug { + fn for_table(&self, id: TableId) -> DeferredLoad<TableName>; +} + +#[derive(Debug)] +pub(crate) struct TableNameResolver { + max_smear: Duration, + catalog: Arc<dyn Catalog>, + backoff_config: BackoffConfig, +} + +impl TableNameResolver { + pub(crate) fn new( + max_smear: Duration, + catalog: Arc<dyn Catalog>, + backoff_config: BackoffConfig, + ) -> Self { + Self { + max_smear, + catalog, + backoff_config, + } + } + + /// Fetch the [`TableName`] from the [`Catalog`] for specified + /// `table_id`, retrying endlessly when errors occur. + pub(crate) async fn fetch( + table_id: TableId, + catalog: Arc<dyn Catalog>, + backoff_config: BackoffConfig, + ) -> TableName { + Backoff::new(&backoff_config) + .retry_all_errors("fetch table name", || async { + let s = catalog + .repositories() + .await + .tables() + .get_by_id(table_id) + .await? 
+ .expect("resolving table name for non-existent table id") + .name + .into(); + + Result::<_, iox_catalog::interface::Error>::Ok(s) + }) + .await + .expect("retry forever") + } +} + +impl TableNameProvider for TableNameResolver { + fn for_table(&self, id: TableId) -> DeferredLoad<TableName> { + DeferredLoad::new( + self.max_smear, + Self::fetch(id, Arc::clone(&self.catalog), self.backoff_config.clone()), + ) + } +} + +#[cfg(test)] +pub(crate) mod mock { + use super::*; + + #[derive(Debug)] + pub(crate) struct MockTableNameProvider { + name: TableName, + } + + impl MockTableNameProvider { + pub(crate) fn new(name: impl Into<TableName>) -> Self { + Self { name: name.into() } + } + } + + impl Default for MockTableNameProvider { + fn default() -> Self { + Self::new("bananas") + } + } + + impl TableNameProvider for MockTableNameProvider { + fn for_table(&self, _id: TableId) -> DeferredLoad<TableName> { + let name = self.name.clone(); + DeferredLoad::new(Duration::from_secs(1), async { name }) + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use data_types::ShardIndex; + use test_helpers::timeout::FutureTimeout; + + use crate::test_util::populate_catalog; + + use super::*; + + const SHARD_INDEX: ShardIndex = ShardIndex::new(24); + const TABLE_NAME: &str = "bananas"; + const NAMESPACE_NAME: &str = "platanos"; + + #[tokio::test] + async fn test_fetch() { + let metrics = Arc::new(metric::Registry::default()); + let backoff_config = BackoffConfig::default(); + let catalog: Arc<dyn Catalog> = + Arc::new(iox_catalog::mem::MemCatalog::new(Arc::clone(&metrics))); + + // Populate the catalog with the shard / namespace / table + let (_shard_id, _ns_id, table_id) = + populate_catalog(&*catalog, SHARD_INDEX, NAMESPACE_NAME, TABLE_NAME).await; + + let fetcher = Arc::new(TableNameResolver::new( + Duration::from_secs(10), + Arc::clone(&catalog), + backoff_config.clone(), + )); + + let got = fetcher + .for_table(table_id) + .get() + .with_timeout_panic(Duration::from_secs(5)) + .await; + assert_eq!(&**got, TABLE_NAME); + } +}
e3fc873b2e96d4fb50fd494e37c360bbeef7b2de
Luke Bond
2023-01-24 12:59:58
enable object store metrics on ingester2 (#6672)
Signed-off-by: Luke Bond <[email protected]>
Signed-off-by: Luke Bond <[email protected]>
feat: enable object store metrics on ingester2 (#6672) Signed-off-by: Luke Bond <[email protected]> Signed-off-by: Luke Bond <[email protected]>
diff --git a/influxdb_iox/src/commands/run/ingester2.rs b/influxdb_iox/src/commands/run/ingester2.rs index 980ce1dc11..ae0ffbd96a 100644 --- a/influxdb_iox/src/commands/run/ingester2.rs +++ b/influxdb_iox/src/commands/run/ingester2.rs @@ -7,11 +7,14 @@ use clap_blocks::{ run_config::RunConfig, }; use iox_query::exec::Executor; +use iox_time::{SystemProvider, TimeProvider}; use ioxd_common::{ server_type::{CommonServerState, CommonServerStateError}, Service, }; use ioxd_ingester2::create_ingester_server_type; +use object_store::DynObjectStore; +use object_store_metrics::ObjectStoreMetrics; use observability_deps::tracing::*; use parquet_file::storage::{ParquetStorage, StorageId}; use std::sync::Arc; @@ -90,6 +93,7 @@ pub async fn command(config: Config) -> Result<()> { } let common_state = CommonServerState::from_config(config.run_config.clone())?; + let time_provider = Arc::new(SystemProvider::new()) as Arc<dyn TimeProvider>; let metric_registry = setup_metric_registry(); let catalog = config @@ -104,6 +108,13 @@ pub async fn command(config: Config) -> Result<()> { let object_store = make_object_store(config.run_config.object_store_config()) .map_err(Error::ObjectStoreParsing)?; + // Decorate the object store with a metric recorder. + let object_store: Arc<DynObjectStore> = Arc::new(ObjectStoreMetrics::new( + object_store, + Arc::clone(&time_provider), + &metric_registry, + )); + let server_type = create_ingester_server_type( &common_state, catalog,
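Worth noting about the pattern: the metrics wrapper is a plain decorator over the object store trait object, so any server type can opt in the same way. A minimal sketch, reusing the constructor call shown in the diff; the helper function itself is made up for illustration.

use std::sync::Arc;
use iox_time::TimeProvider;
use object_store::DynObjectStore;
use object_store_metrics::ObjectStoreMetrics;

// Wrap an existing store so its calls are recorded in the metric registry.
fn instrument_store(
    inner: Arc<DynObjectStore>,
    time_provider: Arc<dyn TimeProvider>,
    registry: &metric::Registry,
) -> Arc<DynObjectStore> {
    Arc::new(ObjectStoreMetrics::new(inner, time_provider, registry))
}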
ca31c1eadefc562821acf0c16188d08214909c5e
Marco Neumann
2023-06-29 13:11:44
hook up tokio metrics (#8050)
* feat: metrics for main tokio runtime * feat: instrument executor tokio runtime ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: hook up tokio metrics (#8050) * feat: metrics for main tokio runtime * feat: instrument executor tokio runtime --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index f1e3f6378b..2fc4913dbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1723,12 +1723,14 @@ version = "0.1.0" dependencies = [ "futures", "libc", + "metric", "observability_deps", "once_cell", "parking_lot 0.12.1", "pin-project", "tokio", "tokio-util", + "tokio_metrics_bridge", "workspace-hack", ] @@ -2555,6 +2557,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", + "tokio_metrics_bridge", "tonic", "trace_exporters", "trogging", @@ -2859,6 +2862,7 @@ dependencies = [ "indexmap 2.0.0", "insta", "itertools 0.11.0", + "metric", "object_store", "observability_deps", "once_cell", diff --git a/executor/Cargo.toml b/executor/Cargo.toml index ee95d1f50c..15ddc6fe27 100644 --- a/executor/Cargo.toml +++ b/executor/Cargo.toml @@ -7,12 +7,14 @@ license.workspace = true [dependencies] futures = "0.3" +metric = { path = "../metric" } observability_deps = { path = "../observability_deps" } once_cell = { version = "1.18", features = ["parking_lot"] } parking_lot = "0.12" pin-project = "1.1" tokio = { version = "1.29" } tokio-util = { version = "0.7.8" } +tokio_metrics_bridge = { path = "../tokio_metrics_bridge" } workspace-hack = { version = "0.1", path = "../workspace-hack" } # use libc on unix like platforms to set worker priority in DedicatedExecutor diff --git a/executor/src/lib.rs b/executor/src/lib.rs index 33af856044..0b15fe0366 100644 --- a/executor/src/lib.rs +++ b/executor/src/lib.rs @@ -15,6 +15,8 @@ unused_crate_dependencies )] +use metric::Registry; +use tokio_metrics_bridge::setup_tokio_metrics; // Workaround for "unused crate" lint false positives. use workspace_hack as _; @@ -178,8 +180,14 @@ impl std::fmt::Debug for DedicatedExecutor { } /// [`DedicatedExecutor`] for testing purposes. -static TESTING_EXECUTOR: Lazy<DedicatedExecutor> = - Lazy::new(|| DedicatedExecutor::new_inner("testing", NonZeroUsize::new(1).unwrap(), true)); +static TESTING_EXECUTOR: Lazy<DedicatedExecutor> = Lazy::new(|| { + DedicatedExecutor::new_inner( + "testing", + NonZeroUsize::new(1).unwrap(), + Arc::new(Registry::default()), + true, + ) +}); impl DedicatedExecutor { /// Creates a new `DedicatedExecutor` with a dedicated tokio @@ -198,12 +206,20 @@ impl DedicatedExecutor { /// drop a runtime in a context where blocking is not allowed. 
This /// happens when a runtime is dropped from within an asynchronous /// context.', .../tokio-1.4.0/src/runtime/blocking/shutdown.rs:51:21 - pub fn new(thread_name: &str, num_threads: NonZeroUsize) -> Self { - Self::new_inner(thread_name, num_threads, false) - } - - fn new_inner(thread_name: &str, num_threads: NonZeroUsize, testing: bool) -> Self { - let thread_name = thread_name.to_string(); + pub fn new( + thread_name: &'static str, + num_threads: NonZeroUsize, + metric_registry: Arc<Registry>, + ) -> Self { + Self::new_inner(thread_name, num_threads, metric_registry, false) + } + + fn new_inner( + thread_name: &'static str, + num_threads: NonZeroUsize, + metric_registry: Arc<Registry>, + testing: bool, + ) -> Self { let thread_counter = Arc::new(AtomicUsize::new(1)); let (tx_tasks, rx_tasks) = std::sync::mpsc::channel::<Task>(); @@ -226,6 +242,8 @@ impl DedicatedExecutor { .build() .expect("Creating tokio runtime"); + setup_tokio_metrics(runtime.metrics(), thread_name, metric_registry); + runtime.block_on(async move { // Dropping the tokio runtime only waits for tasks to yield not to complete // @@ -415,7 +433,7 @@ mod tests { async fn basic() { let barrier = Arc::new(Barrier::new(2)); - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); let dedicated_task = exec.spawn(do_work(42, Arc::clone(&barrier))); // Note the dedicated task will never complete if it runs on @@ -433,7 +451,7 @@ mod tests { #[tokio::test] async fn basic_clone() { let barrier = Arc::new(Barrier::new(2)); - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); // Run task on clone should work fine let dedicated_task = exec.clone().spawn(do_work(42, Arc::clone(&barrier))); barrier.wait(); @@ -445,7 +463,7 @@ mod tests { #[tokio::test] async fn drop_clone() { let barrier = Arc::new(Barrier::new(2)); - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); drop(exec.clone()); @@ -467,7 +485,7 @@ mod tests { } } - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); let _s = S(exec); // this must not lead to a double-panic and SIGILL @@ -479,7 +497,7 @@ mod tests { let barrier = Arc::new(Barrier::new(3)); // make an executor with two threads - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(2).unwrap()); + let exec = exec2(); let dedicated_task1 = exec.spawn(do_work(11, Arc::clone(&barrier))); let dedicated_task2 = exec.spawn(do_work(42, Arc::clone(&barrier))); @@ -495,7 +513,7 @@ mod tests { #[tokio::test] async fn worker_priority() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(2).unwrap()); + let exec = exec2(); let dedicated_task = exec.spawn(async move { get_current_thread_priority() }); @@ -506,7 +524,7 @@ mod tests { #[tokio::test] async fn tokio_spawn() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(2).unwrap()); + let exec = exec2(); // spawn a task that spawns to other tasks and ensure they run on the dedicated // executor @@ -534,7 +552,7 @@ mod tests { #[tokio::test] async fn panic_on_executor_str() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); let dedicated_task = exec.spawn(async move { if true { panic!("At the disco, on the dedicated task scheduler"); @@ -555,7 +573,7 @@ mod tests { #[tokio::test] 
async fn panic_on_executor_string() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); let dedicated_task = exec.spawn(async move { if true { panic!("{} {}", 1, 2); @@ -573,7 +591,7 @@ mod tests { #[tokio::test] async fn panic_on_executor_other() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); let dedicated_task = exec.spawn(async move { if true { panic_any(1) @@ -594,7 +612,7 @@ mod tests { let barrier = Arc::new(Barrier::new(2)); let captured = Arc::clone(&barrier); - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); let dedicated_task = exec.spawn(async move { tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; do_work(42, captured).await @@ -612,7 +630,7 @@ mod tests { #[tokio::test] async fn executor_submit_task_after_shutdown() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); // Simulate trying to submit tasks once executor has shutdown exec.shutdown(); @@ -630,7 +648,7 @@ mod tests { #[tokio::test] async fn executor_submit_task_after_clone_shutdown() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); // shutdown the clone (but not the exec) exec.clone().join().await; @@ -650,14 +668,14 @@ mod tests { #[tokio::test] async fn executor_join() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); // test it doesn't hang exec.join().await; } #[tokio::test] async fn executor_join2() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); // test it doesn't hang exec.join().await; exec.join().await; @@ -666,7 +684,7 @@ mod tests { #[tokio::test] #[allow(clippy::redundant_clone)] async fn executor_clone_join() { - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); // test it doesn't hang exec.clone().join().await; exec.clone().join().await; @@ -676,7 +694,7 @@ mod tests { #[tokio::test] async fn drop_receiver() { // create empty executor - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); assert_eq!(exec.tasks(), 0); // create first blocked task @@ -707,7 +725,7 @@ mod tests { #[tokio::test] async fn detach_receiver() { // create empty executor - let exec = DedicatedExecutor::new("Test DedicatedExecutor", NonZeroUsize::new(1).unwrap()); + let exec = exec(); assert_eq!(exec.tasks(), 0); // create first task @@ -761,4 +779,20 @@ mod tests { .await .expect("Did not find expected num tasks within a second") } + + fn exec() -> DedicatedExecutor { + exec_with_threads(1) + } + + fn exec2() -> DedicatedExecutor { + exec_with_threads(2) + } + + fn exec_with_threads(threads: usize) -> DedicatedExecutor { + DedicatedExecutor::new( + "Test DedicatedExecutor", + NonZeroUsize::new(threads).unwrap(), + Arc::new(Registry::default()), + ) + } } diff --git a/influxdb_iox/Cargo.toml b/influxdb_iox/Cargo.toml index 7e04326777..204c9db66c 100644 --- a/influxdb_iox/Cargo.toml +++ b/influxdb_iox/Cargo.toml @@ -39,6 +39,7 @@ prost = { version = "0.11" } iox_query = { path = "../iox_query" } schema = { path = "../schema" } iox_time = { path = "../iox_time" } +tokio_metrics_bridge = { path = "../tokio_metrics_bridge" } 
trace_exporters = { path = "../trace_exporters" } trogging = { path = "../trogging", default-features = false, features = ["clap"] } wal = { version = "0.1", path = "../wal" } diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs index 2c2e3519a0..6c5231b5ce 100644 --- a/influxdb_iox/src/commands/run/all_in_one.rs +++ b/influxdb_iox/src/commands/run/all_in_one.rs @@ -606,6 +606,7 @@ pub async fn command(config: Config) -> Result<()> { parquet_store_real.id(), Arc::clone(parquet_store_real.object_store()), )]), + metric_registry: Arc::clone(&metrics), mem_pool_size: querier_config.exec_mem_pool_bytes, })); diff --git a/influxdb_iox/src/commands/run/compactor.rs b/influxdb_iox/src/commands/run/compactor.rs index 5b90ca4571..efd0a844ff 100644 --- a/influxdb_iox/src/commands/run/compactor.rs +++ b/influxdb_iox/src/commands/run/compactor.rs @@ -112,6 +112,7 @@ pub async fn command(config: Config) -> Result<(), Error> { .into_iter() .map(|store| (store.id(), Arc::clone(store.object_store()))) .collect(), + metric_registry: Arc::clone(&metric_registry), mem_pool_size: config.compactor_config.exec_mem_pool_bytes, })); let time_provider = Arc::new(SystemProvider::new()); diff --git a/influxdb_iox/src/commands/run/ingester.rs b/influxdb_iox/src/commands/run/ingester.rs index e37d2c6e8f..80cf23a028 100644 --- a/influxdb_iox/src/commands/run/ingester.rs +++ b/influxdb_iox/src/commands/run/ingester.rs @@ -104,6 +104,7 @@ pub async fn command(config: Config) -> Result<()> { let exec = Arc::new(Executor::new( config.exec_thread_count, config.exec_mem_pool_bytes, + Arc::clone(&metric_registry), )); let object_store = make_object_store(config.run_config.object_store_config()) .map_err(Error::ObjectStoreParsing)?; diff --git a/influxdb_iox/src/commands/run/querier.rs b/influxdb_iox/src/commands/run/querier.rs index ccfed9a637..df2a30581d 100644 --- a/influxdb_iox/src/commands/run/querier.rs +++ b/influxdb_iox/src/commands/run/querier.rs @@ -104,6 +104,7 @@ pub async fn command(config: Config) -> Result<(), Error> { let exec = Arc::new(Executor::new( num_threads, config.querier_config.exec_mem_pool_bytes, + Arc::clone(&metric_registry), )); let server_type = create_querier_server_type(QuerierServerTypeArgs { diff --git a/influxdb_iox/src/process_info.rs b/influxdb_iox/src/process_info.rs index 95247c1955..1cf25a081f 100644 --- a/influxdb_iox/src/process_info.rs +++ b/influxdb_iox/src/process_info.rs @@ -3,6 +3,8 @@ use std::sync::Arc; use iox_time::{SystemProvider, Time, TimeProvider}; use metric::U64Gauge; use once_cell::sync::Lazy; +use tokio::runtime::Handle; +use tokio_metrics_bridge::setup_tokio_metrics; /// Package version. 
pub static IOX_VERSION: Lazy<&'static str> = @@ -51,6 +53,9 @@ pub fn setup_metric_registry() -> Arc<metric::Registry> { #[cfg(all(not(feature = "heappy"), feature = "jemalloc_replacing_malloc"))] registry.register_instrument("jemalloc_metrics", crate::jemalloc::JemallocMetrics::new); + // Register tokio metric for main runtime + setup_tokio_metrics(Handle::current().metrics(), "main", Arc::clone(&registry)); + registry } diff --git a/iox_query/Cargo.toml b/iox_query/Cargo.toml index 388680b2e7..5b2a9b6dbc 100644 --- a/iox_query/Cargo.toml +++ b/iox_query/Cargo.toml @@ -27,6 +27,7 @@ futures = "0.3" hashbrown = { workspace = true } indexmap = { version = "2.0", features = ["std"] } itertools = "0.11.0" +metric = { path = "../metric" } object_store = { workspace = true } observability_deps = { path = "../observability_deps" } once_cell = "1" diff --git a/iox_query/src/exec.rs b/iox_query/src/exec.rs index d3422802ee..eea940b118 100644 --- a/iox_query/src/exec.rs +++ b/iox_query/src/exec.rs @@ -13,6 +13,7 @@ pub(crate) mod split; pub mod stringset; use datafusion_util::config::register_iox_object_store; use executor::DedicatedExecutor; +use metric::Registry; use object_store::DynObjectStore; use parquet_file::storage::StorageId; mod cross_rt_stream; @@ -47,6 +48,9 @@ pub struct ExecutorConfig { /// Object stores pub object_stores: HashMap<StorageId, Arc<DynObjectStore>>, + /// Metric registry + pub metric_registry: Arc<Registry>, + /// Memory pool size in bytes. pub mem_pool_size: usize, } @@ -75,9 +79,10 @@ pub struct DedicatedExecutors { } impl DedicatedExecutors { - pub fn new(num_threads: NonZeroUsize) -> Self { - let query_exec = DedicatedExecutor::new("IOx Query", num_threads); - let reorg_exec = DedicatedExecutor::new("IOx Reorg", num_threads); + pub fn new(num_threads: NonZeroUsize, metric_registry: Arc<Registry>) -> Self { + let query_exec = + DedicatedExecutor::new("IOx Query", num_threads, Arc::clone(&metric_registry)); + let reorg_exec = DedicatedExecutor::new("IOx Reorg", num_threads, metric_registry); Self { query_exec, @@ -136,18 +141,26 @@ pub enum ExecutorType { impl Executor { /// Creates a new executor with a two dedicated thread pools, each /// with num_threads - pub fn new(num_threads: NonZeroUsize, mem_pool_size: usize) -> Self { + pub fn new( + num_threads: NonZeroUsize, + mem_pool_size: usize, + metric_registry: Arc<Registry>, + ) -> Self { Self::new_with_config(ExecutorConfig { num_threads, target_query_partitions: num_threads, object_stores: HashMap::default(), + metric_registry, mem_pool_size, }) } /// Create new executor based on a specific config. 
pub fn new_with_config(config: ExecutorConfig) -> Self { - let executors = Arc::new(DedicatedExecutors::new(config.num_threads)); + let executors = Arc::new(DedicatedExecutors::new( + config.num_threads, + Arc::clone(&config.metric_registry), + )); Self::new_with_config_and_executors(config, executors) } @@ -158,6 +171,7 @@ impl Executor { num_threads: NonZeroUsize::new(1).unwrap(), target_query_partitions: NonZeroUsize::new(1).unwrap(), object_stores: HashMap::default(), + metric_registry: Arc::new(Registry::default()), mem_pool_size: 1024 * 1024 * 1024, // 1GB }; let executors = Arc::new(DedicatedExecutors::new_testing()); diff --git a/iox_tests/src/catalog.rs b/iox_tests/src/catalog.rs index 1aa3922a9d..8cf760d77e 100644 --- a/iox_tests/src/catalog.rs +++ b/iox_tests/src/catalog.rs @@ -91,6 +91,7 @@ impl TestCatalog { parquet_store.id(), Arc::clone(parquet_store.object_store()), )]), + metric_registry: Arc::clone(&metric_registry), mem_pool_size: 1024 * 1024 * 1024, }, exec,
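For reference, a minimal sketch of using the bridge from a binary's entry point; only `setup_tokio_metrics(...)` and `Handle::current().metrics()` are taken from the diff, the surrounding main() is an assumption.

use std::sync::Arc;
use tokio::runtime::Handle;
use tokio_metrics_bridge::setup_tokio_metrics;

#[tokio::main]
async fn main() {
    let registry = Arc::new(metric::Registry::default());

    // Publish the current runtime's worker/task counters under the "main" label,
    // mirroring what setup_metric_registry() does in the diff above.
    setup_tokio_metrics(Handle::current().metrics(), "main", Arc::clone(&registry));

    // ... start the service and expose `registry` via the usual metrics endpoint ...
}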
7e64264eef9e4400b34fd8765c919c4887d1cee8
Marco Neumann
2023-05-17 11:30:04
remove `RedudantSort` optimizer pass (#7809)
* test: add dedup test for multiple partitions and ranges * refactor: remove `RedudantSort` optimizer pass Similar to #7807 this is now covered by DataFusion, as demonstrated by the fact that all query tests (incl. explain tests) still pass. The good thing is: passes that are no longer required don't require any upstreaming, so this also closes #7411.
null
refactor: remove `RedudantSort` optimizer pass (#7809) * test: add dedup test for multiple partitions and ranges * refactor: remove `RedudantSort` optimizer pass Similar to #7807 this is now covered by DataFusion, as demonstrated by the fact that all query tests (incl. explain tests) still pass. The good thing is: passes that are no longer required don't require any upstreaming, so this also closes #7411.
diff --git a/influxdb_iox/tests/query_tests/cases.rs b/influxdb_iox/tests/query_tests/cases.rs index 8de8d94337..b7ca055e4b 100644 --- a/influxdb_iox/tests/query_tests/cases.rs +++ b/influxdb_iox/tests/query_tests/cases.rs @@ -145,6 +145,18 @@ async fn duplicates_parquet_50() { .await; } +#[tokio::test] +async fn duplicates_different_domains() { + test_helpers::maybe_start_logging(); + + TestCase { + input: "cases/in/duplicates_different_domains.sql", + chunk_stage: ChunkStage::Parquet, + } + .run() + .await; +} + #[tokio::test] async fn gapfill() { test_helpers::maybe_start_logging(); diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_different_domains.sql b/influxdb_iox/tests/query_tests/cases/in/duplicates_different_domains.sql new file mode 100644 index 0000000000..c11074c501 --- /dev/null +++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_different_domains.sql @@ -0,0 +1,7 @@ +-- Test for dedup across different domains (like time range, partitions, et.c) +-- IOX_SETUP: DuplicateDifferentDomains + +select * from m order by time; + +-- IOX_COMPARE: uuid +explain select * from m order by time; diff --git a/influxdb_iox/tests/query_tests/cases/in/duplicates_different_domains.sql.expected b/influxdb_iox/tests/query_tests/cases/in/duplicates_different_domains.sql.expected new file mode 100644 index 0000000000..5afae58e71 --- /dev/null +++ b/influxdb_iox/tests/query_tests/cases/in/duplicates_different_domains.sql.expected @@ -0,0 +1,35 @@ +-- Test Setup: DuplicateDifferentDomains +-- SQL: select * from m order by time; ++-----+-----+--------------------------------+ +| f | tag | time | ++-----+-----+--------------------------------+ +| 1.0 | A | 1970-01-01T00:00:00Z | +| 3.0 | A | 1970-01-01T00:00:00.000000001Z | +| 2.0 | A | 1970-01-02T00:00:00Z | ++-----+-----+--------------------------------+ +-- SQL: explain select * from m order by time; +-- Results After Normalizing UUIDs +---------- +| plan_type | plan | +---------- +| logical_plan | Sort: m.time ASC NULLS LAST | +| | TableScan: m projection=[f, tag, time] | +| physical_plan | SortPreservingMergeExec: [time@2 ASC NULLS LAST] | +| | UnionExec | +| | SortExec: expr=[time@2 ASC NULLS LAST] | +| | ProjectionExec: expr=[f@1 as f, tag@2 as tag, time@3 as time] | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC,__chunk_order@0 ASC] | +| | ParquetExec: file_groups={2 groups: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet], [1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] | +| | SortExec: expr=[time@2 ASC NULLS LAST] | +| | ProjectionExec: expr=[f@1 as f, tag@2 as tag, time@3 as time] | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC,__chunk_order@0 ASC] | +| | ParquetExec: file_groups={2 groups: [[1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] | +| | SortExec: expr=[time@2 ASC NULLS LAST] | +| | ProjectionExec: expr=[f@1 as f, tag@2 as tag, time@3 as time] | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC,__chunk_order@0 ASC] | +| | ParquetExec: file_groups={2 groups: [[1/1/1/00000000-0000-0000-0000-000000000004.parquet], [1/1/1/00000000-0000-0000-0000-000000000005.parquet]]}, 
projection=[__chunk_order, f, tag, time], output_ordering=[tag@2 ASC, time@3 ASC, __chunk_order@0 ASC] | +| | | +---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests/setups.rs b/influxdb_iox/tests/query_tests/setups.rs index 0649fcb071..44946eed8c 100644 --- a/influxdb_iox/tests/query_tests/setups.rs +++ b/influxdb_iox/tests/query_tests/setups.rs @@ -1358,6 +1358,37 @@ pub static SETUPS: Lazy<HashMap<SetupName, SetupSteps>> = Lazy::new(|| { }, ], ), + ( + "DuplicateDifferentDomains", + (0..2) + .flat_map(|_| { + [ + Step::RecordNumParquetFiles, + Step::WriteLineProtocol( + r#" + m,tag=A f=1 0 + m,tag=A f=2 86400000000000 + "#.into(), + ), + Step::Persist, + Step::WaitForPersisted2 { + expected_increase: 2, + }, + Step::RecordNumParquetFiles, + Step::WriteLineProtocol( + r#" + m,tag=A f=3 1 + "#.into(), + ), + Step::Persist, + Step::WaitForPersisted2 { + expected_increase: 1, + }, + ] + .into_iter() + }) + .collect::<Vec<_>>(), + ), ]) }); diff --git a/iox_query/src/physical_optimizer/mod.rs b/iox_query/src/physical_optimizer/mod.rs index 92198f6aeb..40800a2359 100644 --- a/iox_query/src/physical_optimizer/mod.rs +++ b/iox_query/src/physical_optimizer/mod.rs @@ -10,7 +10,7 @@ use self::{ }, predicate_pushdown::PredicatePushdown, projection_pushdown::ProjectionPushdown, - sort::{parquet_sortness::ParquetSortness, redundant_sort::RedundantSort}, + sort::parquet_sortness::ParquetSortness, union::{nested_union::NestedUnion, one_union::OneUnion}, }; @@ -43,7 +43,6 @@ pub fn register_iox_physical_optimizers(state: SessionState) -> SessionState { Arc::new(OneUnion::default()), ]; optimizers.append(&mut state.physical_optimizers().to_vec()); - optimizers.extend([Arc::new(RedundantSort::default()) as _]); state.with_physical_optimizer_rules(optimizers) } diff --git a/iox_query/src/physical_optimizer/sort/mod.rs b/iox_query/src/physical_optimizer/sort/mod.rs index 5e174342eb..f5f250636a 100644 --- a/iox_query/src/physical_optimizer/sort/mod.rs +++ b/iox_query/src/physical_optimizer/sort/mod.rs @@ -3,4 +3,3 @@ //! [`SortExec`]: datafusion::physical_plan::sorts::sort::SortExec pub mod parquet_sortness; -pub mod redundant_sort; diff --git a/iox_query/src/physical_optimizer/sort/redundant_sort.rs b/iox_query/src/physical_optimizer/sort/redundant_sort.rs deleted file mode 100644 index 9638748c00..0000000000 --- a/iox_query/src/physical_optimizer/sort/redundant_sort.rs +++ /dev/null @@ -1,142 +0,0 @@ -use std::sync::Arc; - -use datafusion::{ - common::tree_node::{Transformed, TreeNode}, - config::ConfigOptions, - error::Result, - physical_optimizer::PhysicalOptimizerRule, - physical_plan::{sorts::sort::SortExec, ExecutionPlan}, -}; - -/// Removes [`SortExec`] if it is no longer needed. 
-#[derive(Debug, Default)] -pub struct RedundantSort; - -impl PhysicalOptimizerRule for RedundantSort { - fn optimize( - &self, - plan: Arc<dyn ExecutionPlan>, - _config: &ConfigOptions, - ) -> Result<Arc<dyn ExecutionPlan>> { - plan.transform_down(&|plan| { - let plan_any = plan.as_any(); - - if let Some(sort_exec) = plan_any.downcast_ref::<SortExec>() { - let child = sort_exec.input(); - - if child.output_ordering() == Some(sort_exec.expr()) { - return Ok(Transformed::Yes(Arc::clone(child))); - } - } - - Ok(Transformed::No(plan)) - }) - } - - fn name(&self) -> &str { - "redundant_sort" - } - - fn schema_check(&self) -> bool { - true - } -} - -#[cfg(test)] -mod tests { - use arrow::datatypes::{DataType, Field, Schema, SchemaRef}; - use datafusion::{ - datasource::object_store::ObjectStoreUrl, - physical_expr::PhysicalSortExpr, - physical_plan::{ - expressions::Column, - file_format::{FileScanConfig, ParquetExec}, - Statistics, - }, - }; - - use crate::physical_optimizer::test_util::OptimizationTest; - - use super::*; - - #[test] - fn test_not_redundant() { - let schema = schema(); - let input = Arc::new(ParquetExec::new( - FileScanConfig { - object_store_url: ObjectStoreUrl::parse("test://").unwrap(), - file_schema: Arc::clone(&schema), - file_groups: vec![], - statistics: Statistics::default(), - projection: None, - limit: None, - table_partition_cols: vec![], - output_ordering: None, - infinite_source: false, - }, - None, - None, - )); - let plan = Arc::new(SortExec::new(sort_expr(schema.as_ref()), input).with_fetch(Some(10))); - let opt = RedundantSort::default(); - insta::assert_yaml_snapshot!( - OptimizationTest::new(plan, opt), - @r###" - --- - input: - - " SortExec: fetch=10, expr=[col@0 ASC]" - - " ParquetExec: file_groups={0 groups: []}, projection=[col]" - output: - Ok: - - " SortExec: fetch=10, expr=[col@0 ASC]" - - " ParquetExec: file_groups={0 groups: []}, projection=[col]" - "### - ); - } - - #[test] - fn test_redundant() { - let schema = schema(); - let sort_expr = sort_expr(schema.as_ref()); - let input = Arc::new(ParquetExec::new( - FileScanConfig { - object_store_url: ObjectStoreUrl::parse("test://").unwrap(), - file_schema: Arc::clone(&schema), - file_groups: vec![], - statistics: Statistics::default(), - projection: None, - limit: None, - table_partition_cols: vec![], - output_ordering: Some(sort_expr.clone()), - infinite_source: false, - }, - None, - None, - )); - let plan = Arc::new(SortExec::new(sort_expr, input).with_fetch(Some(10))); - let opt = RedundantSort::default(); - insta::assert_yaml_snapshot!( - OptimizationTest::new(plan, opt), - @r###" - --- - input: - - " SortExec: fetch=10, expr=[col@0 ASC]" - - " ParquetExec: file_groups={0 groups: []}, projection=[col], output_ordering=[col@0 ASC]" - output: - Ok: - - " ParquetExec: file_groups={0 groups: []}, projection=[col], output_ordering=[col@0 ASC]" - "### - ); - } - - fn sort_expr(schema: &Schema) -> Vec<PhysicalSortExpr> { - vec![PhysicalSortExpr { - expr: Arc::new(Column::new_with_schema("col", schema).unwrap()), - options: Default::default(), - }] - } - - fn schema() -> SchemaRef { - Arc::new(Schema::new(vec![Field::new("col", DataType::Int64, false)])) - } -}

0c36c60d66487b0ed47d4d34988be76ed43b2d5a
Andrew Lamb
2023-03-16 15:27:21
simplify parameterized statement generation (#7232)
* docs: Add doc link to command enum * refactor(flightsql): simplify parameterized statement generation ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor(flightsql): simplify parameterized statement generation (#7232) * docs: Add doc link to command enum * refactor(flightsql): simplify parameterized statement generation --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/flightsql/src/cmd.rs b/flightsql/src/cmd.rs index 782dfbc256..283799587a 100644 --- a/flightsql/src/cmd.rs +++ b/flightsql/src/cmd.rs @@ -61,6 +61,9 @@ impl From<PreparedStatementHandle> for Bytes { /// /// Handles encoding/decoding prost::Any messages back /// and forth to native Rust types +/// +/// TODO use / contribute upstream arrow-flight implementation, when ready: +/// <https://github.com/apache/arrow-rs/issues/3874> #[derive(Debug, Clone, PartialEq)] pub enum FlightSQLCommand { CommandStatementQuery(CommandStatementQuery), diff --git a/flightsql/src/planner.rs b/flightsql/src/planner.rs index f64aba91e4..5cb3cd8313 100644 --- a/flightsql/src/planner.rs +++ b/flightsql/src/planner.rs @@ -218,41 +218,20 @@ async fn plan_get_db_schemas( catalog: Option<String>, db_schema_filter_pattern: Option<String>, ) -> Result<LogicalPlan> { - let (query, params) = match (catalog, db_schema_filter_pattern) { - (Some(catalog), Some(db_schema_filter_pattern)) => ( - "PREPARE my_plan(VARCHAR, VARCHAR) AS \ - SELECT DISTINCT table_catalog AS catalog_name, table_schema AS db_schema_name \ - FROM information_schema.tables \ - WHERE table_catalog like $1 AND table_schema like $2 \ - ORDER BY table_catalog, table_schema", - vec![ - ScalarValue::Utf8(Some(catalog)), - ScalarValue::Utf8(Some(db_schema_filter_pattern)), - ], - ), - (None, Some(db_schema_filter_pattern)) => ( - "PREPARE my_plan(VARCHAR) AS \ - SELECT DISTINCT table_catalog AS catalog_name, table_schema AS db_schema_name \ - FROM information_schema.tables \ - WHERE table_schema like $1 \ - ORDER BY table_catalog, table_schema", - vec![ScalarValue::Utf8(Some(db_schema_filter_pattern))], - ), - (Some(catalog), None) => ( - "PREPARE my_plan(VARCHAR) AS \ - SELECT DISTINCT table_catalog AS catalog_name, table_schema AS db_schema_name \ - FROM information_schema.tables \ - WHERE table_catalog like $1 \ - ORDER BY table_catalog, table_schema", - vec![ScalarValue::Utf8(Some(catalog))], - ), - (None, None) => ( - "SELECT DISTINCT table_catalog AS catalog_name, table_schema AS db_schema_name \ - FROM information_schema.tables \ - ORDER BY table_catalog, table_schema", - vec![], - ), - }; + // use '%' to match anything if filters are not specified + let catalog = catalog.unwrap_or_else(|| String::from("%")); + let db_schema_filter_pattern = db_schema_filter_pattern.unwrap_or_else(|| String::from("%")); + + let query = "PREPARE my_plan(VARCHAR, VARCHAR) AS \ + SELECT DISTINCT table_catalog AS catalog_name, table_schema AS db_schema_name \ + FROM information_schema.tables \ + WHERE table_catalog like $1 AND table_schema like $2 \ + ORDER BY table_catalog, table_schema"; + + let params = vec![ + ScalarValue::Utf8(Some(catalog)), + ScalarValue::Utf8(Some(db_schema_filter_pattern)), + ]; let plan = ctx.plan_sql(query).await?; debug!(?plan, "Prepared plan is");
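The simplification above leans on a small SQL fact: `LIKE '%'` matches every non-NULL value, and the catalog and schema columns of `information_schema.tables` are populated for every row, so defaulting an absent filter to "%" makes the single two-parameter prepared statement behave like the previous four-way match. A tiny illustration of the defaulting; the helper name is made up.

fn filter_or_wildcard(filter: Option<String>) -> String {
    // An absent filter degrades to "match everything".
    filter.unwrap_or_else(|| String::from("%"))
}

fn main() {
    assert_eq!(filter_or_wildcard(None), "%");
    assert_eq!(filter_or_wildcard(Some("public".into())), "public");
}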
b2052c1b813aba6832341f1f6b6cdcc614ed9f5e
Andrew Lamb
2023-03-02 12:24:22
add documentation about the compactor2 file level invariants (#7101)
* chore(compactor2): add documentation about the compactor2 file level invariants * fix: Fix doc links ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore(compactor2): add documentation about the compactor2 file level invariants (#7101) * chore(compactor2): add documentation about the compactor2 file level invariants * fix: Fix doc links --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/compactor2/src/lib.rs b/compactor2/src/lib.rs index 1b5bb9818f..9c6884d6c2 100644 --- a/compactor2/src/lib.rs +++ b/compactor2/src/lib.rs @@ -31,6 +31,26 @@ //! computation. Derived from *O6* and *O7*. //! //! +//! # File Levels +//! +//! Each parquet file has a `compaction_level` that the compactor uses +//! to optimize its choice of what files to compact. There are three levels: +//! * `L0`: [`data_types::CompactionLevel::Initial`] +//! * `L1`: [`data_types::CompactionLevel::FileNonOverlapped`] +//! * `L2`: [`data_types::CompactionLevel::Final`]. +//! +//! The compactor maintains the following invariants with levels: +//! +//! 1. The ingester writes all new data as `L0` files. +//! 2. The compactor creates `L1` and `L2` files. +//! 3. `L1` files never overlap with other `L1` files. +//! 4. `L2` files never overlap with other `L2` files. +//! 5. `L0` files can overlap with each other and any `L1` or `L2` files. +//! 6. `L1` files can overlap with `L2` files. +//! +//! Over time the compactor aims to rearrange data in all partitions +//! into a small number of large `L2` files. +//! //! # Crate Layout //! //! This crate tries to decouple "when to do what" from "how to do what". The "when" is described by the [driver] which
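To make invariants 3 and 4 concrete, here is an illustrative sketch of the property they guarantee; `FileRange` is a made-up stand-in for the catalog's parquet file metadata, not a real IOx type, and inclusive min/max timestamps are assumed.

#[derive(Clone, Copy)]
struct FileRange {
    min_time: i64,
    max_time: i64,
}

/// True if no two files in `files` overlap in time. Within a partition this
/// must hold for the set of all L1 files and for the set of all L2 files.
fn level_is_non_overlapping(files: &[FileRange]) -> bool {
    let mut sorted = files.to_vec();
    sorted.sort_by_key(|f| f.min_time);
    sorted.windows(2).all(|pair| pair[0].max_time < pair[1].min_time)
}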
701da1363cb2ba4250a434b49a253fbec38347c8
Dom Dwyer
2023-07-10 14:10:03
remove panic on impossible error
Remove the logical complexity of error handling for an error that cannot occur. This was an artifact of pre-PR refactoring - the error being returned SHOULD never be reached, as the only error returned is the "your message is too big" error, and that's not possible because the message size is validated in the GossipHandle::broadcast() method before it reaches the reactor.
null
refactor: remove panic on impossible error Remove the logical complexity of error handling for an error that cannot occur. This was an artifact of pre-PR refactoring - the error being returned SHOULD never be reached, as the only error returned is the "your message is too big" error, and that's not possible because the message size is validated in the GossipHandle::broadcast() method before it reaches the reactor.
diff --git a/gossip/src/reactor.rs b/gossip/src/reactor.rs index d547df6d6e..990b8d3eda 100644 --- a/gossip/src/reactor.rs +++ b/gossip/src/reactor.rs @@ -213,15 +213,18 @@ where // The user is guaranteed MAX_USER_PAYLOAD_BYTES to // be send-able, so send this frame without packing // others with it for simplicity. - if populate_frame( + populate_frame( &mut self.cached_frame, vec![new_payload(Payload::UserData(proto::UserPayload{payload}))], &mut self.serialisation_buf - ).is_err() - { - continue - } - self.peer_list.broadcast(&self.serialisation_buf, &self.socket, &self.metric_frames_sent, &self.metric_bytes_sent).await; + ).expect("size validated in handle at enqueue time"); + + self.peer_list.broadcast( + &self.serialisation_buf, + &self.socket, + &self.metric_frames_sent, + &self.metric_bytes_sent + ).await; } } }
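As a sketch of why the removed branch is unreachable: per the commit body, the size check lives in `GossipHandle::broadcast()` and runs before the payload is handed to the reactor. Everything below other than `MAX_USER_PAYLOAD_BYTES` and the `broadcast()` name is a made-up illustration, not the crate's real API.

impl GossipHandle {
    /// Hypothetical shape of the enqueue-time validation.
    pub async fn broadcast(&self, payload: Vec<u8>) -> Result<(), Error> {
        // Oversized payloads are rejected here, up front...
        if payload.len() > MAX_USER_PAYLOAD_BYTES {
            return Err(Error::MaxSize(payload.len()));
        }
        // ...so every payload the reactor later passes to populate_frame() is
        // guaranteed to fit, and its only error case can never fire.
        self.tx.send(payload).await.map_err(|_| Error::Shutdown)
    }
}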
4fb1b1948aac463bce39d58cf010b3668b9a5c55
Marco Neumann
2023-02-06 10:59:33
throttle partitions that do not receive commits (#6831)
* feat: throttle partitions that do not receive commits * test: add failing test * fix: partition ID in "unique" combo * fix: partition ID in "throttle" combo * docs: improve Co-authored-by: Dom <[email protected]> ---------
Co-authored-by: Dom <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: throttle partitions that do not receive commits (#6831) * feat: throttle partitions that do not receive commits * test: add failing test * fix: partition ID in "unique" combo * fix: partition ID in "throttle" combo * docs: improve Co-authored-by: Dom <[email protected]> --------- Co-authored-by: Dom <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/compactor2/src/compactor.rs b/compactor2/src/compactor.rs index 6b328e3364..8a7bdd0bfd 100644 --- a/compactor2/src/compactor.rs +++ b/compactor2/src/compactor.rs @@ -58,7 +58,6 @@ impl Compactor2 { _ = async { loop { compact(config.partition_concurrency, config.partition_timeout, Arc::clone(&job_semaphore), &components).await; - // TODO: implement throttling if there was no work to do } } => unreachable!(), } diff --git a/compactor2/src/components/combos/mod.rs b/compactor2/src/components/combos/mod.rs index 944bec0265..efaf6439ec 100644 --- a/compactor2/src/components/combos/mod.rs +++ b/compactor2/src/components/combos/mod.rs @@ -1,3 +1,7 @@ //! Combinations of multiple components that together can achieve one goal. +pub mod throttle_partition; pub mod unique_partitions; + +#[cfg(test)] +mod tests; diff --git a/compactor2/src/components/combos/tests.rs b/compactor2/src/components/combos/tests.rs new file mode 100644 index 0000000000..5486bea6fd --- /dev/null +++ b/compactor2/src/components/combos/tests.rs @@ -0,0 +1,66 @@ +use std::{sync::Arc, time::Duration}; + +use data_types::{CompactionLevel, PartitionId}; +use iox_time::{MockProvider, Time}; + +use crate::components::{ + combos::{throttle_partition::throttle_partition, unique_partitions::unique_partitions}, + commit::{mock::MockCommit, Commit}, + partition_done_sink::{mock::MockPartitionDoneSink, PartitionDoneSink}, + partitions_source::{mock::MockPartitionsSource, PartitionsSource}, +}; + +#[tokio::test] +async fn test_unique_and_throttle() { + let inner_source = Arc::new(MockPartitionsSource::new(vec![ + PartitionId::new(1), + PartitionId::new(2), + PartitionId::new(3), + ])); + let inner_commit = Arc::new(MockCommit::new()); + let inner_sink = Arc::new(MockPartitionDoneSink::new()); + let time_provider = Arc::new(MockProvider::new(Time::MIN)); + + let (source, sink) = unique_partitions(Arc::clone(&inner_source), Arc::clone(&inner_sink), 1); + let (source, commit, sink) = throttle_partition( + source, + Arc::clone(&inner_commit), + sink, + Arc::clone(&time_provider) as _, + Duration::from_secs(1), + 1, + ); + + assert_eq!( + source.fetch().await, + vec![ + PartitionId::new(1), + PartitionId::new(2), + PartitionId::new(3) + ], + ); + + assert_eq!(source.fetch().await, vec![],); + + commit + .commit(PartitionId::new(1), &[], &[], &[], CompactionLevel::Initial) + .await; + sink.record(PartitionId::new(1), Ok(())).await; + sink.record(PartitionId::new(2), Ok(())).await; + + inner_source.set(vec![ + PartitionId::new(1), + PartitionId::new(2), + PartitionId::new(3), + PartitionId::new(4), + ]); + + assert_eq!( + source.fetch().await, + vec![PartitionId::new(1), PartitionId::new(4)], + ); + + time_provider.inc(Duration::from_secs(1)); + + assert_eq!(source.fetch().await, vec![PartitionId::new(2)],); +} diff --git a/compactor2/src/components/combos/throttle_partition.rs b/compactor2/src/components/combos/throttle_partition.rs new file mode 100644 index 0000000000..874d6a7c85 --- /dev/null +++ b/compactor2/src/components/combos/throttle_partition.rs @@ -0,0 +1,511 @@ +//! Throttle partions that receive no commits. 
+ +use std::{ + collections::HashMap, + fmt::Display, + sync::{Arc, Mutex}, + time::Duration, +}; + +use async_trait::async_trait; +use data_types::{CompactionLevel, ParquetFileId, ParquetFileParams, PartitionId}; +use futures::StreamExt; +use iox_time::{Time, TimeProvider}; + +use crate::components::{ + commit::Commit, partition_done_sink::PartitionDoneSink, partitions_source::PartitionsSource, +}; + +/// Ensures that partitions that do not receive any commits are throttled. +/// +/// This may happen because our catalog query detects that the partition receives writes but the comapctor already +/// finished all the outstandign work. +/// +/// This should be used as a wrapper around the actual [`PartitionsSource`] & [`Commit`] & [`PartitionDoneSink`] and will setup of +/// the following stream layout: +/// +/// ```text +/// +--------------------------------------------+ +/// | | +/// | (5) | +/// | ^ | +/// | | | +/// | +.................................(4) | +/// | : ^ | +/// | V | V +/// (1)====>(2)====>[concurrent processing]---->(3)---->(6)---->(7) +/// ^ : +/// : : +/// : : +/// +...........................................+ +/// ``` +/// +/// | Step | Name | Type | Description | +/// | ---- | --------------------- | ----------------------------------------------------------------- | ----------- | +/// | 1 | **Actual source** | `inner_source`/`T1`/[`PartitionsSource`], wrapped | This is the actual source. | +/// | 2 | **Throttling source** | [`ThrottlePartitionsSourceWrapper`], wraps `inner_source`/`T1` | Throttles partitions that do not receive any commits | +/// | 3 | **Critical section** | -- | The actual partition processing | +/// | 4 | **Throttle commit** | [`ThrottleCommitWrapper`], wraps `inner_commit`/`T2` | Observes commits. | +/// | 5 | **Actual commit** | `inner_commit`/`T2`/[`Commit`] | The actual commit implementation | +/// | 6 | **Throttle sink** | [`ThrottlePartitionDoneSinkWrapper`], wraps `inner_sink`/`T3` | Observes incoming IDs enables throttled if step (4) did not observe any commits. | +/// | 7 | **Actual sink** | `inner_sink`/`T3`/[`PartitionDoneSink`], wrapped | The actual sink. Directly receives all partitions filtered out at step 2. | +/// +/// Note that partitions filtered out by [`ThrottlePartitionsSourceWrapper`] will directly be forwarded to `inner_sink`. No +/// partition is ever lost. This means that `inner_source` and `inner_sink` can perform proper accounting. The +/// concurrency of this bypass can be controlled via `bypass_concurrency`. +/// +/// This setup relies on a fact that it does not process duplicate [`PartitionId`]. You may use +/// [`unique_partitions`](crate::components::combos::unique_partitions::unique_partitions) to achieve that. 
+pub fn throttle_partition<T1, T2, T3>( + source: T1, + commit: T2, + sink: T3, + time_provider: Arc<dyn TimeProvider>, + throttle_duration: Duration, + bypass_concurrency: usize, +) -> ( + ThrottlePartitionsSourceWrapper<T1, T3>, + ThrottleCommitWrapper<T2>, + ThrottlePartitionDoneSinkWrapper<T3>, +) +where + T1: PartitionsSource, + T2: Commit, + T3: PartitionDoneSink, +{ + let state = SharedState::default(); + let inner_sink = Arc::new(sink); + let source = ThrottlePartitionsSourceWrapper { + inner_source: source, + inner_sink: Arc::clone(&inner_sink), + state: Arc::clone(&state), + time_provider: Arc::clone(&time_provider), + sink_concurrency: bypass_concurrency, + }; + let commit = ThrottleCommitWrapper { + inner: commit, + state: Arc::clone(&state), + }; + let sink = ThrottlePartitionDoneSinkWrapper { + inner: inner_sink, + state, + time_provider, + throttle_duration, + }; + (source, commit, sink) +} + +#[derive(Debug, Default)] +struct State { + // Value is "true" while compaction task is in-flight, and "false" once complete. + // + // Completed compaction tasks are removed from the map each time the source fetch() + // is called. + in_flight: HashMap<PartitionId, bool>, + throttled: HashMap<PartitionId, Time>, +} + +type SharedState = Arc<Mutex<State>>; + +#[derive(Debug)] +pub struct ThrottlePartitionsSourceWrapper<T1, T2> +where + T1: PartitionsSource, + T2: PartitionDoneSink, +{ + inner_source: T1, + inner_sink: Arc<T2>, + state: SharedState, + time_provider: Arc<dyn TimeProvider>, + sink_concurrency: usize, +} + +impl<T1, T2> Display for ThrottlePartitionsSourceWrapper<T1, T2> +where + T1: PartitionsSource, + T2: PartitionDoneSink, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "throttle({}, {})", self.inner_source, self.inner_sink) + } +} + +#[async_trait] +impl<T1, T2> PartitionsSource for ThrottlePartitionsSourceWrapper<T1, T2> +where + T1: PartitionsSource, + T2: PartitionDoneSink, +{ + async fn fetch(&self) -> Vec<PartitionId> { + let res = self.inner_source.fetch().await; + + let (pass, throttle) = { + let mut guard = self.state.lock().expect("not poisoned"); + + // ensure that in-flight data is non-overlapping + for id in &res { + if guard.in_flight.contains_key(id) { + drop(guard); // avoid poison + panic!("Partition already in-flight: {id}"); + } + } + + // clean throttled states + let now = self.time_provider.now(); + guard.throttled = guard + .throttled + .iter() + .filter(|(_id, until)| **until > now) + .map(|(k, v)| (*k, *v)) + .collect(); + + // filter output + let mut pass = Vec::with_capacity(res.len()); + let mut throttle = Vec::with_capacity(res.len()); + for id in res { + if guard.throttled.contains_key(&id) { + throttle.push(id); + } else { + pass.push(id); + } + } + + // set up in-flight + for id in &pass { + guard.in_flight.insert(*id, false); + } + + (pass, throttle) + }; + + futures::stream::iter(throttle) + .map(|id| self.inner_sink.record(id, Ok(()))) + .buffer_unordered(self.sink_concurrency) + .collect::<()>() + .await; + + pass + } +} + +#[derive(Debug)] +pub struct ThrottleCommitWrapper<T> +where + T: Commit, +{ + inner: T, + state: SharedState, +} + +impl<T> Display for ThrottleCommitWrapper<T> +where + T: Commit, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "throttle({})", self.inner) + } +} + +#[async_trait] +impl<T> Commit for ThrottleCommitWrapper<T> +where + T: Commit, +{ + async fn commit( + &self, + partition_id: PartitionId, + delete: &[ParquetFileId], + 
upgrade: &[ParquetFileId], + create: &[ParquetFileParams], + target_level: CompactionLevel, + ) -> Vec<ParquetFileId> { + let known = { + let mut guard = self.state.lock().expect("not poisoned"); + match guard.in_flight.get_mut(&partition_id) { + Some(val) => { + *val = true; + true + } + None => false, + } + }; + // perform check when NOT holding the mutex to not poison it + assert!( + known, + "Unknown or already done partition in commit: {partition_id}" + ); + + self.inner + .commit(partition_id, delete, upgrade, create, target_level) + .await + } +} + +#[derive(Debug)] +pub struct ThrottlePartitionDoneSinkWrapper<T> +where + T: PartitionDoneSink, +{ + inner: Arc<T>, + state: SharedState, + throttle_duration: Duration, + time_provider: Arc<dyn TimeProvider>, +} + +impl<T> Display for ThrottlePartitionDoneSinkWrapper<T> +where + T: PartitionDoneSink, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "throttle({})", self.inner) + } +} + +#[async_trait] +impl<T> PartitionDoneSink for ThrottlePartitionDoneSinkWrapper<T> +where + T: PartitionDoneSink, +{ + async fn record( + &self, + partition: PartitionId, + res: Result<(), Box<dyn std::error::Error + Send + Sync>>, + ) { + let known = { + let mut guard = self.state.lock().expect("not poisoned"); + match guard.in_flight.remove(&partition) { + Some(val) => { + if !val { + guard + .throttled + .insert(partition, self.time_provider.now() + self.throttle_duration); + } + true + } + None => false, + } + }; + // perform check when NOT holding the mutex to not poison it + assert!( + known, + "Unknown or already done partition in partition done sink: {partition}" + ); + + self.inner.record(partition, res).await; + } +} + +#[cfg(test)] +mod tests { + use iox_time::MockProvider; + + use crate::components::{ + commit::mock::{CommitHistoryEntry, MockCommit}, + partition_done_sink::mock::MockPartitionDoneSink, + partitions_source::mock::MockPartitionsSource, + }; + + use super::*; + + #[test] + fn test_display() { + let (source, commit, sink) = throttle_partition( + MockPartitionsSource::new(vec![]), + MockCommit::new(), + MockPartitionDoneSink::new(), + Arc::new(MockProvider::new(Time::MIN)), + Duration::from_secs(0), + 1, + ); + assert_eq!(source.to_string(), "throttle(mock, mock)"); + assert_eq!(commit.to_string(), "throttle(mock)"); + assert_eq!(sink.to_string(), "throttle(mock)"); + } + + #[tokio::test] + async fn test_throttle() { + let inner_source = Arc::new(MockPartitionsSource::new(vec![ + PartitionId::new(1), + PartitionId::new(2), + PartitionId::new(3), + PartitionId::new(4), + ])); + let inner_commit = Arc::new(MockCommit::new()); + let inner_sink = Arc::new(MockPartitionDoneSink::new()); + let time_provider = Arc::new(MockProvider::new(Time::MIN)); + let (source, commit, sink) = throttle_partition( + Arc::clone(&inner_source), + Arc::clone(&inner_commit), + Arc::clone(&inner_sink), + Arc::clone(&time_provider) as _, + Duration::from_secs(1), + 1, + ); + + // ========== Round 1 ========== + // fetch + assert_eq!( + source.fetch().await, + vec![ + PartitionId::new(1), + PartitionId::new(2), + PartitionId::new(3), + PartitionId::new(4) + ], + ); + assert_eq!(inner_sink.results(), HashMap::from([]),); + + // commit + commit + .commit(PartitionId::new(1), &[], &[], &[], CompactionLevel::Initial) + .await; + commit + .commit(PartitionId::new(2), &[], &[], &[], CompactionLevel::Initial) + .await; + + // record + sink.record(PartitionId::new(1), Ok(())).await; + sink.record(PartitionId::new(3), Ok(())).await; + 
assert_eq!( + inner_sink.results(), + HashMap::from([(PartitionId::new(1), Ok(())), (PartitionId::new(3), Ok(())),]), + ); + + // ========== Round 2 ========== + // need to remove partition 2 and 4 because they weren't finished yet + inner_source.set(vec![ + PartitionId::new(1), + PartitionId::new(3), + PartitionId::new(5), + ]); + + // fetch + assert_eq!( + source.fetch().await, + vec![ + // ID 1: commit in last round => pass + PartitionId::new(1), + // ID 3: no commit in last round => throttled + // ID 5: new => pass + PartitionId::new(5), + ], + ); + assert_eq!( + inner_sink.results(), + HashMap::from([(PartitionId::new(1), Ok(())), (PartitionId::new(3), Ok(())),]), + ); + + // ========== Round 3 ========== + // advance time to "unthrottle" ID 3 + inner_source.set(vec![PartitionId::new(3)]); + time_provider.inc(Duration::from_secs(1)); + + // fetch + assert_eq!(source.fetch().await, vec![PartitionId::new(3)],); + + // record + // can still finish partition 2 and 4 + sink.record(PartitionId::new(2), Err(String::from("foo").into())) + .await; + sink.record(PartitionId::new(4), Err(String::from("bar").into())) + .await; + assert_eq!( + inner_sink.results(), + HashMap::from([ + (PartitionId::new(1), Ok(())), + (PartitionId::new(2), Err(String::from("foo"))), + (PartitionId::new(3), Ok(())), + (PartitionId::new(4), Err(String::from("bar"))), + ]), + ); + + // ========== Round 4 ========== + inner_source.set(vec![PartitionId::new(2), PartitionId::new(4)]); + + // fetch + assert_eq!(source.fetch().await, vec![PartitionId::new(2)],); + + assert_eq!( + inner_sink.results(), + HashMap::from([ + (PartitionId::new(1), Ok(())), + (PartitionId::new(2), Err(String::from("foo"))), + (PartitionId::new(3), Ok(())), + (PartitionId::new(4), Ok(())), + ]), + ); + + // commits are just forwarded to inner `Commit` impl + assert_eq!( + inner_commit.history(), + vec![ + CommitHistoryEntry { + partition_id: PartitionId::new(1), + delete: vec![], + upgrade: vec![], + created: vec![], + target_level: CompactionLevel::Initial, + }, + CommitHistoryEntry { + partition_id: PartitionId::new(2), + delete: vec![], + upgrade: vec![], + created: vec![], + target_level: CompactionLevel::Initial, + }, + ] + ); + } + + #[tokio::test] + #[should_panic(expected = "Unknown or already done partition in commit: 1")] + async fn test_panic_commit_unknown() { + let (source, commit, sink) = throttle_partition( + MockPartitionsSource::new(vec![PartitionId::new(1)]), + MockCommit::new(), + MockPartitionDoneSink::new(), + Arc::new(MockProvider::new(Time::MIN)), + Duration::from_secs(0), + 1, + ); + + source.fetch().await; + sink.record(PartitionId::new(1), Ok(())).await; + commit + .commit(PartitionId::new(1), &[], &[], &[], CompactionLevel::Initial) + .await; + } + + #[tokio::test] + #[should_panic(expected = "Unknown or already done partition in partition done sink: 1")] + async fn test_panic_sink_unknown() { + let (source, _commit, sink) = throttle_partition( + MockPartitionsSource::new(vec![PartitionId::new(1)]), + MockCommit::new(), + MockPartitionDoneSink::new(), + Arc::new(MockProvider::new(Time::MIN)), + Duration::from_secs(0), + 1, + ); + + source.fetch().await; + sink.record(PartitionId::new(1), Ok(())).await; + sink.record(PartitionId::new(1), Ok(())).await; + } + + #[tokio::test] + #[should_panic(expected = "Partition already in-flight: 1")] + async fn test_panic_duplicate_in_flight() { + let (source, _commit, _sink) = throttle_partition( + MockPartitionsSource::new(vec![PartitionId::new(1)]), + MockCommit::new(), + 
MockPartitionDoneSink::new(), + Arc::new(MockProvider::new(Time::MIN)), + Duration::from_secs(0), + 1, + ); + + source.fetch().await; + source.fetch().await; + } +} diff --git a/compactor2/src/components/combos/unique_partitions.rs b/compactor2/src/components/combos/unique_partitions.rs index 5547ade370..fad8b6fdfe 100644 --- a/compactor2/src/components/combos/unique_partitions.rs +++ b/compactor2/src/components/combos/unique_partitions.rs @@ -8,6 +8,7 @@ use std::{ use async_trait::async_trait; use data_types::PartitionId; +use futures::StreamExt; use crate::components::{ partition_done_sink::PartitionDoneSink, partitions_source::PartitionsSource, @@ -19,11 +20,15 @@ use crate::components::{ /// the following stream layout: /// /// ```text +/// +---------------------------------------------------+ +/// | | +/// | | +/// | V /// (1)====>(2)====>[concurrent processing]---->(3)---->(4)---->(5) -/// ^ | -/// | | -/// | | -/// +-------------------------------------------+ +/// ^ : +/// : : +/// : : +/// +...........................................+ /// ``` /// /// | Step | Name | Type | Description | @@ -32,22 +37,30 @@ use crate::components::{ /// | 2 | **Unique IDs source** | [`UniquePartionsSourceWrapper`], wraps `inner_source`/`T1` | Outputs that [`PartitionId`]s from the `inner_source` but filters out partitions that have not yet reached the uniqueness sink (step 4) | /// | 3 | **Critical section** | -- | Here it is always ensured that a single [`PartitionId`] does NOT occur more than once. | /// | 4 | **Unique IDs sink** | [`UniquePartitionDoneSinkWrapper`], wraps `inner_sink`/`T2` | Observes incoming IDs and removes them from the filter applied in step 2. | -/// | 5 | **Actual sink** | `inner_sink`/`T2`/[`PartitionDoneSink`], wrapped | The actual sink. | +/// | 5 | **Actual sink** | `inner_sink`/`T2`/[`PartitionDoneSink`], wrapped | The actual sink. Directly receives all partitions filtered out at step 2. | +/// +/// Note that partitions filtered out by [`UniquePartionsSourceWrapper`] will directly be forwarded to `inner_sink`. No +/// partition is ever lost. This means that `inner_source` and `inner_sink` can perform proper accounting. The +/// concurrency of this bypass can be controlled via `bypass_concurrency`. 
pub fn unique_partitions<T1, T2>( inner_source: T1, inner_sink: T2, + bypass_concurrency: usize, ) -> ( - UniquePartionsSourceWrapper<T1>, + UniquePartionsSourceWrapper<T1, T2>, UniquePartitionDoneSinkWrapper<T2>, ) where T1: PartitionsSource, T2: PartitionDoneSink, { + let inner_sink = Arc::new(inner_sink); let in_flight = Arc::new(Mutex::new(HashSet::default())); let source = UniquePartionsSourceWrapper { - inner: inner_source, + inner_source, + inner_sink: Arc::clone(&inner_sink), in_flight: Arc::clone(&in_flight), + sink_concurrency: bypass_concurrency, }; let sink = UniquePartitionDoneSinkWrapper { inner: inner_sink, @@ -59,33 +72,59 @@ where type InFlight = Arc<Mutex<HashSet<PartitionId>>>; #[derive(Debug)] -pub struct UniquePartionsSourceWrapper<T> +pub struct UniquePartionsSourceWrapper<T1, T2> where - T: PartitionsSource, + T1: PartitionsSource, + T2: PartitionDoneSink, { - inner: T, + inner_source: T1, + inner_sink: Arc<T2>, in_flight: InFlight, + sink_concurrency: usize, } -impl<T> Display for UniquePartionsSourceWrapper<T> +impl<T1, T2> Display for UniquePartionsSourceWrapper<T1, T2> where - T: PartitionsSource, + T1: PartitionsSource, + T2: PartitionDoneSink, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "unique({})", self.inner) + write!(f, "unique({}, {})", self.inner_source, self.inner_sink) } } #[async_trait] -impl<T> PartitionsSource for UniquePartionsSourceWrapper<T> +impl<T1, T2> PartitionsSource for UniquePartionsSourceWrapper<T1, T2> where - T: PartitionsSource, + T1: PartitionsSource, + T2: PartitionDoneSink, { async fn fetch(&self) -> Vec<PartitionId> { - let res = self.inner.fetch().await; + let res = self.inner_source.fetch().await; - let mut guard = self.in_flight.lock().expect("not poisoned"); - res.into_iter().filter(|id| guard.insert(*id)).collect() + let (unique, duplicates) = { + let mut guard = self.in_flight.lock().expect("not poisoned"); + + let mut unique = Vec::with_capacity(res.len()); + let mut duplicates = Vec::with_capacity(res.len()); + for id in res { + if guard.insert(id) { + unique.push(id); + } else { + duplicates.push(id) + } + } + + (unique, duplicates) + }; + + futures::stream::iter(duplicates) + .map(|id| self.inner_sink.record(id, Ok(()))) + .buffer_unordered(self.sink_concurrency) + .collect::<()>() + .await; + + unique } } @@ -94,7 +133,7 @@ pub struct UniquePartitionDoneSinkWrapper<T> where T: PartitionDoneSink, { - inner: T, + inner: Arc<T>, in_flight: InFlight, } @@ -154,8 +193,9 @@ mod tests { let (source, sink) = unique_partitions( MockPartitionsSource::new(vec![]), MockPartitionDoneSink::new(), + 1, ); - assert_eq!(source.to_string(), "unique(mock)"); + assert_eq!(source.to_string(), "unique(mock, mock)"); assert_eq!(sink.to_string(), "unique(mock)"); } @@ -169,8 +209,11 @@ mod tests { PartitionId::new(4), ])); let inner_sink = Arc::new(MockPartitionDoneSink::new()); - let (source, sink) = unique_partitions(Arc::clone(&inner_source), Arc::clone(&inner_sink)); + let (source, sink) = + unique_partitions(Arc::clone(&inner_source), Arc::clone(&inner_sink), 1); + // ========== Round 1 ========== + // fetch assert_eq!( source.fetch().await, vec![ @@ -180,36 +223,71 @@ mod tests { PartitionId::new(4), ], ); + assert_eq!( + inner_sink.results(), + HashMap::from([(PartitionId::new(1), Ok(()))]), + ); + // record sink.record(PartitionId::new(1), Ok(())).await; sink.record(PartitionId::new(2), Ok(())).await; + assert_eq!( + inner_sink.results(), + HashMap::from([(PartitionId::new(1), Ok(())), 
(PartitionId::new(2), Ok(())),]), + ); + assert_eq!( inner_sink.results(), HashMap::from([(PartitionId::new(1), Ok(())), (PartitionId::new(2), Ok(()))]), ); + // ========== Round 2 ========== inner_source.set(vec![ PartitionId::new(1), PartitionId::new(3), PartitionId::new(5), ]); + // fetch assert_eq!( source.fetch().await, vec![PartitionId::new(1), PartitionId::new(5)], ); + assert_eq!( + inner_sink.results(), + HashMap::from([ + (PartitionId::new(1), Ok(())), + (PartitionId::new(2), Ok(())), + (PartitionId::new(3), Ok(())), + ]), + ); + + // record sink.record(PartitionId::new(1), Err(String::from("foo").into())) .await; + assert_eq!( + inner_sink.results(), + HashMap::from([ + (PartitionId::new(1), Err(String::from("foo"))), + (PartitionId::new(2), Ok(())), + (PartitionId::new(3), Ok(())), + ]), + ); + + // ========== Round 3 ========== + // fetch assert_eq!(source.fetch().await, vec![PartitionId::new(1)],); assert_eq!( inner_sink.results(), HashMap::from([ (PartitionId::new(1), Err(String::from("foo"))), - (PartitionId::new(2), Ok(())) + (PartitionId::new(2), Ok(())), + (PartitionId::new(3), Ok(())), + (PartitionId::new(5), Ok(())), ]), ); } @@ -220,6 +298,7 @@ mod tests { let (source, sink) = unique_partitions( MockPartitionsSource::new(vec![PartitionId::new(1)]), MockPartitionDoneSink::new(), + 1, ); let ids = source.fetch().await; assert_eq!(ids.len(), 1); diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs index 132a64562b..8b9b3a47f6 100644 --- a/compactor2/src/components/hardcoded.rs +++ b/compactor2/src/components/hardcoded.rs @@ -17,7 +17,7 @@ use crate::{ }; use super::{ - combos::unique_partitions::unique_partitions, + combos::{throttle_partition::throttle_partition, unique_partitions::unique_partitions}, commit::{ catalog::CatalogCommit, logging::LoggingCommitWrapper, metrics::MetricsCommitWrapper, mock::MockCommit, Commit, @@ -167,7 +167,15 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> { }; let (partitions_source, partition_done_sink) = - unique_partitions(partitions_source, partition_done_sink); + unique_partitions(partitions_source, partition_done_sink, 1); + let (partitions_source, commit, partition_done_sink) = throttle_partition( + partitions_source, + commit, + partition_done_sink, + Arc::clone(&config.time_provider), + Duration::from_secs(60), + 1, + ); Arc::new(Components { // Note: Place "not empty" wrapper at the very last so that the logging and metric wrapper work even when there
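The diff above wires the compactor's partitions source, commit, and partition-done sink through `unique_partitions` and `throttle_partition`, both of which share mutable state between a source-side wrapper and a sink-side wrapper. Below is a minimal, synchronous sketch of the deduplication half of that idea; it is not the compactor2 code (the real wrappers are async, operate on `PartitionId`, and forward filtered-out duplicates to the inner sink), and the `UniqueSource`/`UniqueSink` names are invented for illustration.

```rust
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

/// Shared set of partition IDs that are currently being worked on.
type InFlight = Arc<Mutex<HashSet<i64>>>;

/// Source-side wrapper: only hands out IDs that are not already in flight.
struct UniqueSource {
    in_flight: InFlight,
}

impl UniqueSource {
    fn fetch(&self, candidates: Vec<i64>) -> Vec<i64> {
        let mut guard = self.in_flight.lock().expect("not poisoned");
        // `insert` returns `false` for IDs already in the set, so duplicates
        // and still-running partitions are filtered out here.
        candidates.into_iter().filter(|id| guard.insert(*id)).collect()
    }
}

/// Sink-side wrapper: marks an ID as done so it can be fetched again later.
struct UniqueSink {
    in_flight: InFlight,
}

impl UniqueSink {
    fn record(&self, id: i64) {
        let mut guard = self.in_flight.lock().expect("not poisoned");
        assert!(guard.remove(&id), "unknown or already done partition: {id}");
    }
}

fn main() {
    let in_flight: InFlight = Arc::new(Mutex::new(HashSet::new()));
    let source = UniqueSource { in_flight: Arc::clone(&in_flight) };
    let sink = UniqueSink { in_flight };

    // First fetch: every ID passes through and is marked in flight.
    assert_eq!(source.fetch(vec![1, 2, 3]), vec![1, 2, 3]);
    // Second fetch: 1 and 2 are still in flight, so only 4 is new.
    assert_eq!(source.fetch(vec![1, 2, 4]), vec![4]);
    // Completing partition 1 makes it eligible for compaction again.
    sink.record(1);
    assert_eq!(source.fetch(vec![1]), vec![1]);
}
```

Keeping the `HashSet` behind a shared `Arc<Mutex<_>>` is what lets the source refuse IDs the sink has not yet marked done; the throttling wrapper in the diff layers its per-partition cool-down on top of the same shared-state pattern.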
1025a85e1254d28e4517085138b4780508e890af
Andrew Lamb
2023-08-08 15:22:29
remove `copy_with_replacement` and use upstream API (#8451)
* refactor(influxql): remove `copy_with_replacement` and use upstream version * fix: clippy ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor(influxql): remove `copy_with_replacement` and use upstream API (#8451) * refactor(influxql): remove `copy_with_replacement` and use upstream version * fix: clippy --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query_influxql/src/plan/mod.rs b/iox_query_influxql/src/plan/mod.rs index df2be38a0d..9c85c780f5 100644 --- a/iox_query_influxql/src/plan/mod.rs +++ b/iox_query_influxql/src/plan/mod.rs @@ -9,7 +9,6 @@ mod rewriter; mod test_utils; mod udf; mod util; -mod util_copy; mod var_ref; pub use planner::InfluxQLToLogicalPlan; diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs index 2ebb2d2536..61fd6ddd8b 100644 --- a/iox_query_influxql/src/plan/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -15,7 +15,7 @@ use crate::plan::udf::{ }; use crate::plan::util::{binary_operator_to_df_operator, rebase_expr, IQLSchema}; use crate::plan::var_ref::var_ref_data_type_to_data_type; -use crate::plan::{planner_rewrite_expression, udf, util_copy}; +use crate::plan::{planner_rewrite_expression, udf}; use crate::window::{ CUMULATIVE_SUM, DERIVATIVE, DIFFERENCE, MOVING_AVERAGE, NON_NEGATIVE_DERIVATIVE, NON_NEGATIVE_DIFFERENCE, PERCENT_ROW_NUMBER, @@ -28,7 +28,7 @@ use arrow::datatypes::{DataType, Field as ArrowField, Int32Type, Schema as Arrow use arrow::record_batch::RecordBatch; use chrono_tz::Tz; use datafusion::catalog::TableReference; -use datafusion::common::tree_node::{TreeNode, VisitRecursion}; +use datafusion::common::tree_node::{Transformed, TreeNode, VisitRecursion}; use datafusion::common::{DFSchema, DFSchemaRef, DataFusionError, Result, ScalarValue, ToDFSchema}; use datafusion::datasource::{provider_as_source, MemTable}; use datafusion::logical_expr::expr::{Alias, ScalarFunction}; @@ -99,7 +99,6 @@ use std::sync::Arc; use super::parse_regex; use super::util::contains_expr; -use super::util_copy::clone_with_replacement; /// The column index of the measurement column. const MEASUREMENT_COLUMN_INDEX: u32 = 0; @@ -1186,12 +1185,13 @@ impl<'a> InfluxQLToLogicalPlan<'a> { udf.args.append(&mut additional_args); let selector_new = Expr::AggregateUDF(udf); - select_exprs[selector_index] = - clone_with_replacement(&select_exprs[selector_index], &|expr| { - if expr == &selector { - Ok(Some(selector_new.clone())) + select_exprs[selector_index] = select_exprs[selector_index] + .clone() + .transform_up(&|expr| { + if expr == selector { + Ok(Transformed::Yes(selector_new.clone())) } else { - Ok(None) + Ok(Transformed::No(expr)) } }) .expect("cannot fail"); @@ -1381,11 +1381,11 @@ impl<'a> InfluxQLToLogicalPlan<'a> { let select_exprs = select_exprs .iter() .map(|expr| { - util_copy::clone_with_replacement(expr, &|udf_expr| { - Ok(if udfs.contains(udf_expr) { - Some(expr_as_column_expr(udf_expr, &plan)?) + expr.clone().transform_up(&|udf_expr| { + Ok(if udfs.contains(&udf_expr) { + Transformed::Yes(expr_as_column_expr(&udf_expr, &plan)?) } else { - None + Transformed::No(udf_expr) }) }) }) diff --git a/iox_query_influxql/src/plan/udf.rs b/iox_query_influxql/src/plan/udf.rs index e61e65af9b..437bfda68e 100644 --- a/iox_query_influxql/src/plan/udf.rs +++ b/iox_query_influxql/src/plan/udf.rs @@ -5,7 +5,7 @@ //! call information as the InfluxQL AST. These expressions are then //! rewritten at a later stage of planning, with more context available. 
-use crate::plan::util_copy::find_exprs_in_exprs; +use crate::plan::util::find_exprs_in_exprs; use crate::{error, NUMERICS}; use arrow::datatypes::{DataType, TimeUnit}; use datafusion::logical_expr::{ diff --git a/iox_query_influxql/src/plan/util.rs b/iox_query_influxql/src/plan/util.rs index c72123cccc..719fcab1eb 100644 --- a/iox_query_influxql/src/plan/util.rs +++ b/iox_query_influxql/src/plan/util.rs @@ -1,7 +1,6 @@ use crate::error; -use crate::plan::util_copy; use arrow::datatypes::{DataType, TimeUnit}; -use datafusion::common::tree_node::{TreeNode, VisitRecursion}; +use datafusion::common::tree_node::{Transformed, TreeNode, VisitRecursion}; use datafusion::common::{DFSchemaRef, Result}; use datafusion::logical_expr::utils::expr_as_column_expr; use datafusion::logical_expr::{lit, Expr, ExprSchemable, LogicalPlan, Operator}; @@ -150,24 +149,24 @@ pub(crate) fn rebase_expr( plan: &LogicalPlan, ) -> Result<Expr> { if let Some(value) = fill_if_null { - util_copy::clone_with_replacement(expr, &|nested_expr| { - Ok(if base_exprs.contains(nested_expr) { - let col_expr = expr_as_column_expr(nested_expr, plan)?; + expr.clone().transform_up(&|nested_expr| { + Ok(if base_exprs.contains(&nested_expr) { + let col_expr = expr_as_column_expr(&nested_expr, plan)?; let data_type = col_expr.get_type(plan.schema())?; - Some(coalesce_struct(vec![ + Transformed::Yes(coalesce_struct(vec![ col_expr, lit(number_to_scalar(value, &data_type)?), ])) } else { - None + Transformed::No(nested_expr) }) }) } else { - util_copy::clone_with_replacement(expr, &|nested_expr| { - Ok(if base_exprs.contains(nested_expr) { - Some(expr_as_column_expr(nested_expr, plan)?) + expr.clone().transform_up(&|nested_expr| { + Ok(if base_exprs.contains(&nested_expr) { + Transformed::Yes(expr_as_column_expr(&nested_expr, plan)?) } else { - None + Transformed::No(nested_expr) }) }) } @@ -186,3 +185,53 @@ pub(crate) fn contains_expr(expr: &Expr, needle: &Expr) -> bool { .expect("cannot fail"); found } + +/// Search the provided `Expr`'s, and all of their nested `Expr`, for any that +/// pass the provided test. The returned `Expr`'s are deduplicated and returned +/// in order of appearance (depth first). +/// +/// # NOTE +/// +/// Copied from DataFusion +pub(crate) fn find_exprs_in_exprs<F>(exprs: &[Expr], test_fn: &F) -> Vec<Expr> +where + F: Fn(&Expr) -> bool, +{ + exprs + .iter() + .flat_map(|expr| find_exprs_in_expr(expr, test_fn)) + .fold(vec![], |mut acc, expr| { + if !acc.contains(&expr) { + acc.push(expr) + } + acc + }) +} + +/// Search an `Expr`, and all of its nested `Expr`'s, for any that pass the +/// provided test. The returned `Expr`'s are deduplicated and returned in order +/// of appearance (depth first). 
+/// +/// # NOTE +/// +/// Copied from DataFusion +fn find_exprs_in_expr<F>(expr: &Expr, test_fn: &F) -> Vec<Expr> +where + F: Fn(&Expr) -> bool, +{ + let mut exprs = vec![]; + expr.apply(&mut |expr| { + if test_fn(expr) { + if !(exprs.contains(expr)) { + exprs.push(expr.clone()) + } + // stop recursing down this expr once we find a match + return Ok(VisitRecursion::Skip); + } + + Ok(VisitRecursion::Continue) + }) + // pre_visit always returns OK, so this will always too + .expect("no way to return error during recursion"); + exprs +} diff --git a/iox_query_influxql/src/plan/util_copy.rs b/iox_query_influxql/src/plan/util_copy.rs deleted file mode 100644 index b2bb28291a..0000000000 --- a/iox_query_influxql/src/plan/util_copy.rs +++ /dev/null @@ -1,390 +0,0 @@ -// NOTE: This code is copied from DataFusion, as it is not public, -// so all warnings are disabled. -#![allow(warnings)] -#![allow(clippy::all)] -//! A collection of utility functions copied from DataFusion. -//! -//! If these APIs are stabilised and made public, they can be removed from IOx. -//! -//! NOTE -use datafusion::common::tree_node::{TreeNode, VisitRecursion}; -use datafusion::common::Result; -use datafusion::logical_expr::expr::{ - AggregateUDF, Alias, InList, InSubquery, Placeholder, ScalarFunction, ScalarUDF, -}; -use datafusion::logical_expr::GetFieldAccess; -use datafusion::logical_expr::{ - expr::{ - AggregateFunction, Between, BinaryExpr, Case, Cast, Expr, GetIndexedField, GroupingSet, - Like, Sort, TryCast, WindowFunction, - }, - utils::expr_as_column_expr, - LogicalPlan, -}; -use datafusion::physical_plan::expressions::GetFieldAccessExpr; - -/// Returns a cloned `Expr`, but any of the `Expr`'s in the tree may be -/// replaced/customized by the replacement function. -/// -/// The replacement function is called repeatedly with `Expr`, starting with -/// the argument `expr`, then descending depth-first through its -/// descendants. The function chooses to replace or keep (clone) each `Expr`. -/// -/// The function's return type is `Result<Option<Expr>>>`, where: -/// -/// * `Ok(Some(replacement_expr))`: A replacement `Expr` is provided; it is -/// swapped in at the particular node in the tree. Any nested `Expr` are -/// not subject to cloning/replacement. -/// * `Ok(None)`: A replacement `Expr` is not provided. The `Expr` is -/// recreated, with all of its nested `Expr`'s subject to -/// cloning/replacement. -/// * `Err(err)`: Any error returned by the function is returned as-is by -/// `clone_with_replacement()`. -/// -/// Source: <https://github.com/apache/arrow-datafusion/blob/26e1b20ea/datafusion/sql/src/utils.rs#L153> -pub(super) fn clone_with_replacement<F>(expr: &Expr, replacement_fn: &F) -> Result<Expr> -where - F: Fn(&Expr) -> Result<Option<Expr>>, -{ - let replacement_opt = replacement_fn(expr)?; - - match replacement_opt { - // If we were provided a replacement, use the replacement. Do not - // descend further. - Some(replacement) => Ok(replacement), - // No replacement was provided, clone the node and recursively call - // clone_with_replacement() on any nested expressions. 
- None => match expr { - Expr::AggregateFunction(AggregateFunction { - fun, - args, - distinct, - filter, - order_by, - }) => Ok(Expr::AggregateFunction(AggregateFunction::new( - fun.clone(), - args.iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - *distinct, - filter.clone(), - order_by.clone(), - ))), - Expr::WindowFunction(WindowFunction { - fun, - args, - partition_by, - order_by, - window_frame, - }) => Ok(Expr::WindowFunction(WindowFunction::new( - fun.clone(), - args.iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<_>>>()?, - partition_by - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<_>>>()?, - order_by - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<_>>>()?, - window_frame.clone(), - ))), - Expr::AggregateUDF(AggregateUDF { - fun, - args, - filter, - order_by, - }) => Ok(Expr::AggregateUDF(AggregateUDF { - fun: fun.clone(), - args: args - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - filter: filter.clone(), - order_by: order_by.clone(), - })), - Expr::Alias(Alias { - expr: nested_expr, - name: alias_name, - }) => Ok(Expr::Alias(Alias { - expr: Box::new(clone_with_replacement(nested_expr, replacement_fn)?), - name: alias_name.clone(), - })), - Expr::Between(Between { - expr, - negated, - low, - high, - }) => Ok(Expr::Between(Between::new( - Box::new(clone_with_replacement(expr, replacement_fn)?), - *negated, - Box::new(clone_with_replacement(low, replacement_fn)?), - Box::new(clone_with_replacement(high, replacement_fn)?), - ))), - Expr::InList(InList { - expr: nested_expr, - list, - negated, - }) => Ok(Expr::InList(InList { - expr: Box::new(clone_with_replacement(nested_expr, replacement_fn)?), - list: list - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - negated: *negated, - })), - Expr::BinaryExpr(BinaryExpr { left, right, op }) => { - Ok(Expr::BinaryExpr(BinaryExpr::new( - Box::new(clone_with_replacement(left, replacement_fn)?), - *op, - Box::new(clone_with_replacement(right, replacement_fn)?), - ))) - } - Expr::Like(Like { - negated, - expr, - pattern, - case_insensitive, - escape_char, - }) => Ok(Expr::Like(Like::new( - *negated, - Box::new(clone_with_replacement(expr, replacement_fn)?), - Box::new(clone_with_replacement(pattern, replacement_fn)?), - *escape_char, - *case_insensitive, - ))), - Expr::SimilarTo(Like { - negated, - expr, - pattern, - case_insensitive, - escape_char, - }) => Ok(Expr::SimilarTo(Like::new( - *negated, - Box::new(clone_with_replacement(expr, replacement_fn)?), - Box::new(clone_with_replacement(pattern, replacement_fn)?), - *escape_char, - *case_insensitive, - ))), - Expr::Case(case) => Ok(Expr::Case(Case::new( - match &case.expr { - Some(case_expr) => { - Some(Box::new(clone_with_replacement(case_expr, replacement_fn)?)) - } - None => None, - }, - case.when_then_expr - .iter() - .map(|(a, b)| { - Ok(( - Box::new(clone_with_replacement(a, replacement_fn)?), - Box::new(clone_with_replacement(b, replacement_fn)?), - )) - }) - .collect::<Result<Vec<(_, _)>>>()?, - match &case.else_expr { - Some(else_expr) => { - Some(Box::new(clone_with_replacement(else_expr, replacement_fn)?)) - } - None => None, - }, - ))), - Expr::ScalarFunction(ScalarFunction { fun, args }) => { - Ok(Expr::ScalarFunction(ScalarFunction { - fun: fun.clone(), - args: args - .iter() - .map(|e| 
clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - })) - } - Expr::ScalarUDF(ScalarUDF { fun, args }) => Ok(Expr::ScalarUDF(ScalarUDF { - fun: fun.clone(), - args: args - .iter() - .map(|arg| clone_with_replacement(arg, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - })), - Expr::Negative(nested_expr) => Ok(Expr::Negative(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::Not(nested_expr) => Ok(Expr::Not(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsNotNull(nested_expr) => Ok(Expr::IsNotNull(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsNull(nested_expr) => Ok(Expr::IsNull(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsTrue(nested_expr) => Ok(Expr::IsTrue(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsFalse(nested_expr) => Ok(Expr::IsFalse(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsUnknown(nested_expr) => Ok(Expr::IsUnknown(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsNotTrue(nested_expr) => Ok(Expr::IsNotTrue(Box::new(clone_with_replacement( - nested_expr, - replacement_fn, - )?))), - Expr::IsNotFalse(nested_expr) => Ok(Expr::IsNotFalse(Box::new( - clone_with_replacement(nested_expr, replacement_fn)?, - ))), - Expr::IsNotUnknown(nested_expr) => Ok(Expr::IsNotUnknown(Box::new( - clone_with_replacement(nested_expr, replacement_fn)?, - ))), - Expr::Cast(Cast { expr, data_type }) => Ok(Expr::Cast(Cast::new( - Box::new(clone_with_replacement(expr, replacement_fn)?), - data_type.clone(), - ))), - Expr::TryCast(TryCast { - expr: nested_expr, - data_type, - }) => Ok(Expr::TryCast(TryCast::new( - Box::new(clone_with_replacement(nested_expr, replacement_fn)?), - data_type.clone(), - ))), - Expr::Sort(Sort { - expr: nested_expr, - asc, - nulls_first, - }) => Ok(Expr::Sort(Sort::new( - Box::new(clone_with_replacement(nested_expr, replacement_fn)?), - *asc, - *nulls_first, - ))), - Expr::Column { .. } - | Expr::OuterReferenceColumn(_, _) - | Expr::Literal(_) - | Expr::ScalarVariable(_, _) - | Expr::Exists { .. } - | Expr::ScalarSubquery(_) => Ok(expr.clone()), - Expr::InSubquery(InSubquery { - expr: nested_expr, - subquery, - negated, - }) => Ok(Expr::InSubquery(InSubquery { - expr: Box::new(clone_with_replacement(nested_expr, replacement_fn)?), - subquery: subquery.clone(), - negated: *negated, - })), - Expr::Wildcard => Ok(Expr::Wildcard), - Expr::QualifiedWildcard { .. 
} => Ok(expr.clone()), - Expr::GetIndexedField(GetIndexedField { field, expr }) => { - let field = match field { - GetFieldAccess::NamedStructField { name } => { - GetFieldAccess::NamedStructField { name: name.clone() } - } - GetFieldAccess::ListIndex { key } => GetFieldAccess::ListIndex { - key: Box::new(clone_with_replacement(key.as_ref(), replacement_fn)?), - }, - GetFieldAccess::ListRange { start, stop } => GetFieldAccess::ListRange { - start: Box::new(clone_with_replacement(start.as_ref(), replacement_fn)?), - stop: Box::new(clone_with_replacement(stop.as_ref(), replacement_fn)?), - }, - }; - - Ok(Expr::GetIndexedField(GetIndexedField::new( - Box::new(clone_with_replacement(expr.as_ref(), replacement_fn)?), - field, - ))) - } - Expr::GroupingSet(set) => match set { - GroupingSet::Rollup(exprs) => Ok(Expr::GroupingSet(GroupingSet::Rollup( - exprs - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - ))), - GroupingSet::Cube(exprs) => Ok(Expr::GroupingSet(GroupingSet::Cube( - exprs - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - ))), - GroupingSet::GroupingSets(lists_of_exprs) => { - let mut new_lists_of_exprs = vec![]; - for exprs in lists_of_exprs { - new_lists_of_exprs.push( - exprs - .iter() - .map(|e| clone_with_replacement(e, replacement_fn)) - .collect::<Result<Vec<Expr>>>()?, - ); - } - Ok(Expr::GroupingSet(GroupingSet::GroupingSets( - new_lists_of_exprs, - ))) - } - }, - Expr::Placeholder(Placeholder { id, data_type }) => { - Ok(Expr::Placeholder(Placeholder { - id: id.clone(), - data_type: data_type.clone(), - })) - } - }, - } -} - -/// Search the provided `Expr`'s, and all of their nested `Expr`, for any that -/// pass the provided test. The returned `Expr`'s are deduplicated and returned -/// in order of appearance (depth first). -/// -/// # NOTE -/// -/// Copied from DataFusion -pub(super) fn find_exprs_in_exprs<F>(exprs: &[Expr], test_fn: &F) -> Vec<Expr> -where - F: Fn(&Expr) -> bool, -{ - exprs - .iter() - .flat_map(|expr| find_exprs_in_expr(expr, test_fn)) - .fold(vec![], |mut acc, expr| { - if !acc.contains(&expr) { - acc.push(expr) - } - acc - }) -} - -/// Search an `Expr`, and all of its nested `Expr`'s, for any that pass the -/// provided test. The returned `Expr`'s are deduplicated and returned in order -/// of appearance (depth first). -/// -/// # NOTE -/// -/// Copied from DataFusion -fn find_exprs_in_expr<F>(expr: &Expr, test_fn: &F) -> Vec<Expr> -where - F: Fn(&Expr) -> bool, -{ - let mut exprs = vec![]; - expr.apply(&mut |expr| { - if test_fn(expr) { - if !(exprs.contains(expr)) { - exprs.push(expr.clone()) - } - // stop recursing down this expr once we find a match - return Ok(VisitRecursion::Skip); - } - - Ok(VisitRecursion::Continue) - }) - // pre_visit always returns OK, so this will always too - .expect("no way to return error during recursion"); - exprs -}
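The change above deletes the vendored `clone_with_replacement` helper and uses `TreeNode::transform_up` from DataFusion itself, returning `Transformed::Yes(new_expr)` to substitute a node and `Transformed::No(expr)` to keep it. A compressed sketch of that pattern, assuming the DataFusion revision pinned by this repository; the `replace_expr` helper and the literal expressions are made up for illustration:

```rust
use datafusion::common::tree_node::{Transformed, TreeNode};
use datafusion::common::Result;
use datafusion::logical_expr::{lit, Expr};

/// Replace every node equal to `needle` with `replacement`, bottom-up.
fn replace_expr(expr: Expr, needle: &Expr, replacement: &Expr) -> Result<Expr> {
    expr.transform_up(&|nested| {
        Ok(if &nested == needle {
            // Swap this node for the replacement.
            Transformed::Yes(replacement.clone())
        } else {
            // Leave the node untouched.
            Transformed::No(nested)
        })
    })
}

fn main() -> Result<()> {
    // (1 + 2) becomes (10 + 2)
    let rewritten = replace_expr(lit(1) + lit(2), &lit(1), &lit(10))?;
    assert_eq!(rewritten, lit(10) + lit(2));
    Ok(())
}
```

Note that `transform_up` visits children before parents, so by the time a node is offered for replacement its children have already been rewritten, and the replacement expression itself is not revisited.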
f93baf7693e016d1bc942943e67dbcc90d6f4a58
Andrew Lamb
2023-02-22 22:24:20
Update DataFusion and `arrow` / `arrow-flight` / `parquet` to `33.0.0` (#7045)
* chore: Update DataFusion and arrow/arrow-flight/parquet to 33.0.0 * fix: Update test output * fix: update more test output * fix: Update querier test output * chore: Run cargo hakari tasks * test: fix formatting Fix formatting of batch pretty printing. * test: fix formatting Fix formatting of batch pretty printing. * test: fix formatting for selector tests ---------
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: Dom Dwyer <[email protected]> Co-authored-by: Christopher Wolff <[email protected]>
chore: Update DataFusion and `arrow` / `arrow-flight` / `parquet` to `33.0.0` (#7045) * chore: Update DataFusion and arrow/arrow-flight/parquet to 33.0.0 * fix: Update test output * fix: update more test output * fix: Update querier test output * chore: Run cargo hakari tasks * test: fix formatting Fix formatting of batch pretty printing. * test: fix formatting Fix formatting of batch pretty printing. * test: fix formatting for selector tests --------- Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: Dom Dwyer <[email protected]> Co-authored-by: Christopher Wolff <[email protected]>
diff --git a/Cargo.lock b/Cargo.lock index 53604db83a..630382759a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,9 +100,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "arrow" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87d948f553cf556656eb89265700258e1032d26fec9b7920cd20319336e06afd" +checksum = "f3724c874f1517cf898cd1c3ad18ab5071edf893c48e73139ab1e16cf0f2affe" dependencies = [ "ahash 0.8.3", "arrow-arith", @@ -123,9 +123,9 @@ dependencies = [ [[package]] name = "arrow-arith" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf30d4ebc3df9dfd8bd26883aa30687d4ddcfd7b2443e62bd7c8fedf153b8e45" +checksum = "e958823b8383ca14d0a2e973de478dd7674cd9f72837f8c41c132a0fda6a4e5e" dependencies = [ "arrow-array", "arrow-buffer", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "arrow-array" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fe66ec388d882a61fff3eb613b5266af133aa08a3318e5e493daf0f5c1696cb" +checksum = "db670eab50e76654065b5aed930f4367101fcddcb2223802007d1e0b4d5a2579" dependencies = [ "ahash 0.8.3", "arrow-buffer", @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef967dadbccd4586ec8d7aab27d7033ecb5dfae8a605c839613039eac227bda" +checksum = "9f0e01c931882448c0407bd32311a624b9f099739e94e786af68adc97016b5f2" dependencies = [ "half 2.2.1", "num", @@ -164,9 +164,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "491a7979ea9e76dc218f532896e2d245fde5235e2e6420ce80d27cf6395dda84" +checksum = "4bf35d78836c93f80d9362f3ccb47ff5e2c5ecfc270ff42cdf1ef80334961d44" dependencies = [ "arrow-array", "arrow-buffer", @@ -180,9 +180,9 @@ dependencies = [ [[package]] name = "arrow-csv" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b1d4fc91078dbe843c2c50d90f8119c96e8dfac2f78d30f7a8cb9397399c61d" +checksum = "0a6aa7c2531d89d01fed8c469a9b1bf97132a0bdf70b4724fe4bbb4537a50880" dependencies = [ "arrow-array", "arrow-buffer", @@ -199,9 +199,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee0c0e3c5d3b80be8f267f4b2af714c08cad630569be01a8379cfe27b4866495" +checksum = "ea50db4d1e1e4c2da2bfdea7b6d2722eef64267d5ab680d815f7ae42428057f5" dependencies = [ "arrow-buffer", "arrow-schema", @@ -211,9 +211,9 @@ dependencies = [ [[package]] name = "arrow-flight" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039ae2ddb0e2c74255a7131cce73f47fb43b7debb13b382a92bd33bcf22f3017" +checksum = "6ad4c883d509d89f05b2891ad889729f17ab2191b5fd22b0cf3660a28cc40af5" dependencies = [ "arrow-array", "arrow-buffer", @@ -234,9 +234,9 @@ dependencies = [ [[package]] name = "arrow-ipc" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a3ca7eb8d23c83fe40805cbafec70a6a31df72de47355545ff34c850f715403" +checksum = "a4042fe6585155d1ec28a8e4937ec901a3ca7a19a22b9f6cd3f551b935cd84f5" dependencies = [ 
"arrow-array", "arrow-buffer", @@ -248,9 +248,9 @@ dependencies = [ [[package]] name = "arrow-json" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65aff76d2e340d827d5cab14759e7dd90891a288347e2202e4ee28453d9bed" +checksum = "7c907c4ab4f26970a3719dc06e78e8054a01d0c96da3664d23b941e201b33d2b" dependencies = [ "arrow-array", "arrow-buffer", @@ -267,9 +267,9 @@ dependencies = [ [[package]] name = "arrow-ord" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074a5a55c37ae4750af4811c8861c0378d8ab2ff6c262622ad24efae6e0b73b3" +checksum = "e131b447242a32129efc7932f58ed8931b42f35d8701c1a08f9f524da13b1d3c" dependencies = [ "arrow-array", "arrow-buffer", @@ -281,9 +281,9 @@ dependencies = [ [[package]] name = "arrow-row" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e064ac4e64960ebfbe35f218f5e7d9dc9803b59c2e56f611da28ce6d008f839e" +checksum = "b591ef70d76f4ac28dd7666093295fece0e5f9298f49af51ea49c001e1635bb6" dependencies = [ "ahash 0.8.3", "arrow-array", @@ -296,15 +296,15 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ead3f373b9173af52f2fdefcb5a7dd89f453fbc40056f574a8aeb23382a4ef81" +checksum = "eb327717d87eb94be5eff3b0cb8987f54059d343ee5235abf7f143c85f54cfc8" [[package]] name = "arrow-select" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "646b4f15b5a77c970059e748aeb1539705c68cd397ecf0f0264c4ef3737d35f3" +checksum = "79d3c389d1cea86793934f31594f914c8547d82e91e3411d4833ad0aac3266a7" dependencies = [ "arrow-array", "arrow-buffer", @@ -315,9 +315,9 @@ dependencies = [ [[package]] name = "arrow-string" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8b8bf150caaeca03f39f1a91069701387d93f7cfd256d27f423ac8496d99a51" +checksum = "30ee67790496dd310ddbf5096870324431e89aa76453e010020ac29b1184d356" dependencies = [ "arrow-array", "arrow-buffer", @@ -1412,7 +1412,7 @@ dependencies = [ [[package]] name = "datafusion" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "ahash 0.8.3", "arrow", @@ -1458,7 +1458,7 @@ dependencies = [ [[package]] name = "datafusion-common" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "arrow", "chrono", @@ -1471,7 +1471,7 @@ dependencies = [ [[package]] name = "datafusion-expr" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "ahash 0.8.3", "arrow", @@ -1483,7 +1483,7 @@ 
dependencies = [ [[package]] name = "datafusion-optimizer" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "arrow", "async-trait", @@ -1499,7 +1499,7 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "ahash 0.8.3", "arrow", @@ -1529,7 +1529,7 @@ dependencies = [ [[package]] name = "datafusion-proto" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "arrow", "chrono", @@ -1546,7 +1546,7 @@ dependencies = [ [[package]] name = "datafusion-row" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "arrow", "datafusion-common", @@ -1557,7 +1557,7 @@ dependencies = [ [[package]] name = "datafusion-sql" version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=253550c6c936f75f654dcdc9480025a9ef55d9fd#253550c6c936f75f654dcdc9480025a9ef55d9fd" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" dependencies = [ "arrow-schema", "datafusion-common", @@ -3975,9 +3975,9 @@ dependencies = [ [[package]] name = "parquet" -version = "32.0.0" +version = "33.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b3d4917209e17e1da5fb07d276da237a42465f0def2b8d5fa5ce0e85855b4c" +checksum = "b1b076829801167d889795cd1957989055543430fa1469cb1f6e32b789bfc764" dependencies = [ "ahash 0.8.3", "arrow-array", @@ -4387,9 +4387,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 2e593380f6..f39e6f819c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,12 +115,12 @@ edition = "2021" license = "MIT OR Apache-2.0" [workspace.dependencies] -arrow = { version = "32.0.0" } -arrow-flight = { version = "32.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="253550c6c936f75f654dcdc9480025a9ef55d9fd", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="253550c6c936f75f654dcdc9480025a9ef55d9fd" } +arrow = { version = 
"33.0.0" } +arrow-flight = { version = "33.0.0" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="f6e49ac7a027abb95d8b7fa755502dfa7d53c21c", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" } hashbrown = { version = "0.13.2" } -parquet = { version = "32.0.0" } +parquet = { version = "33.0.0" } # This profile optimizes for runtime performance and small binary size at the expense of longer # build times. It's most suitable for final release builds. diff --git a/arrow_util/src/display.rs b/arrow_util/src/display.rs index 5cb74e2db3..d723d12619 100644 --- a/arrow_util/src/display.rs +++ b/arrow_util/src/display.rs @@ -179,9 +179,9 @@ mod tests { "+------+-------+--------+---------+-------+--------+--------------------------------+", "| dict | int64 | uint64 | float64 | bool | string | time |", "+------+-------+--------+---------+-------+--------+--------------------------------+", - "| a | -1 | 1 | 1 | true | foo | |", + "| a | -1 | 1 | 1.0 | true | foo | |", "| | | | | | | 1970-01-01T00:00:00.000000100Z |", - "| b | 2 | 2 | 2 | false | bar | 2021-07-20T23:28:50Z |", + "| b | 2 | 2 | 2.0 | false | bar | 2021-07-20T23:28:50Z |", "+------+-------+--------+---------+-------+--------+--------------------------------+", ]; diff --git a/arrow_util/src/optimize.rs b/arrow_util/src/optimize.rs index d6b634a02e..e1919205c5 100644 --- a/arrow_util/src/optimize.rs +++ b/arrow_util/src/optimize.rs @@ -222,18 +222,18 @@ mod tests { assert_batches_eq!( vec![ - "+----+----+----+", - "| f1 | t2 | t1 |", - "+----+----+----+", - "| 1 | a | a |", - "| 2 | g | a |", - "| 3 | a | b |", - "| 4 | b | b |", - "| 1 | a | a |", - "| 5 | b | d |", - "| 2 | a | a |", - "| 46 | a | b |", - "+----+----+----+", + "+------+----+----+", + "| f1 | t2 | t1 |", + "+------+----+----+", + "| 1.0 | a | a |", + "| 2.0 | g | a |", + "| 3.0 | a | b |", + "| 4.0 | b | b |", + "| 1.0 | a | a |", + "| 5.0 | b | d |", + "| 2.0 | a | a |", + "| 46.0 | a | b |", + "+------+----+----+", ], &[optimized] ); diff --git a/influxdb_iox/tests/end_to_end_cases/cli.rs b/influxdb_iox/tests/end_to_end_cases/cli.rs index 7948ba5acf..bdfa982116 100644 --- a/influxdb_iox/tests/end_to_end_cases/cli.rs +++ b/influxdb_iox/tests/end_to_end_cases/cli.rs @@ -296,7 +296,7 @@ async fn write_and_query() { state, "SELECT * from m0 order by time desc limit 10;", Some(QueryLanguage::Sql), - "| value1 | value9 | value9 | value49 | value0 | 2021-04-26T13:47:39.727574Z | 1 |" + "| value1 | value9 | value9 | value49 | value0 | 2021-04-26T13:47:39.727574Z | 1.0 |" ).await; // data from 'cpu.parquet' diff --git a/influxdb_iox/tests/query_tests2/cases/in/aggregates.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/aggregates.sql.expected index 5142128d3a..746e4c6a74 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/aggregates.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/aggregates.sql.expected @@ -4,7 +4,7 @@ +-----------------+-----------------+----------------+--------------+--------------+--------------------------------+--------------------------------+ | COUNT(cpu.time) | COUNT(UInt8(1)) | COUNT(cpu.bar) | MIN(cpu.bar) | MAX(cpu.bar) | MIN(cpu.time) | MAX(cpu.time) | +-----------------+-----------------+----------------+--------------+--------------+--------------------------------+--------------------------------+ -| 4 | 4 | 4 | 1 | 2 | 1970-01-01T00:00:00.000000010Z | 1970-01-01T00:00:00.000000040Z | +| 4 | 4 | 
4 | 1.0 | 2.0 | 1970-01-01T00:00:00.000000010Z | 1970-01-01T00:00:00.000000040Z | +-----------------+-----------------+----------------+--------------+--------------+--------------------------------+--------------------------------+ -- SQL: SELECT max(foo) FROM cpu; -- Results After Sorting diff --git a/influxdb_iox/tests/query_tests2/cases/in/basic.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/basic.sql.expected index ae7d145702..2d8917f2cb 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/basic.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/basic.sql.expected @@ -4,7 +4,7 @@ | region | time | user | +--------+--------------------------------+------+ | west | 1970-01-01T00:00:00.000000100Z | 23.2 | -| west | 1970-01-01T00:00:00.000000150Z | 21 | +| west | 1970-01-01T00:00:00.000000150Z | 21.0 | +--------+--------------------------------+------+ -- SQL: SELECT min(region) from cpu; +-----------------+ @@ -17,33 +17,33 @@ | user | region | +------+--------+ | 23.2 | west | -| 21 | west | +| 21.0 | west | +------+--------+ -- SQL: SELECT * from cpu where time > to_timestamp('1970-01-01T00:00:00.000000120+00:00'); +--------+--------------------------------+------+ | region | time | user | +--------+--------------------------------+------+ -| west | 1970-01-01T00:00:00.000000150Z | 21 | +| west | 1970-01-01T00:00:00.000000150Z | 21.0 | +--------+--------------------------------+------+ -- SQL: SELECT * from cpu where time > '1970-01-01T00:00:00'::timestamp ORDER BY time; +--------+--------------------------------+------+ | region | time | user | +--------+--------------------------------+------+ | west | 1970-01-01T00:00:00.000000100Z | 23.2 | -| west | 1970-01-01T00:00:00.000000150Z | 21 | +| west | 1970-01-01T00:00:00.000000150Z | 21.0 | +--------+--------------------------------+------+ -- SQL: SELECT * from cpu where time > '1970-01-01T00:00:00' ORDER BY time; +--------+--------------------------------+------+ | region | time | user | +--------+--------------------------------+------+ | west | 1970-01-01T00:00:00.000000100Z | 23.2 | -| west | 1970-01-01T00:00:00.000000150Z | 21 | +| west | 1970-01-01T00:00:00.000000150Z | 21.0 | +--------+--------------------------------+------+ -- SQL: SELECT "user", region from cpu where time > to_timestamp('1970-01-01T00:00:00.000000120+00:00'); +------+--------+ | user | region | +------+--------+ -| 21 | west | +| 21.0 | west | +------+--------+ -- SQL: SELECT count(*) from cpu group by region; +-----------------+ @@ -62,4 +62,4 @@ | MEDIAN(cpu.user) | region | +------------------+--------+ | 22.1 | west | -+------------------+--------+ ++------------------+--------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected index 68c1b6f0d1..f99a078835 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected @@ -3,8 +3,8 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | -| | 1 | B | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +| | 1.0 | B | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" ORDER BY tag; -- Results After Normalizing UUIDs @@ -27,7 +27,7 @@ 
+-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A'; -- Results After Normalizing UUIDs @@ -51,7 +51,7 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2; -- Results After Normalizing UUIDs @@ -75,8 +75,8 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | -| | 1 | B | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +| | 1.0 | B | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag; -- Results After Normalizing UUIDs @@ -104,7 +104,7 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00'); -- Results After Normalizing UUIDs diff --git a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected index 4697f4d650..4757303f01 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected @@ -3,8 +3,8 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | -| | 1 | B | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +| | 1.0 | B | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" ORDER BY tag; -- Results After Normalizing UUIDs @@ -28,7 +28,7 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A'; -- Results After Normalizing UUIDs @@ -53,7 +53,7 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE foo=1 AND bar=2; -- Results After Normalizing UUIDs @@ -78,8 +78,8 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | -| | 1 | B | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +| | 1.0 | B | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag; -- 
Results After Normalizing UUIDs @@ -108,7 +108,7 @@ +-----+-----+-----+----------------------+ | bar | foo | tag | time | +-----+-----+-----+----------------------+ -| 2 | 1 | A | 1970-01-01T00:00:00Z | +| 2.0 | 1.0 | A | 1970-01-01T00:00:00Z | +-----+-----+-----+----------------------+ -- SQL: EXPLAIN SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00'); -- Results After Normalizing UUIDs diff --git a/influxdb_iox/tests/query_tests2/cases/in/different_tag_sets.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/different_tag_sets.sql.expected index 8d4d984e66..fc728a81da 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/different_tag_sets.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/different_tag_sets.sql.expected @@ -15,7 +15,7 @@ | city | other_temp | reading | state | temp | time | +--------+------------+---------+-------+------+--------------------------------+ | Boston | 72.4 | | | | 1970-01-01T00:00:00.000000350Z | -| Boston | | 51 | | 53.4 | 1970-01-01T00:00:00.000000050Z | +| Boston | | 51.0 | | 53.4 | 1970-01-01T00:00:00.000000050Z | | | 70.4 | | MA | | 1970-01-01T00:00:00.000000250Z | | | | | MA | 70.4 | 1970-01-01T00:00:00.000000050Z | +--------+------------+---------+-------+------+--------------------------------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected index 73d2a63b5e..da04b3c4b8 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected @@ -9,12 +9,12 @@ | 1970-01-01T00:00:00.000000250Z | MA | Andover | | 69.2 | | | 1970-01-01T00:00:00.000000250Z | MA | Boston | 65.4 | 75.4 | | | 1970-01-01T00:00:00.000000250Z | MA | Reading | 53.4 | | | -| 1970-01-01T00:00:00.000000300Z | CA | SF | 79 | 87.2 | 500 | -| 1970-01-01T00:00:00.000000300Z | CA | SJ | 78.5 | 88 | | +| 1970-01-01T00:00:00.000000300Z | CA | SF | 79.0 | 87.2 | 500 | +| 1970-01-01T00:00:00.000000300Z | CA | SJ | 78.5 | 88.0 | | | 1970-01-01T00:00:00.000000350Z | CA | SJ | 75.5 | 84.08 | | | 1970-01-01T00:00:00.000000400Z | MA | Bedford | 65.22 | 80.75 | 750 | | 1970-01-01T00:00:00.000000400Z | MA | Boston | 65.4 | 82.67 | | -| 1970-01-01T00:00:00.000000450Z | CA | SJ | 77 | 90.7 | | +| 1970-01-01T00:00:00.000000450Z | CA | SJ | 77.0 | 90.7 | | | 1970-01-01T00:00:00.000000500Z | CA | SJ | 69.5 | 88.2 | | | 1970-01-01T00:00:00.000000600Z | MA | Bedford | | 88.75 | 742 | | 1970-01-01T00:00:00.000000600Z | MA | Boston | 67.4 | | | diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected index 7a04681146..8b0370f508 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected @@ -9,12 +9,12 @@ | 1970-01-01T00:00:00.000000250Z | MA | Andover | | 69.2 | | | 1970-01-01T00:00:00.000000250Z | MA | Boston | 65.4 | 75.4 | | | 1970-01-01T00:00:00.000000250Z | MA | Reading | 53.4 | | | -| 1970-01-01T00:00:00.000000300Z | CA | SF | 79 | 87.2 | 500 | -| 1970-01-01T00:00:00.000000300Z | CA | SJ | 78.5 | 88 | | +| 1970-01-01T00:00:00.000000300Z | CA | SF | 79.0 | 87.2 | 500 | +| 1970-01-01T00:00:00.000000300Z | CA | SJ | 78.5 | 88.0 | | | 1970-01-01T00:00:00.000000350Z | CA | SJ | 75.5 | 84.08 | | | 
1970-01-01T00:00:00.000000400Z | MA | Bedford | 65.22 | 80.75 | 750 | | 1970-01-01T00:00:00.000000400Z | MA | Boston | 65.4 | 82.67 | | -| 1970-01-01T00:00:00.000000450Z | CA | SJ | 77 | 90.7 | | +| 1970-01-01T00:00:00.000000450Z | CA | SJ | 77.0 | 90.7 | | | 1970-01-01T00:00:00.000000500Z | CA | SJ | 69.5 | 88.2 | | | 1970-01-01T00:00:00.000000600Z | MA | Bedford | | 88.75 | 742 | | 1970-01-01T00:00:00.000000600Z | MA | Boston | 67.4 | | | diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected index 6e97ca53df..253415dca1 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected @@ -3,7 +3,7 @@ +-----------------+----------+ | COUNT(UInt8(1)) | SUM(m.f) | +-----------------+----------+ -| 21 | 33 | +| 21 | 33.0 | +-----------------+----------+ -- SQL: EXPLAIN select count(*), sum(f) from m; -- Results After Normalizing UUIDs diff --git a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected index 238e3a6db1..868ca6b58f 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected @@ -17,9 +17,9 @@ | | CoalesceBatchesExec: target_batch_size=8192 | | | RepartitionExec: partitioning=Hash([Column { name: "date_bin_gapfill(IntervalDayTime(\"600000\"),cpu.time,Utf8(\"1970-01-01T00:00:00Z\"))", index: 0 }], 4), input_partitions=4 | | | AggregateExec: mode=Partial, gby=[datebin(600000, time@0, 0) as date_bin_gapfill(IntervalDayTime("600000"),cpu.time,Utf8("1970-01-01T00:00:00Z"))], aggr=[COUNT(cpu.user)] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: time@0 >= 957528000000000000 AND time@0 <= 957531540000000000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@0 >= 957528000000000000 AND time@0 <= 957531540000000000 | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time >= TimestampNanosecond(957528000000000000, None) AND time <= TimestampNanosecond(957531540000000000, None), pruning_predicate=time_max@0 >= 957528000000000000 AND time_min@1 <= 957531540000000000, projection=[time, user] | | | | ---------- diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index 205d2bb1b7..9818274054 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -274,14 +274,14 @@ +----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ | time | f64 | abs | sin | cos | tan | asin | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | 
+----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ -| 2022-10-31T02:00:00Z | 10.1 | 10.1 | -0.6250706488928821 | -0.7805681801691837 | 0.8007893029375109 | 0.0991723838059207 | 1.471623942988976 | 1.47210806614649 | 1.3753055265462157 | 24343.00942440838 | 2.312535423847214 | 3.3362833878644325 | 1.0043213737826426 | 3.1780497164141406 | 102.00999999999999 | 10 | 11 | 10 | +| 2022-10-31T02:00:00Z | 10.1 | 10.1 | -0.6250706488928821 | -0.7805681801691837 | 0.8007893029375109 | 0.0991723838059207 | 1.471623942988976 | 1.47210806614649 | 1.3753055265462157 | 24343.00942440838 | 2.312535423847214 | 3.3362833878644325 | 1.0043213737826426 | 3.1780497164141406 | 102.00999999999999 | 10.0 | 11.0 | 10.0 | +----------------------+------+------+---------------------+---------------------+--------------------+--------------------+-------------------+------------------+--------------------+-------------------+-------------------+--------------------+--------------------+--------------------+--------------------+-------+------+-------+ -- InfluxQL: SELECT i64, abs(i64 * -1), sin(i64), cos(i64), tan(i64), acos(1/i64), atan(i64), atan2(i64, 2), exp(i64), ln(i64), log2(i64), log10(i64), sqrt(i64), pow(i64, 2), floor(i64), ceil(i64), round(i64) FROM m0 LIMIT 1; -+----------------------+-----+-----+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------------------------------+------------------+-------------------+--------------------+-------------------+-------+-------+------+-------+ -| time | i64 | abs | sin | cos | tan | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | -+----------------------+-----+-----+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------------------------------+------------------+-------------------+--------------------+-------------------+-------+-------+------+-------+ -| 2022-10-31T02:00:00Z | 101 | 101 | 0.45202578717835057 | 0.8920048697881602 | 0.5067526002248183 | 1.5707963267948966 | 1.560895660206908 | 1.5509969 | 73070599793680670000000000000000000000000000 | 4.61512051684126 | 6.658211482751795 | 2.0043213737826426 | 10.04987562112089 | 10201 | 101 | 101 | 101 | -+----------------------+-----+-----+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------------------------------+------------------+-------------------+--------------------+-------------------+-------+-------+------+-------+ ++----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ +| time | i64 | abs | sin | cos | tan | acos | atan | atan2 | exp | ln | log2 | log10 | sqrt | pow | floor | ceil | round | 
++----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ +| 2022-10-31T02:00:00Z | 101 | 101.0 | 0.45202578717835057 | 0.8920048697881602 | 0.5067526002248183 | 1.5707963267948966 | 1.560895660206908 | 1.5509969 | 7.307059979368067e43 | 4.61512051684126 | 6.658211482751795 | 2.0043213737826426 | 10.04987562112089 | 10201 | 101.0 | 101.0 | 101.0 | ++----------------------+-----+-------+---------------------+--------------------+--------------------+--------------------+-------------------+-----------+----------------------+------------------+-------------------+--------------------+-------------------+-------+-------+-------+-------+ -- InfluxQL: SELECT f64, asin(f64), acos(f64) FROM m0 LIMIT 1; +----------------------+------+------+------+ | time | f64 | asin | acos | @@ -393,7 +393,7 @@ | time | tag0 | i64 | i64_1 | i64_f64 | i64_2 | +----------------------+-------+-----+-------+---------+-------+ | 2022-10-31T02:00:10Z | val00 | 211 | 105.5 | 232 | 1 | -| 2022-10-31T02:00:30Z | val00 | 392 | 196 | 411 | 0 | +| 2022-10-31T02:00:30Z | val00 | 392 | 196.0 | 411 | 0 | +----------------------+-------+-----+-------+---------+-------+ -- InfluxQL: SELECT f64, non_existing, f64 + non_existing FROM m0 WHERE f64 > 19; +----------------------+------+--------------+------------------+ diff --git a/influxdb_iox/tests/query_tests2/cases/in/periods.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/periods.sql.expected index 47a9dd7349..4d47435660 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/periods.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/periods.sql.expected @@ -3,8 +3,8 @@ +-----------+-----------+---------+---------+--------------------------------+ | field.one | field.two | tag.one | tag.two | time | +-----------+-----------+---------+---------+--------------------------------+ -| 1 | true | value | other | 2021-01-01T00:00:01.000000001Z | -| 1 | false | value2 | other2 | 2021-01-01T00:00:01.000000002Z | +| 1.0 | true | value | other | 2021-01-01T00:00:01.000000001Z | +| 1.0 | false | value2 | other2 | 2021-01-01T00:00:01.000000002Z | +-----------+-----------+---------+---------+--------------------------------+ -- SQL: SELECT "tag.one" from "measurement.one"; +---------+ @@ -18,4 +18,4 @@ | tag.one | +---------+ | value | -+---------+ ++---------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected index fdd7839ddc..e4655f8718 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected @@ -4,13 +4,13 @@ +-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 189 | 7 | 1970-01-01T00:00:00.000000110Z | bedford | -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 471 | 6 | 1970-01-01T00:00:00.000000110Z | tewsbury | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | -| 40000 | 5 | 1970-01-01T00:00:00.000000100Z | andover | +| 189 | 7.0 | 1970-01-01T00:00:00.000000110Z | bedford | +| 372 | 5.0 | 
1970-01-01T00:00:00.000000100Z | lexington | +| 471 | 6.0 | 1970-01-01T00:00:00.000000110Z | tewsbury | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 40000 | 5.0 | 1970-01-01T00:00:00.000000100Z | andover | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant; -- Results After Normalizing UUIDs @@ -28,12 +28,12 @@ +-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 471 | 6 | 1970-01-01T00:00:00.000000110Z | tewsbury | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | -| 40000 | 5 | 1970-01-01T00:00:00.000000100Z | andover | +| 372 | 5.0 | 1970-01-01T00:00:00.000000100Z | lexington | +| 471 | 6.0 | 1970-01-01T00:00:00.000000110Z | tewsbury | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 40000 | 5.0 | 1970-01-01T00:00:00.000000100Z | andover | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200; -- Results After Normalizing UUIDs @@ -46,8 +46,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] | | | | ---------- -- SQL: EXPLAIN SELECT * from restaurant where count > 200.0; @@ -61,8 +60,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: CAST(count@0 AS Float64) > 200 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] | | | | ---------- -- SQL: EXPLAIN SELECT * from restaurant where system > 4.0; @@ -76,8 +74,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 4 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4), 
pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury'; @@ -85,11 +82,11 @@ +-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | -| 40000 | 5 | 1970-01-01T00:00:00.000000100Z | andover | +| 372 | 5.0 | 1970-01-01T00:00:00.000000100Z | lexington | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 40000 | 5.0 | 1970-01-01T00:00:00.000000100Z | andover | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury'; -- Results After Normalizing UUIDs @@ -102,8 +99,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND town@3 != tewsbury | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence'); @@ -111,10 +107,10 @@ +-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | -| 40000 | 5 | 1970-01-01T00:00:00.000000100Z | andover | +| 372 | 5.0 | 1970-01-01T00:00:00.000000100Z | lexington | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 40000 | 5.0 | 1970-01-01T00:00:00.000000100Z | andover | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence'); -- Results After Normalizing UUIDs @@ -127,8 +123,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence | 
-| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000; @@ -136,9 +131,9 @@ +-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 372 | 5.0 | 1970-01-01T00:00:00.000000100Z | lexington | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000; -- Results After Normalizing UUIDs @@ -151,8 +146,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and count < 40000; @@ -160,11 +154,11 @@ 
+-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 471 | 6 | 1970-01-01T00:00:00.000000110Z | tewsbury | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 372 | 5.0 | 1970-01-01T00:00:00.000000100Z | lexington | +| 471 | 6.0 | 1970-01-01T00:00:00.000000110Z | tewsbury | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where count > 200 and count < 40000; -- Results After Normalizing UUIDs @@ -177,8 +171,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: count@0 > 200 AND count@0 < 40000 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 4.0 and system < 7.0; @@ -186,12 +179,12 @@ +-------+--------+--------------------------------+-----------+ | count | system | time | town | +-------+--------+--------------------------------+-----------+ -| 372 | 5 | 1970-01-01T00:00:00.000000100Z | lexington | -| 471 | 6 | 1970-01-01T00:00:00.000000110Z | tewsbury | -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | -| 40000 | 5 | 1970-01-01T00:00:00.000000100Z | andover | +| 372 | 5.0 | 1970-01-01T00:00:00.000000100Z | lexington | +| 471 | 6.0 | 1970-01-01T00:00:00.000000110Z | tewsbury | +| 632 | 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 40000 | 5.0 | 1970-01-01T00:00:00.000000100Z | andover | +-------+--------+--------------------------------+-----------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 4.0 and system < 7.0; -- Results After Normalizing UUIDs @@ -204,8 +197,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 4 AND system@1 < 7 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: 
[[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 and system < 7.0; @@ -213,9 +205,9 @@ +-------+--------+--------------------------------+----------+ | count | system | time | town | +-------+--------+--------------------------------+----------+ -| 471 | 6 | 1970-01-01T00:00:00.000000110Z | tewsbury | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 471 | 6.0 | 1970-01-01T00:00:00.000000110Z | tewsbury | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +-------+--------+--------------------------------+----------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and system < 7.0; -- Results After Normalizing UUIDs @@ -228,8 +220,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 5 AND system@1 < 7 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system; @@ -237,8 +228,8 @@ +-------+--------+--------------------------------+----------+ | count | system | time | town | +-------+--------+--------------------------------+----------+ -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | -| 872 | 6 | 1970-01-01T00:00:00.000000110Z | lawrence | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +| 872 | 6.0 | 1970-01-01T00:00:00.000000110Z | lawrence | +-------+--------+--------------------------------+----------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system; -- Results After Normalizing UUIDs @@ -251,8 +242,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND 
system_min@3 < 7, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading'); @@ -260,7 +250,7 @@ +-------+--------+--------------------------------+---------+ | count | system | time | town | +-------+--------+--------------------------------+---------+ -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +-------+--------+--------------------------------+---------+ -- SQL: EXPLAIN SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading'); -- Results After Normalizing UUIDs @@ -273,8 +263,7 @@ | physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00'); @@ -300,22 +289,22 @@ +-------+--------+--------------------------------+---------+ | count | system | time | town | +-------+--------+--------------------------------+---------+ -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +-------+--------+--------------------------------+---------+ -- SQL: SELECT * from restaurant where system > 5.0 and system < 7.0 and town = 'reading'; -- Results After Sorting +-------+--------+--------------------------------+---------+ | count | system | time | town | +-------+--------+--------------------------------+---------+ -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +-------+--------+--------------------------------+---------+ -- SQL: SELECT * from restaurant where count > 500.76 and count < 640.0; -- Results After Sorting +-------+--------+--------------------------------+---------+ | count | system | time | town | +-------+--------+--------------------------------+---------+ -| 632 | 5 | 1970-01-01T00:00:00.000000120Z | reading | -| 632 | 6 | 1970-01-01T00:00:00.000000130Z | reading | +| 632 
| 5.0 | 1970-01-01T00:00:00.000000120Z | reading | +| 632 | 6.0 | 1970-01-01T00:00:00.000000130Z | reading | +-------+--------+--------------------------------+---------+ -- SQL: EXPLAIN SELECT * from restaurant where influx_regex_match(town, 'foo|bar|baz') and influx_regex_not_match(town, 'one|two'); -- Results After Normalizing UUIDs @@ -329,8 +318,8 @@ | physical_plan | ProjectionExec: expr=[count@1 as count, system@2 as system, time@3 as time, town@4 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %foo% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %bar% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %baz% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %one% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %two% | -| | ProjectionExec: expr=[CAST(town@3 AS Utf8) as CAST(restaurant.town AS Utf8)restaurant.town, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | ProjectionExec: expr=[CAST(town@3 AS Utf8) as CAST(restaurant.town AS Utf8)restaurant.town, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=(CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%foo%") OR CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%bar%") OR CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%baz%")) AND CAST(town AS Utf8) AS restaurant.town NOT LIKE Utf8("%one%") AND CAST(town AS Utf8) AS restaurant.town NOT LIKE Utf8("%two%") AND (CAST(town AS Utf8) LIKE Utf8("%foo%") OR CAST(town AS Utf8) LIKE Utf8("%bar%") OR CAST(town AS Utf8) LIKE Utf8("%baz%")) AND CAST(town AS Utf8) NOT LIKE Utf8("%one%") AND CAST(town AS Utf8) NOT LIKE Utf8("%two%"), projection=[count, system, time, town] | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected index 97bc8d0ea6..9e30861e3a 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected @@ -3,9 +3,9 @@ +------+------+----------------------+ | host | load | time | +------+------+----------------------+ -| a | 1 | 2022-01-01T01:00:00Z | -| b | 2 | 2022-01-01T01:00:00Z | -| bb | 21 | 2022-01-01T01:00:00Z | +| a | 1.0 | 2022-01-01T01:00:00Z | +| b | 2.0 | 2022-01-01T01:00:00Z | +| bb | 21.0 | 2022-01-01T01:00:00Z | +------+------+----------------------+ -- SQL: EXPLAIN SELECT * FROM cpu order by host, load, time; -- Results After Normalizing UUIDs @@ -33,8 +33,8 @@ +------+------+----------------------+ | host | load | time | +------+------+----------------------+ -| a | 1 | 2022-01-01T01:00:00Z | -| bb | 21 | 2022-01-01T01:00:00Z | +| a | 1.0 | 2022-01-01T01:00:00Z | +| bb | 21.0 | 2022-01-01T01:00:00Z | +------+------+----------------------+ -- SQL: EXPLAIN SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time; -- Results After Normalizing UUIDs diff --git a/influxdb_iox/tests/query_tests2/cases/in/schema_merge.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/schema_merge.sql.expected index 02b562b85b..b2f6ce6cee 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/schema_merge.sql.expected +++ 
b/influxdb_iox/tests/query_tests2/cases/in/schema_merge.sql.expected @@ -4,10 +4,10 @@ +------+--------+--------+--------------------------------+------+ | host | region | system | time | user | +------+--------+--------+--------------------------------+------+ -| bar | west | | 1970-01-01T00:00:00.000000250Z | 21 | +| bar | west | | 1970-01-01T00:00:00.000000250Z | 21.0 | | foo | east | | 1970-01-01T00:00:00.000000100Z | 23.2 | -| | west | 5 | 1970-01-01T00:00:00.000000100Z | 23.2 | -| | west | 6 | 1970-01-01T00:00:00.000000150Z | 21 | +| | west | 5.0 | 1970-01-01T00:00:00.000000100Z | 23.2 | +| | west | 6.0 | 1970-01-01T00:00:00.000000150Z | 21.0 | +------+--------+--------+--------------------------------+------+ -- SQL: SELECT host, region, system from cpu; -- Results After Sorting @@ -16,6 +16,6 @@ +------+--------+--------+ | bar | west | | | foo | east | | -| | west | 5 | -| | west | 6 | +| | west | 5.0 | +| | west | 6.0 | +------+--------+--------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/selectors.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/selectors.sql.expected index 1f1e986ac6..e6110726a4 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/selectors.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/selectors.sql.expected @@ -3,180 +3,180 @@ +------------+-------------+-----------+--------------+------+--------------------------------+------------+ | bool_field | float_field | int_field | string_field | tag | time | uint_field | +------------+-------------+-----------+--------------+------+--------------------------------+------------+ -| false | 65 | 18 | fruz | row1 | 1970-01-01T00:00:00.000000100Z | 40 | -| false | 62 | 21 | ba | row1 | 1970-01-01T00:00:00.000000200Z | 30 | -| false | 63 | 20 | baz | row1 | 1970-01-01T00:00:00.000000300Z | 35 | -| true | 64 | 19 | bar | row1 | 1970-01-01T00:00:00.000000400Z | 20 | -| | 64 | | | row1 | 1970-01-01T00:00:00.000000450Z | | -| true | 61 | 22 | foo | row1 | 1970-01-01T00:00:00.000000500Z | 25 | -| | | 64 | | row1 | 1970-01-01T00:00:00.000000550Z | | -| true | 66 | 17 | faa | row1 | 1970-01-01T00:00:00.000000600Z | 10 | +| false | 65.0 | 18.0 | fruz | row1 | 1970-01-01T00:00:00.000000100Z | 40 | +| false | 62.0 | 21.0 | ba | row1 | 1970-01-01T00:00:00.000000200Z | 30 | +| false | 63.0 | 20.0 | baz | row1 | 1970-01-01T00:00:00.000000300Z | 35 | +| true | 64.0 | 19.0 | bar | row1 | 1970-01-01T00:00:00.000000400Z | 20 | +| | 64.0 | | | row1 | 1970-01-01T00:00:00.000000450Z | | +| true | 61.0 | 22.0 | foo | row1 | 1970-01-01T00:00:00.000000500Z | 25 | +| | | 64.0 | | row1 | 1970-01-01T00:00:00.000000550Z | | +| true | 66.0 | 17.0 | faa | row1 | 1970-01-01T00:00:00.000000600Z | 10 | +------------+-------------+-----------+--------------+------+--------------------------------+------------+ -- SQL: select selector_first(float_field, time) from m; -+------------------------------------------------------+ -| selector_first(m.float_field,m.time) | -+------------------------------------------------------+ -| {"value": 65, "time": 1970-01-01T00:00:00.000000100} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_first(m.float_field,m.time) | ++----------------------------------------------------+ +| {value: 65.0, time: 1970-01-01T00:00:00.000000100} | ++----------------------------------------------------+ -- SQL: select selector_first(int_field, time) from m; 
-+------------------------------------------------------+ -| selector_first(m.int_field,m.time) | -+------------------------------------------------------+ -| {"value": 18, "time": 1970-01-01T00:00:00.000000100} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_first(m.int_field,m.time) | ++----------------------------------------------------+ +| {value: 18.0, time: 1970-01-01T00:00:00.000000100} | ++----------------------------------------------------+ -- SQL: select selector_first(uint_field, time) from m; -+------------------------------------------------------+ -| selector_first(m.uint_field,m.time) | -+------------------------------------------------------+ -| {"value": 40, "time": 1970-01-01T00:00:00.000000100} | -+------------------------------------------------------+ ++--------------------------------------------------+ +| selector_first(m.uint_field,m.time) | ++--------------------------------------------------+ +| {value: 40, time: 1970-01-01T00:00:00.000000100} | ++--------------------------------------------------+ -- SQL: select selector_first(string_field, time) from m; -+----------------------------------------------------------+ -| selector_first(m.string_field,m.time) | -+----------------------------------------------------------+ -| {"value": "fruz", "time": 1970-01-01T00:00:00.000000100} | -+----------------------------------------------------------+ ++----------------------------------------------------+ +| selector_first(m.string_field,m.time) | ++----------------------------------------------------+ +| {value: fruz, time: 1970-01-01T00:00:00.000000100} | ++----------------------------------------------------+ -- SQL: select selector_first(bool_field, time) from m; -+---------------------------------------------------------+ -| selector_first(m.bool_field,m.time) | -+---------------------------------------------------------+ -| {"value": false, "time": 1970-01-01T00:00:00.000000100} | -+---------------------------------------------------------+ ++-----------------------------------------------------+ +| selector_first(m.bool_field,m.time) | ++-----------------------------------------------------+ +| {value: false, time: 1970-01-01T00:00:00.000000100} | ++-----------------------------------------------------+ -- SQL: select selector_first(float_field, time)['value'], selector_first(float_field, time)['time'] from m; +---------------------------------------------+--------------------------------------------+ | selector_first(m.float_field,m.time)[value] | selector_first(m.float_field,m.time)[time] | +---------------------------------------------+--------------------------------------------+ -| 65 | 1970-01-01T00:00:00.000000100Z | +| 65.0 | 1970-01-01T00:00:00.000000100Z | +---------------------------------------------+--------------------------------------------+ -- SQL: select f['value'], f['time'] from (select selector_first(float_field, time) as f from m) as sq; +-------------+--------------------------------+ | sq.f[value] | sq.f[time] | +-------------+--------------------------------+ -| 65 | 1970-01-01T00:00:00.000000100Z | +| 65.0 | 1970-01-01T00:00:00.000000100Z | +-------------+--------------------------------+ -- SQL: select selector_last(float_field, time) from m; -+------------------------------------------------------+ -| selector_last(m.float_field,m.time) | -+------------------------------------------------------+ -| {"value": 66, "time": 1970-01-01T00:00:00.000000600} | 
-+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_last(m.float_field,m.time) | ++----------------------------------------------------+ +| {value: 66.0, time: 1970-01-01T00:00:00.000000600} | ++----------------------------------------------------+ -- SQL: select selector_last(int_field, time) from m; -+------------------------------------------------------+ -| selector_last(m.int_field,m.time) | -+------------------------------------------------------+ -| {"value": 17, "time": 1970-01-01T00:00:00.000000600} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_last(m.int_field,m.time) | ++----------------------------------------------------+ +| {value: 17.0, time: 1970-01-01T00:00:00.000000600} | ++----------------------------------------------------+ -- SQL: select selector_last(uint_field, time) from m; -+------------------------------------------------------+ -| selector_last(m.uint_field,m.time) | -+------------------------------------------------------+ -| {"value": 10, "time": 1970-01-01T00:00:00.000000600} | -+------------------------------------------------------+ ++--------------------------------------------------+ +| selector_last(m.uint_field,m.time) | ++--------------------------------------------------+ +| {value: 10, time: 1970-01-01T00:00:00.000000600} | ++--------------------------------------------------+ -- SQL: select selector_last(string_field, time) from m; -+---------------------------------------------------------+ -| selector_last(m.string_field,m.time) | -+---------------------------------------------------------+ -| {"value": "faa", "time": 1970-01-01T00:00:00.000000600} | -+---------------------------------------------------------+ ++---------------------------------------------------+ +| selector_last(m.string_field,m.time) | ++---------------------------------------------------+ +| {value: faa, time: 1970-01-01T00:00:00.000000600} | ++---------------------------------------------------+ -- SQL: select selector_last(bool_field, time) from m; -+--------------------------------------------------------+ -| selector_last(m.bool_field,m.time) | -+--------------------------------------------------------+ -| {"value": true, "time": 1970-01-01T00:00:00.000000600} | -+--------------------------------------------------------+ ++----------------------------------------------------+ +| selector_last(m.bool_field,m.time) | ++----------------------------------------------------+ +| {value: true, time: 1970-01-01T00:00:00.000000600} | ++----------------------------------------------------+ -- SQL: select selector_last(float_field, time)['value'], selector_last(float_field, time)['time'] from m; +--------------------------------------------+-------------------------------------------+ | selector_last(m.float_field,m.time)[value] | selector_last(m.float_field,m.time)[time] | +--------------------------------------------+-------------------------------------------+ -| 66 | 1970-01-01T00:00:00.000000600Z | +| 66.0 | 1970-01-01T00:00:00.000000600Z | +--------------------------------------------+-------------------------------------------+ -- SQL: select f['value'], f['time'] from (select selector_last(float_field, time) as f from m) as sq; +-------------+--------------------------------+ | sq.f[value] | sq.f[time] | +-------------+--------------------------------+ -| 66 | 1970-01-01T00:00:00.000000600Z | +| 66.0 | 
1970-01-01T00:00:00.000000600Z | +-------------+--------------------------------+ -- SQL: select selector_min(float_field, time) from m; -+------------------------------------------------------+ -| selector_min(m.float_field,m.time) | -+------------------------------------------------------+ -| {"value": 61, "time": 1970-01-01T00:00:00.000000500} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_min(m.float_field,m.time) | ++----------------------------------------------------+ +| {value: 61.0, time: 1970-01-01T00:00:00.000000500} | ++----------------------------------------------------+ -- SQL: select selector_min(int_field, time) from m; -+------------------------------------------------------+ -| selector_min(m.int_field,m.time) | -+------------------------------------------------------+ -| {"value": 17, "time": 1970-01-01T00:00:00.000000600} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_min(m.int_field,m.time) | ++----------------------------------------------------+ +| {value: 17.0, time: 1970-01-01T00:00:00.000000600} | ++----------------------------------------------------+ -- SQL: select selector_min(uint_field, time) from m; -+------------------------------------------------------+ -| selector_min(m.uint_field,m.time) | -+------------------------------------------------------+ -| {"value": 10, "time": 1970-01-01T00:00:00.000000600} | -+------------------------------------------------------+ ++--------------------------------------------------+ +| selector_min(m.uint_field,m.time) | ++--------------------------------------------------+ +| {value: 10, time: 1970-01-01T00:00:00.000000600} | ++--------------------------------------------------+ -- SQL: select selector_min(string_field, time) from m; -+--------------------------------------------------------+ -| selector_min(m.string_field,m.time) | -+--------------------------------------------------------+ -| {"value": "ba", "time": 1970-01-01T00:00:00.000000200} | -+--------------------------------------------------------+ ++--------------------------------------------------+ +| selector_min(m.string_field,m.time) | ++--------------------------------------------------+ +| {value: ba, time: 1970-01-01T00:00:00.000000200} | ++--------------------------------------------------+ -- SQL: select selector_min(bool_field, time) from m; -+---------------------------------------------------------+ -| selector_min(m.bool_field,m.time) | -+---------------------------------------------------------+ -| {"value": false, "time": 1970-01-01T00:00:00.000000100} | -+---------------------------------------------------------+ ++-----------------------------------------------------+ +| selector_min(m.bool_field,m.time) | ++-----------------------------------------------------+ +| {value: false, time: 1970-01-01T00:00:00.000000100} | ++-----------------------------------------------------+ -- SQL: select selector_min(float_field, time)['value'], selector_min(float_field, time)['time'] from m; +-------------------------------------------+------------------------------------------+ | selector_min(m.float_field,m.time)[value] | selector_min(m.float_field,m.time)[time] | +-------------------------------------------+------------------------------------------+ -| 61 | 1970-01-01T00:00:00.000000500Z | +| 61.0 | 1970-01-01T00:00:00.000000500Z | 
+-------------------------------------------+------------------------------------------+ -- SQL: select f['value'], f['time'] from (select selector_min(float_field, time) as f from m) as sq; +-------------+--------------------------------+ | sq.f[value] | sq.f[time] | +-------------+--------------------------------+ -| 61 | 1970-01-01T00:00:00.000000500Z | +| 61.0 | 1970-01-01T00:00:00.000000500Z | +-------------+--------------------------------+ -- SQL: select selector_max(float_field, time) from m; -+------------------------------------------------------+ -| selector_max(m.float_field,m.time) | -+------------------------------------------------------+ -| {"value": 66, "time": 1970-01-01T00:00:00.000000600} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_max(m.float_field,m.time) | ++----------------------------------------------------+ +| {value: 66.0, time: 1970-01-01T00:00:00.000000600} | ++----------------------------------------------------+ -- SQL: select selector_max(int_field, time) from m; -+------------------------------------------------------+ -| selector_max(m.int_field,m.time) | -+------------------------------------------------------+ -| {"value": 64, "time": 1970-01-01T00:00:00.000000550} | -+------------------------------------------------------+ ++----------------------------------------------------+ +| selector_max(m.int_field,m.time) | ++----------------------------------------------------+ +| {value: 64.0, time: 1970-01-01T00:00:00.000000550} | ++----------------------------------------------------+ -- SQL: select selector_max(uint_field, time) from m; -+------------------------------------------------------+ -| selector_max(m.uint_field,m.time) | -+------------------------------------------------------+ -| {"value": 40, "time": 1970-01-01T00:00:00.000000100} | -+------------------------------------------------------+ ++--------------------------------------------------+ +| selector_max(m.uint_field,m.time) | ++--------------------------------------------------+ +| {value: 40, time: 1970-01-01T00:00:00.000000100} | ++--------------------------------------------------+ -- SQL: select selector_max(string_field, time) from m; -+----------------------------------------------------------+ -| selector_max(m.string_field,m.time) | -+----------------------------------------------------------+ -| {"value": "fruz", "time": 1970-01-01T00:00:00.000000100} | -+----------------------------------------------------------+ ++----------------------------------------------------+ +| selector_max(m.string_field,m.time) | ++----------------------------------------------------+ +| {value: fruz, time: 1970-01-01T00:00:00.000000100} | ++----------------------------------------------------+ -- SQL: select selector_max(bool_field, time) from m; -+--------------------------------------------------------+ -| selector_max(m.bool_field,m.time) | -+--------------------------------------------------------+ -| {"value": true, "time": 1970-01-01T00:00:00.000000400} | -+--------------------------------------------------------+ ++----------------------------------------------------+ +| selector_max(m.bool_field,m.time) | ++----------------------------------------------------+ +| {value: true, time: 1970-01-01T00:00:00.000000400} | ++----------------------------------------------------+ -- SQL: select selector_max(float_field, time)['value'], selector_max(float_field, time)['time'] from m; 
+-------------------------------------------+------------------------------------------+ | selector_max(m.float_field,m.time)[value] | selector_max(m.float_field,m.time)[time] | +-------------------------------------------+------------------------------------------+ -| 66 | 1970-01-01T00:00:00.000000600Z | +| 66.0 | 1970-01-01T00:00:00.000000600Z | +-------------------------------------------+------------------------------------------+ -- SQL: select f['value'], f['time'] from (select selector_max(float_field, time) as f from m) as sq; +-------------+--------------------------------+ | sq.f[value] | sq.f[time] | +-------------+--------------------------------+ -| 66 | 1970-01-01T00:00:00.000000600Z | -+-------------+--------------------------------+ +| 66.0 | 1970-01-01T00:00:00.000000600Z | ++-------------+--------------------------------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected index 994cf40998..cdf0da39ac 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected @@ -7,7 +7,7 @@ | Andover | 72.4 | CA | | 1970-01-01T00:00:00.000000150Z | | Andover | | CA | 67.3 | 1970-01-01T00:00:00.000000500Z | | Boston | 68.2 | MA | | 1970-01-01T00:00:00.000000450Z | -| Boston | 80 | MA | | 1970-01-01T00:00:00.000000250Z | +| Boston | 80.0 | MA | | 1970-01-01T00:00:00.000000250Z | | Boston | | MA | 70.4 | 1970-01-01T00:00:00.000000050Z | | Boston | | MA | 80.7 | 1970-01-01T00:00:00.000000350Z | | Boston | | MA | 88.6 | 1970-01-01T00:00:00.000000230Z | @@ -44,7 +44,7 @@ | 88.6 | | 1970-01-01T00:00:00.000000230Z | | | 68.2 | 1970-01-01T00:00:00.000000450Z | | | 72.4 | 1970-01-01T00:00:00.000000150Z | -| | 80 | 1970-01-01T00:00:00.000000250Z | +| | 80.0 | 1970-01-01T00:00:00.000000250Z | +------+------------+--------------------------------+ -- SQL: EXPLAIN select temp, other_temp, time from h2o; -- Results After Normalizing UUIDs diff --git a/influxdb_iox/tests/query_tests2/cases/in/timestamps.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/timestamps.sql.expected index 1bb8eff5f2..26f2aa80dc 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/timestamps.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/timestamps.sql.expected @@ -4,30 +4,30 @@ | region | time | user | +--------+----------------------+------+ | west | 2021-07-20T19:28:50Z | 23.2 | -| west | 2021-07-20T19:30:30Z | 21 | +| west | 2021-07-20T19:30:30Z | 21.0 | +--------+----------------------+------+ -- SQL: SELECT * FROM cpu WHERE time > to_timestamp('2021-07-20 19:28:50+00:00'); +--------+----------------------+------+ | region | time | user | +--------+----------------------+------+ -| west | 2021-07-20T19:30:30Z | 21 | +| west | 2021-07-20T19:30:30Z | 21.0 | +--------+----------------------+------+ -- SQL: SELECT * FROM cpu WHERE time > to_timestamp('2021-07-20T19:28:50Z'); +--------+----------------------+------+ | region | time | user | +--------+----------------------+------+ -| west | 2021-07-20T19:30:30Z | 21 | +| west | 2021-07-20T19:30:30Z | 21.0 | +--------+----------------------+------+ -- SQL: SELECT * FROM cpu WHERE CAST(time AS BIGINT) > CAST(to_timestamp('2021-07-20T19:28:50Z') AS BIGINT); +--------+----------------------+------+ | region | time | user | +--------+----------------------+------+ -| west | 2021-07-20T19:30:30Z | 21 | +| west | 2021-07-20T19:30:30Z | 21.0 
| +--------+----------------------+------+ -- SQL: SELECT * FROM cpu where cast(time as bigint) > 10 order by region, time, "user"; +--------+----------------------+------+ | region | time | user | +--------+----------------------+------+ | west | 2021-07-20T19:28:50Z | 23.2 | -| west | 2021-07-20T19:30:30Z | 21 | -+--------+----------------------+------+ +| west | 2021-07-20T19:30:30Z | 21.0 | ++--------+----------------------+------+ \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/two_chunks_missing_columns.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/two_chunks_missing_columns.sql.expected index 8694f327af..8a27c1d398 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/two_chunks_missing_columns.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/two_chunks_missing_columns.sql.expected @@ -3,6 +3,6 @@ +--------+--------+--------+------+------+------+--------------------------------+ | field1 | field2 | field3 | tag1 | tag2 | tag3 | time | +--------+--------+--------+------+------+------+--------------------------------+ -| 10 | 11 | | a | b | | 1970-01-01T00:00:00.000000100Z | -| 20 | | 22 | a | | c | 1970-01-01T00:00:00.000000200Z | -+--------+--------+--------+------+------+------+--------------------------------+ +| 10.0 | 11.0 | | a | b | | 1970-01-01T00:00:00.000000100Z | +| 20.0 | | 22.0 | a | | c | 1970-01-01T00:00:00.000000200Z | ++--------+--------+--------+------+------+------+--------------------------------+ \ No newline at end of file diff --git a/ingester/src/buffer_tree/partition.rs b/ingester/src/buffer_tree/partition.rs index ee2154dc9a..7b2be1b12a 100644 --- a/ingester/src/buffer_tree/partition.rs +++ b/ingester/src/buffer_tree/partition.rs @@ -515,7 +515,7 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -561,8 +561,8 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", - "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", + "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -611,7 +611,7 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -656,8 +656,8 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", - "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", + "| Madrid | 4.0 | 
none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -729,7 +729,7 @@ mod tests { "+--------+--------+---------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+---------+--------------------------------+", - "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |", + "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+---------+--------------------------------+", ]; assert_batches_eq!( @@ -802,11 +802,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -820,11 +820,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -836,11 +836,11 @@ mod tests { assert_eq!(batches.record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "+--------------------------------+-----+", ], batches, ) @@ -855,11 +855,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 2); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -876,11 +876,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine.rs b/ingester/src/buffer_tree/partition/buffer/state_machine.rs index bd565f630c..2bd12bf9fb 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine.rs 
@@ -169,7 +169,7 @@ mod tests { "+-------+----------+----------+--------------------------------+", "| great | how_much | tag | time |", "+-------+----------+----------+--------------------------------+", - "| true | 42 | platanos | 1991-03-10T00:00:42.000000042Z |", + "| true | 42.0 | platanos | 1991-03-10T00:00:42.000000042Z |", "+-------+----------+----------+--------------------------------+", ]; assert_batches_eq!(&expected, &[w1_data[0].deref().clone()]); @@ -197,8 +197,8 @@ mod tests { "+-------+----------+----------+--------------------------------+", "| great | how_much | tag | time |", "+-------+----------+----------+--------------------------------+", - "| true | 42 | platanos | 1991-03-10T00:00:42.000000042Z |", - "| true | 1000 | platanos | 1991-03-10T00:00:42.000000043Z |", + "| true | 42.0 | platanos | 1991-03-10T00:00:42.000000042Z |", + "| true | 1000.0 | platanos | 1991-03-10T00:00:42.000000043Z |", "+-------+----------+----------+--------------------------------+", ]; assert_eq!(w2_data.len(), 1); diff --git a/ingester/src/compact.rs b/ingester/src/compact.rs index 62b10bffea..bb8be1583d 100644 --- a/ingester/src/compact.rs +++ b/ingester/src/compact.rs @@ -217,7 +217,7 @@ mod tests { "+-----+--------------------------------+", "| bar | time |", "+-----+--------------------------------+", - "| 2 | 1970-01-01T00:00:00.000000020Z |", + "| 2.0 | 1970-01-01T00:00:00.000000020Z |", "+-----+--------------------------------+", ]; assert_batches_eq!(&expected_data, &output_batches); diff --git a/ingester/src/querier_handler.rs b/ingester/src/querier_handler.rs index a56ba2ec6f..014049ffdf 100644 --- a/ingester/src/querier_handler.rs +++ b/ingester/src/querier_handler.rs @@ -426,13 +426,13 @@ mod tests { "+------------+-----+------+--------------------------------+", "| city | day | temp | time |", "+------------+-----+------+--------------------------------+", - "| Andover | tue | 56 | 1970-01-01T00:00:00.000000030Z |", // in group 1 - seq_num: 2 - "| Andover | mon | | 1970-01-01T00:00:00.000000046Z |", // in group 2 - seq_num: 3 - "| Boston | sun | 60 | 1970-01-01T00:00:00.000000036Z |", // in group 1 - seq_num: 1 - "| Boston | mon | | 1970-01-01T00:00:00.000000038Z |", // in group 3 - seq_num: 5 - "| Medford | sun | 55 | 1970-01-01T00:00:00.000000022Z |", // in group 4 - seq_num: 7 + "| Andover | mon | | 1970-01-01T00:00:00.000000046Z |", // in group 1 - seq_num: 2 + "| Andover | tue | 56.0 | 1970-01-01T00:00:00.000000030Z |", // in group 2 - seq_num: 3 + "| Boston | mon | | 1970-01-01T00:00:00.000000038Z |", // in group 1 - seq_num: 1 + "| Boston | sun | 60.0 | 1970-01-01T00:00:00.000000036Z |", // in group 3 - seq_num: 5 + "| Medford | sun | 55.0 | 1970-01-01T00:00:00.000000022Z |", // in group 4 - seq_num: 7 "| Medford | wed | | 1970-01-01T00:00:00.000000026Z |", // in group 2 - seq_num: 4 - "| Reading | mon | 58 | 1970-01-01T00:00:00.000000040Z |", // in group 4 - seq_num: 8 + "| Reading | mon | 58.0 | 1970-01-01T00:00:00.000000040Z |", // in group 4 - seq_num: 8 "| Wilmington | mon | | 1970-01-01T00:00:00.000000035Z |", // in group 3 - seq_num: 6 "+------------+-----+------+--------------------------------+", ]; @@ -462,12 +462,12 @@ mod tests { "| city | temp | time |", "+------------+------+--------------------------------+", "| Andover | | 1970-01-01T00:00:00.000000046Z |", - "| Andover | 56 | 1970-01-01T00:00:00.000000030Z |", + "| Andover | 56.0 | 1970-01-01T00:00:00.000000030Z |", "| Boston | | 1970-01-01T00:00:00.000000038Z |", - "| Boston | 60 | 
1970-01-01T00:00:00.000000036Z |", + "| Boston | 60.0 | 1970-01-01T00:00:00.000000036Z |", "| Medford | | 1970-01-01T00:00:00.000000026Z |", - "| Medford | 55 | 1970-01-01T00:00:00.000000022Z |", - "| Reading | 58 | 1970-01-01T00:00:00.000000040Z |", + "| Medford | 55.0 | 1970-01-01T00:00:00.000000022Z |", + "| Reading | 58.0 | 1970-01-01T00:00:00.000000040Z |", "| Wilmington | | 1970-01-01T00:00:00.000000035Z |", "+------------+------+--------------------------------+", ]; @@ -506,12 +506,12 @@ mod tests { "| city | temp | time |", "+------------+------+--------------------------------+", "| Andover | | 1970-01-01T00:00:00.000000046Z |", - "| Andover | 56 | 1970-01-01T00:00:00.000000030Z |", + "| Andover | 56.0 | 1970-01-01T00:00:00.000000030Z |", "| Boston | | 1970-01-01T00:00:00.000000038Z |", - "| Boston | 60 | 1970-01-01T00:00:00.000000036Z |", + "| Boston | 60.0 | 1970-01-01T00:00:00.000000036Z |", "| Medford | | 1970-01-01T00:00:00.000000026Z |", - "| Medford | 55 | 1970-01-01T00:00:00.000000022Z |", - "| Reading | 58 | 1970-01-01T00:00:00.000000040Z |", + "| Medford | 55.0 | 1970-01-01T00:00:00.000000022Z |", + "| Reading | 58.0 | 1970-01-01T00:00:00.000000040Z |", "| Wilmington | | 1970-01-01T00:00:00.000000035Z |", "+------------+------+--------------------------------+", ]; diff --git a/ingester/tests/write.rs b/ingester/tests/write.rs index 1162d745ca..22905100d6 100644 --- a/ingester/tests/write.rs +++ b/ingester/tests/write.rs @@ -67,7 +67,7 @@ async fn test_write_query() { "| count | greatness | time |", "+-------+-----------+--------------------------------+", "| | unbounded | 1970-01-01T00:00:00.000000010Z |", - "| 42 | | 1970-01-01T00:00:00.000000200Z |", + "| 42.0 | | 1970-01-01T00:00:00.000000200Z |", "+-------+-----------+--------------------------------+", ]; assert_batches_sorted_eq!(&expected, &data); @@ -172,7 +172,7 @@ async fn test_seek_on_init() { "+-----------+----------+--------------------------------+", "| greatness | platanos | time |", "+-----------+----------+--------------------------------+", - "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |", + "| amazing | 42.0 | 1970-01-01T00:00:00.000000020Z |", "| unbounded | | 1970-01-01T00:00:00.000000010Z |", "+-----------+----------+--------------------------------+", ]; @@ -211,7 +211,7 @@ async fn test_seek_on_init() { "+-----------+----------+--------------------------------+", "| greatness | platanos | time |", "+-----------+----------+--------------------------------+", - "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |", + "| amazing | 42.0 | 1970-01-01T00:00:00.000000020Z |", "+-----------+----------+--------------------------------+", ]; assert_batches_sorted_eq!(&expected, &data); @@ -262,7 +262,7 @@ async fn test_skip_previously_applied_partition_ops() { "+-----------+----------+--------------------------------+", "| greatness | platanos | time |", "+-----------+----------+--------------------------------+", - "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |", + "| amazing | 42.0 | 1970-01-01T00:00:00.000000020Z |", "| unbounded | | 1970-01-01T00:00:00.000000010Z |", "+-----------+----------+--------------------------------+", ]; @@ -313,7 +313,7 @@ async fn test_skip_previously_applied_partition_ops() { "+-----------+----------+--------------------------------+", "| greatness | platanos | time |", "+-----------+----------+--------------------------------+", - "| amazing | 42 | 1970-01-01T00:00:00.000000020Z |", + "| amazing | 42.0 | 1970-01-01T00:00:00.000000020Z |", 
"+-----------+----------+--------------------------------+", ]; assert_batches_sorted_eq!(&expected, &data); diff --git a/ingester2/src/buffer_tree/partition.rs b/ingester2/src/buffer_tree/partition.rs index 9b0d819356..79abda50c2 100644 --- a/ingester2/src/buffer_tree/partition.rs +++ b/ingester2/src/buffer_tree/partition.rs @@ -405,7 +405,7 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -434,8 +434,8 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", - "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", + "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -489,7 +489,7 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -523,8 +523,8 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", - "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", + "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+----------+--------------------------------+", ]; assert_batches_eq!( @@ -557,7 +557,7 @@ mod tests { "+--------+--------+---------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+---------+--------------------------------+", - "| Madrid | 4 | none | 1970-01-01T00:00:00.000000020Z |", + "| Madrid | 4.0 | none | 1970-01-01T00:00:00.000000020Z |", "+--------+--------+---------+--------------------------------+", ]; assert_batches_eq!( @@ -634,11 +634,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -652,11 +652,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "+--------------------------------+---+", + 
"+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -671,11 +671,11 @@ mod tests { assert_eq!(persisting_data1.record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "+--------------------------------+-----+", ], (*persisting_data1).clone(), ) @@ -694,11 +694,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 2); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -711,11 +711,11 @@ mod tests { assert_eq!(persisting_data2.record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "+--------------------------------+-----+", ], (*persisting_data2).clone(), ) @@ -735,11 +735,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 3); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -755,11 +755,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 2); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -774,11 +774,11 @@ mod tests { assert_eq!(p.get_query_data().unwrap().record_batches().len(), 1); assert_deduped( &[ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], p.get_query_data().unwrap(), ) @@ -828,12 +828,12 @@ mod tests { let data 
= p.get_query_data().unwrap(); assert_batches_eq!( [ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "+--------------------------------+-----+", ], &*data .record_batches() @@ -856,13 +856,13 @@ mod tests { let data = p.get_query_data().unwrap(); assert_batches_eq!( [ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "+--------------------------------+-----+", ], &*data .record_batches() @@ -885,14 +885,14 @@ mod tests { let data = p.get_query_data().unwrap(); assert_batches_eq!( [ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "| 1970-01-01T00:00:00.000000042Z | 2 |", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "| 1970-01-01T00:00:00.000000042Z | 2.0 |", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], &*data .record_batches() @@ -911,13 +911,13 @@ mod tests { let data = p.get_query_data().unwrap(); assert_batches_eq!( [ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "| 1970-01-01T00:00:00.000000042Z | 3 |", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "| 1970-01-01T00:00:00.000000042Z | 3.0 |", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], &*data .record_batches() @@ -935,12 +935,12 @@ mod tests { let data = p.get_query_data().unwrap(); assert_batches_eq!( [ - "+--------------------------------+---+", - "| time | x |", - "+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 1 |", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 1.0 |", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], &*data .record_batches() @@ -959,11 +959,11 @@ mod tests { let data = p.get_query_data().unwrap(); assert_batches_eq!( [ - "+--------------------------------+---+", - "| time | x |", - 
"+--------------------------------+---+", - "| 1970-01-01T00:00:00.000000042Z | 4 |", - "+--------------------------------+---+", + "+--------------------------------+-----+", + "| time | x |", + "+--------------------------------+-----+", + "| 1970-01-01T00:00:00.000000042Z | 4.0 |", + "+--------------------------------+-----+", ], &*data .record_batches() @@ -1098,8 +1098,8 @@ mod tests { "+--------+--------+----------+--------------------------------+", "| city | people | pigeons | time |", "+--------+--------+----------+--------------------------------+", - "| London | 2 | millions | 1970-01-01T00:00:00.000000010Z |", - "| Madrid | 2 | none | 1970-01-01T00:00:00.000000011Z |", + "| London | 2.0 | millions | 1970-01-01T00:00:00.000000010Z |", + "| Madrid | 2.0 | none | 1970-01-01T00:00:00.000000011Z |", "+--------+--------+----------+--------------------------------+", ], &*data diff --git a/ingester2/src/buffer_tree/partition/buffer/state_machine.rs b/ingester2/src/buffer_tree/partition/buffer/state_machine.rs index e233de3691..8d7d633df2 100644 --- a/ingester2/src/buffer_tree/partition/buffer/state_machine.rs +++ b/ingester2/src/buffer_tree/partition/buffer/state_machine.rs @@ -172,7 +172,7 @@ mod tests { "+-------+----------+----------+--------------------------------+", "| great | how_much | tag | time |", "+-------+----------+----------+--------------------------------+", - "| true | 42 | platanos | 1991-03-10T00:00:42.000000042Z |", + "| true | 42.0 | platanos | 1991-03-10T00:00:42.000000042Z |", "+-------+----------+----------+--------------------------------+", ]; assert_batches_eq!(&expected, &[w1_data[0].deref().clone()]); @@ -200,8 +200,8 @@ mod tests { "+-------+----------+----------+--------------------------------+", "| great | how_much | tag | time |", "+-------+----------+----------+--------------------------------+", - "| true | 42 | platanos | 1991-03-10T00:00:42.000000042Z |", - "| true | 1000 | platanos | 1991-03-10T00:00:42.000000043Z |", + "| true | 42.0 | platanos | 1991-03-10T00:00:42.000000042Z |", + "| true | 1000.0 | platanos | 1991-03-10T00:00:42.000000043Z |", "+-------+----------+----------+--------------------------------+", ]; assert_eq!(w2_data.len(), 1); diff --git a/ingester2/src/buffer_tree/root.rs b/ingester2/src/buffer_tree/root.rs index 9373add813..670b8ad046 100644 --- a/ingester2/src/buffer_tree/root.rs +++ b/ingester2/src/buffer_tree/root.rs @@ -415,7 +415,7 @@ mod tests { "+----------+------+-------------------------------+", "| region | temp | time |", "+----------+------+-------------------------------+", - "| Asturias | 35 | 1970-01-01T00:00:04.242424242 |", + "| Asturias | 35.0 | 1970-01-01T00:00:04.242424242 |", "+----------+------+-------------------------------+", ] ); @@ -476,8 +476,8 @@ mod tests { "+----------+------+-------------------------------+", "| region | temp | time |", "+----------+------+-------------------------------+", - "| Madrid | 35 | 1970-01-01T00:00:04.242424242 |", - "| Asturias | 25 | 1970-01-01T00:00:04.242424242 |", + "| Madrid | 35.0 | 1970-01-01T00:00:04.242424242 |", + "| Asturias | 25.0 | 1970-01-01T00:00:04.242424242 |", "+----------+------+-------------------------------+", ] ); @@ -538,7 +538,7 @@ mod tests { "+--------+------+-------------------------------+", "| region | temp | time |", "+--------+------+-------------------------------+", - "| Madrid | 25 | 1970-01-01T00:00:04.242424242 |", + "| Madrid | 25.0 | 1970-01-01T00:00:04.242424242 |", "+--------+------+-------------------------------+", ] ); 
@@ -599,7 +599,7 @@ mod tests { "+--------+------+-------------------------------+", "| region | temp | time |", "+--------+------+-------------------------------+", - "| Madrid | 25 | 1970-01-01T00:00:04.242424242 |", + "| Madrid | 25.0 | 1970-01-01T00:00:04.242424242 |", "+--------+------+-------------------------------+", ] ); @@ -646,8 +646,8 @@ mod tests { "+----------+------+-------------------------------+", "| region | temp | time |", "+----------+------+-------------------------------+", - "| Asturias | 35 | 1970-01-01T00:00:04.242424242 |", - "| Asturias | 12 | 1970-01-01T00:00:04.242424242 |", + "| Asturias | 35.0 | 1970-01-01T00:00:04.242424242 |", + "| Asturias | 12.0 | 1970-01-01T00:00:04.242424242 |", "+----------+------+-------------------------------+", ] ); @@ -1064,8 +1064,8 @@ mod tests { "+--------+------+-------------------------------+", "| region | temp | time |", "+--------+------+-------------------------------+", - "| Madrid | 35 | 1970-01-01T00:00:04.242424242 |", - "| Murcia | 30 | 1970-01-01T00:00:04.242424242 |", + "| Madrid | 35.0 | 1970-01-01T00:00:04.242424242 |", + "| Murcia | 30.0 | 1970-01-01T00:00:04.242424242 |", "+--------+------+-------------------------------+", ], &batches diff --git a/ingester2/src/persist/compact.rs b/ingester2/src/persist/compact.rs index 84fabfcd55..741d164b3d 100644 --- a/ingester2/src/persist/compact.rs +++ b/ingester2/src/persist/compact.rs @@ -152,7 +152,7 @@ mod tests { "+-----+--------------------------------+", "| bar | time |", "+-----+--------------------------------+", - "| 2 | 1970-01-01T00:00:00.000000020Z |", + "| 2.0 | 1970-01-01T00:00:00.000000020Z |", "+-----+--------------------------------+", ]; assert_batches_eq!(&expected_data, &output_batches); diff --git a/ingester2/tests/write.rs b/ingester2/tests/write.rs index 1c02b465d2..51fa25ce2b 100644 --- a/ingester2/tests/write.rs +++ b/ingester2/tests/write.rs @@ -60,7 +60,7 @@ async fn write_query() { "| count | greatness | time |", "+-------+-----------+--------------------------------+", "| | unbounded | 1970-01-01T00:00:00.000000010Z |", - "| 42 | | 1970-01-01T00:00:00.000000200Z |", + "| 42.0 | | 1970-01-01T00:00:00.000000200Z |", "+-------+-----------+--------------------------------+", ]; assert_batches_sorted_eq!(&expected, &data); @@ -139,7 +139,7 @@ async fn wal_replay() { "| count | greatness | time |", "+-------+-----------+--------------------------------+", "| | unbounded | 1970-01-01T00:00:00.000000010Z |", - "| 42 | | 1970-01-01T00:00:00.000000200Z |", + "| 42.0 | | 1970-01-01T00:00:00.000000200Z |", "+-------+-----------+--------------------------------+", ]; assert_batches_sorted_eq!(&expected, &data); diff --git a/iox_query/src/exec/seriesset/converter.rs b/iox_query/src/exec/seriesset/converter.rs index e8f35b3557..f20a71e144 100644 --- a/iox_query/src/exec/seriesset/converter.rs +++ b/iox_query/src/exec/seriesset/converter.rs @@ -904,7 +904,7 @@ mod tests { "+-------+-------+-------------+-----------+------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+-------+-------------+-----------+------+", - "| one | ten | 10 | 1 | 1000 |", + "| one | ten | 10.0 | 1 | 1000 |", "| one | ten | 10.1 | 2 | 2000 |", "+-------+-------+-------------+-----------+------+", ], @@ -939,7 +939,7 @@ mod tests { "+-------+-------+-------------+-----------+------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+-------+-------------+-----------+------+", - "| one | ten | 10 | | 1000 |", + "| one | ten | 10.0 | | 1000 |", "| one 
| ten | 10.1 | | 2000 |", "+-------+-------+-------------+-----------+------+", ], @@ -973,7 +973,7 @@ mod tests { "+-------+-------+-------------+-----------+------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+-------+-------------+-----------+------+", - "| one | ten | 10 | 1 | 1000 |", + "| one | ten | 10.0 | 1 | 1000 |", "| one | ten | 10.1 | 2 | 2000 |", "+-------+-------+-------------+-----------+------+", ], @@ -1014,7 +1014,7 @@ mod tests { "+-------+-------+-------------+-----------+------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+-------+-------------+-----------+------+", - "| one | ten | 10 | 1 | 1000 |", + "| one | ten | 10.0 | 1 | 1000 |", "| one | ten | 10.1 | 2 | 2000 |", "| one | ten | 10.2 | 3 | 3000 |", "+-------+-------+-------------+-----------+------+", @@ -1057,7 +1057,7 @@ mod tests { "+-------+--------+-------------+-----------+------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+--------+-------------+-----------+------+", - "| one | ten | 10 | 1 | 1000 |", + "| one | ten | 10.0 | 1 | 1000 |", "| one | ten | 10.1 | 2 | 2000 |", "| one | eleven | 10.1 | 3 | 3000 |", "+-------+--------+-------------+-----------+------+", @@ -1115,7 +1115,7 @@ mod tests { "+-------+-------+-------------+-----------+------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+-------+-------------+-----------+------+", - "| one | ten | 10 | 1 | 1000 |", + "| one | ten | 10.0 | 1 | 1000 |", "| one | ten | 10.1 | 2 | 2000 |", "+-------+-------+-------------+-----------+------+", ], @@ -1186,7 +1186,7 @@ mod tests { "+-------+-------+-------------+-----------+-----------------------------+", "| tag_a | tag_b | float_field | int_field | time |", "+-------+-------+-------------+-----------+-----------------------------+", - "| one | ten | 10 | 1 | 1970-01-01T00:00:00.000001Z |", + "| one | ten | 10.0 | 1 | 1970-01-01T00:00:00.000001Z |", "| one | ten | 10.1 | 2 | 1970-01-01T00:00:00.000002Z |", "+-------+-------+-------------+-----------+-----------------------------+", ], diff --git a/iox_query/src/provider/deduplicate.rs b/iox_query/src/provider/deduplicate.rs index 01bf306e25..a163e0153f 100644 --- a/iox_query/src/provider/deduplicate.rs +++ b/iox_query/src/provider/deduplicate.rs @@ -393,13 +393,13 @@ mod test { let results = dedupe(vec![batch], sort_keys).await; let expected = vec![ - "+----+----+----+", - "| t1 | f1 | f2 |", - "+----+----+----+", - "| a | 2 | 4 |", - "| b | 5 | 6 |", - "| c | 7 | 8 |", - "+----+----+----+", + "+----+-----+-----+", + "| t1 | f1 | f2 |", + "+----+-----+-----+", + "| a | 2.0 | 4.0 |", + "| b | 5.0 | 6.0 |", + "| c | 7.0 | 8.0 |", + "+----+-----+-----+", ]; assert_batches_eq!(&expected, &results.output); } @@ -440,11 +440,11 @@ mod test { let results = dedupe(vec![batch], sort_keys).await; let expected = vec![ - "+----+----+--------------------------------+", - "| f1 | f2 | time |", - "+----+----+--------------------------------+", - "| 1 | 3 | 1970-01-01T00:00:00.000000100Z |", - "+----+----+--------------------------------+", + "+-----+-----+--------------------------------+", + "| f1 | f2 | time |", + "+-----+-----+--------------------------------+", + "| 1.0 | 3.0 | 1970-01-01T00:00:00.000000100Z |", + "+-----+-----+--------------------------------+", ]; assert_batches_eq!(&expected, &results.output); } @@ -567,18 +567,18 @@ mod test { let results = dedupe(vec![batch], sort_keys).await; let expected = vec![ - "+----+----+----+----+", - "| t1 | t2 | f1 | f2 |", - 
"+----+----+----+----+", - "| a | b | 2 | 4 |", - "| a | z | 5 | |", - "| b | b | 6 | |", - "| b | c | 7 | 6 |", - "| c | c | 8 | |", - "| d | b | | 9 |", - "| e | | 12 | 11 |", - "| | f | 13 | 14 |", - "+----+----+----+----+", + "+----+----+------+------+", + "| t1 | t2 | f1 | f2 |", + "+----+----+------+------+", + "| a | b | 2.0 | 4.0 |", + "| a | z | 5.0 | |", + "| b | b | 6.0 | |", + "| b | c | 7.0 | 6.0 |", + "| c | c | 8.0 | |", + "| d | b | | 9.0 |", + "| e | | 12.0 | 11.0 |", + "| | f | 13.0 | 14.0 |", + "+----+----+------+------+", ]; assert_batches_eq!(&expected, &results.output); } @@ -754,13 +754,13 @@ mod test { let results = dedupe(vec![batch1, batch2], sort_keys).await; let expected = vec![ - "+----+----+----+----+", - "| t1 | t2 | f1 | f2 |", - "+----+----+----+----+", - "| a | b | 1 | 2 |", - "| a | c | 4 | 6 |", - "| b | d | 7 | 8 |", - "+----+----+----+----+", + "+----+----+-----+-----+", + "| t1 | t2 | f1 | f2 |", + "+----+----+-----+-----+", + "| a | b | 1.0 | 2.0 |", + "| a | c | 4.0 | 6.0 |", + "| b | d | 7.0 | 8.0 |", + "+----+----+-----+-----+", ]; assert_batches_eq!(&expected, &results.output); // 5 rows in initial input, 3 rows in output ==> 2 dupes @@ -813,12 +813,12 @@ mod test { let results = dedupe(vec![batch1, batch2], sort_keys).await; let expected = vec![ - "+----+----+", - "| t1 | f1 |", - "+----+----+", - "| a | 1 |", - "| b | 2 |", - "+----+----+", + "+----+-----+", + "| t1 | f1 |", + "+----+-----+", + "| a | 1.0 |", + "| b | 2.0 |", + "+----+-----+", ]; assert_batches_eq!(&expected, &results.output); @@ -864,11 +864,11 @@ mod test { let results = dedupe(vec![batch], sort_keys).await; let expected = vec![ - "+----+----+----+", - "| t1 | f1 | f2 |", - "+----+----+----+", - "| a | 3 | 4 |", - "+----+----+----+", + "+----+-----+-----+", + "| t1 | f1 | f2 |", + "+----+-----+-----+", + "| a | 3.0 | 4.0 |", + "+----+-----+-----+", ]; assert_batches_eq!(&expected, &results.output); } @@ -924,13 +924,13 @@ mod test { let results = dedupe(vec![batch], sort_keys).await; let expected = vec![ - "+----+----+----+", - "| f1 | t2 | t1 |", - "+----+----+----+", - "| 2 | a | a |", - "| 3 | a | b |", - "| 4 | b | b |", - "+----+----+----+", + "+-----+----+----+", + "| f1 | t2 | t1 |", + "+-----+----+----+", + "| 2.0 | a | a |", + "| 3.0 | a | b |", + "| 4.0 | b | b |", + "+-----+----+----+", ]; assert_batches_eq!(&expected, &results.output); } @@ -1049,14 +1049,14 @@ mod test { assert_eq!(cols[2].values().len(), 1); // "c" let expected = vec![ - "+----+----+----+----+", - "| t1 | t2 | f1 | f2 |", - "+----+----+----+----+", - "| a | b | 1 | 2 |", - "| a | c | 3 | |", - "| b | c | 4 | 6 |", - "| c | d | 7 | 8 |", - "+----+----+----+----+", + "+----+----+-----+-----+", + "| t1 | t2 | f1 | f2 |", + "+----+----+-----+-----+", + "| a | b | 1.0 | 2.0 |", + "| a | c | 3.0 | |", + "| b | c | 4.0 | 6.0 |", + "| c | d | 7.0 | 8.0 |", + "+----+----+-----+-----+", ]; assert_batches_eq!(&expected, &results.output); // 5 rows in initial input, 4 rows in output ==> 1 dupes diff --git a/iox_query/src/provider/deduplicate/algo.rs b/iox_query/src/provider/deduplicate/algo.rs index 56b28b2789..073ccf8535 100644 --- a/iox_query/src/provider/deduplicate/algo.rs +++ b/iox_query/src/provider/deduplicate/algo.rs @@ -480,11 +480,11 @@ mod test { .unwrap(); let expected = vec![ - "+----+----+----+----+", - "| t1 | t2 | f1 | f2 |", - "+----+----+----+----+", - "| a | c | 4 | 2 |", - "+----+----+----+----+", + "+----+----+-----+-----+", + "| t1 | t2 | f1 | f2 |", + "+----+----+-----+-----+", + "| a 
| c | 4.0 | 2.0 |", + "+----+----+-----+-----+", ]; assert_batches_eq!(&expected, &[results]); } @@ -563,11 +563,11 @@ mod test { .unwrap(); let expected = vec![ - "+----+----+----+----+", - "| t1 | t2 | f1 | f2 |", - "+----+----+----+----+", - "| a | c | 4 | 5 |", - "+----+----+----+----+", + "+----+----+-----+-----+", + "| t1 | t2 | f1 | f2 |", + "+----+----+-----+-----+", + "| a | c | 4.0 | 5.0 |", + "+----+----+-----+-----+", ]; assert_batches_eq!(&expected, &[results]); } diff --git a/mutable_batch/tests/extend_range.rs b/mutable_batch/tests/extend_range.rs index 020c8a8c82..4deffa9944 100644 --- a/mutable_batch/tests/extend_range.rs +++ b/mutable_batch/tests/extend_range.rs @@ -58,15 +58,15 @@ fn test_extend_range() { assert_batches_eq!( &[ - "+-----+------+--------------------------------+", - "| f64 | tag1 | time |", - "+-----+------+--------------------------------+", - "| 23 | v2 | 1970-01-01T00:00:00Z |", - "| 23 | | 1970-01-01T00:00:00.000000001Z |", - "| | v2 | 1970-01-01T00:00:00.000000002Z |", - "| 5 | | 1970-01-01T00:00:00.000000003Z |", - "| | v2 | 1970-01-01T00:00:00.000000004Z |", - "+-----+------+--------------------------------+", + "+------+------+--------------------------------+", + "| f64 | tag1 | time |", + "+------+------+--------------------------------+", + "| 23.0 | v2 | 1970-01-01T00:00:00Z |", + "| 23.0 | | 1970-01-01T00:00:00.000000001Z |", + "| | v2 | 1970-01-01T00:00:00.000000002Z |", + "| 5.0 | | 1970-01-01T00:00:00.000000003Z |", + "| | v2 | 1970-01-01T00:00:00.000000004Z |", + "+------+------+--------------------------------+", ], &[a.to_arrow(Projection::All).unwrap()] ); @@ -93,18 +93,18 @@ fn test_extend_range() { assert_batches_eq!( &[ - "+-------+-----+------+------+--------------------------------+", - "| bool | f64 | tag1 | tag3 | time |", - "+-------+-----+------+------+--------------------------------+", - "| | 23 | v2 | | 1970-01-01T00:00:00Z |", - "| | 23 | | | 1970-01-01T00:00:00.000000001Z |", - "| | | v2 | | 1970-01-01T00:00:00.000000002Z |", - "| | 5 | | | 1970-01-01T00:00:00.000000003Z |", - "| | | v2 | | 1970-01-01T00:00:00.000000004Z |", - "| false | | v1 | v1 | 1970-01-01T00:00:00.000000006Z |", - "| | | | v3 | 1970-01-01T00:00:00.000000007Z |", - "| | | | v1 | 1970-01-01T00:00:00.000000008Z |", - "+-------+-----+------+------+--------------------------------+", + "+-------+------+------+------+--------------------------------+", + "| bool | f64 | tag1 | tag3 | time |", + "+-------+------+------+------+--------------------------------+", + "| | 23.0 | v2 | | 1970-01-01T00:00:00Z |", + "| | 23.0 | | | 1970-01-01T00:00:00.000000001Z |", + "| | | v2 | | 1970-01-01T00:00:00.000000002Z |", + "| | 5.0 | | | 1970-01-01T00:00:00.000000003Z |", + "| | | v2 | | 1970-01-01T00:00:00.000000004Z |", + "| false | | v1 | v1 | 1970-01-01T00:00:00.000000006Z |", + "| | | | v3 | 1970-01-01T00:00:00.000000007Z |", + "| | | | v1 | 1970-01-01T00:00:00.000000008Z |", + "+-------+------+------+------+--------------------------------+", ], &[a.to_arrow(Projection::All).unwrap()] ); diff --git a/mutable_batch/tests/writer.rs b/mutable_batch/tests/writer.rs index 8c4ccfdb27..8f4533441e 100644 --- a/mutable_batch/tests/writer.rs +++ b/mutable_batch/tests/writer.rs @@ -90,10 +90,10 @@ fn test_basic() { "| b1 | b2 | f64 | i64 | i64_2 | tag1 | tag2 | tag3 | time | u64 |", "+-------+-------+-------+-----+-------+------+------+------+--------------------------------+-----+", "| true | true | 343.3 | 234 | -8 | v1 | v1 | v2 | 1970-01-01T00:00:00.000000007Z | 23 |", - 
"| true | | 443 | 6 | | v1 | v2 | v1 | 1970-01-01T00:00:00.000000005Z | |", + "| true | | 443.0 | 6 | | v1 | v2 | v1 | 1970-01-01T00:00:00.000000005Z | |", "| false | false | | 2 | | v2 | | | 1970-01-01T00:00:00.000000007Z | |", - "| false | false | 477 | 6 | | v2 | v2 | v1 | 1970-01-01T00:00:00.000000003Z | 5 |", - "| false | true | -24 | -3 | | v1 | | v2 | 1970-01-01T00:00:00.000000005Z | |", + "| false | false | 477.0 | 6 | | v2 | v2 | v1 | 1970-01-01T00:00:00.000000003Z | 5 |", + "| false | true | -24.0 | -3 | | v1 | | v2 | 1970-01-01T00:00:00.000000005Z | |", "+-------+-------+-------+-----+-------+------+------+------+--------------------------------+-----+", ]; @@ -241,32 +241,32 @@ fn test_basic() { let stats: Vec<_> = get_stats(&batch); let expected_data = &[ - "+-------+-------+-------+-----+-------+------+------+------+--------------------------------+-----+", - "| b1 | b2 | f64 | i64 | i64_2 | tag1 | tag2 | tag3 | time | u64 |", - "+-------+-------+-------+-----+-------+------+------+------+--------------------------------+-----+", - "| true | true | 343.3 | 234 | -8 | v1 | v1 | v2 | 1970-01-01T00:00:00.000000007Z | 23 |", - "| true | | 443 | 6 | | v1 | v2 | v1 | 1970-01-01T00:00:00.000000005Z | |", - "| false | false | | 2 | | v2 | | | 1970-01-01T00:00:00.000000007Z | |", - "| false | false | 477 | 6 | | v2 | v2 | v1 | 1970-01-01T00:00:00.000000003Z | 5 |", - "| false | true | -24 | -3 | | v1 | | v2 | 1970-01-01T00:00:00.000000005Z | |", - "| | | | | | | v4 | v2 | 1970-01-01T00:00:00Z | |", - "| | | 4 | | | | v1 | v2 | 1970-01-01T00:00:00.000000001Z | |", - "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000002Z | |", - "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000003Z | |", - "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000004Z | |", - "| | | | | | | | v2 | 1970-01-01T00:00:00.000000005Z | |", - "| | | 945 | | | | v1 | v2 | 1970-01-01T00:00:00.000000006Z | |", - "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000007Z | |", - "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000008Z | |", - "| | | | | | | | v2 | 1970-01-01T00:00:00.000000009Z | |", - "| | | -222 | | | | v4 | v2 | 1970-01-01T00:00:00.000000010Z | |", - "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000011Z | |", - "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000012Z | |", - "| | | 4 | | | | | v2 | 1970-01-01T00:00:00.000000013Z | |", - "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000014Z | |", - "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000015Z | |", - "| | | 7 | | | | | v2 | 1970-01-01T00:00:00.000000016Z | |", - "+-------+-------+-------+-----+-------+------+------+------+--------------------------------+-----+", + "+-------+-------+--------+-----+-------+------+------+------+--------------------------------+-----+", + "| b1 | b2 | f64 | i64 | i64_2 | tag1 | tag2 | tag3 | time | u64 |", + "+-------+-------+--------+-----+-------+------+------+------+--------------------------------+-----+", + "| true | true | 343.3 | 234 | -8 | v1 | v1 | v2 | 1970-01-01T00:00:00.000000007Z | 23 |", + "| true | | 443.0 | 6 | | v1 | v2 | v1 | 1970-01-01T00:00:00.000000005Z | |", + "| false | false | | 2 | | v2 | | | 1970-01-01T00:00:00.000000007Z | |", + "| false | false | 477.0 | 6 | | v2 | v2 | v1 | 1970-01-01T00:00:00.000000003Z | 5 |", + "| false | true | -24.0 | -3 | | v1 | | v2 | 1970-01-01T00:00:00.000000005Z | |", + "| | | | | | | v4 | v2 | 1970-01-01T00:00:00Z | |", + "| | | 4.0 | | | | v1 | v2 | 1970-01-01T00:00:00.000000001Z | |", + "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000002Z | |", + 
"| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000003Z | |", + "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000004Z | |", + "| | | | | | | | v2 | 1970-01-01T00:00:00.000000005Z | |", + "| | | 945.0 | | | | v1 | v2 | 1970-01-01T00:00:00.000000006Z | |", + "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000007Z | |", + "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000008Z | |", + "| | | | | | | | v2 | 1970-01-01T00:00:00.000000009Z | |", + "| | | -222.0 | | | | v4 | v2 | 1970-01-01T00:00:00.000000010Z | |", + "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000011Z | |", + "| | | | | | | v4 | v2 | 1970-01-01T00:00:00.000000012Z | |", + "| | | 4.0 | | | | | v2 | 1970-01-01T00:00:00.000000013Z | |", + "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000014Z | |", + "| | | | | | | v1 | v2 | 1970-01-01T00:00:00.000000015Z | |", + "| | | 7.0 | | | | | v2 | 1970-01-01T00:00:00.000000016Z | |", + "+-------+-------+--------+-----+-------+------+------+------+--------------------------------+-----+", ]; let expected_stats = vec![ diff --git a/mutable_batch_lp/src/lib.rs b/mutable_batch_lp/src/lib.rs index eedf365c11..f7e3df3d24 100644 --- a/mutable_batch_lp/src/lib.rs +++ b/mutable_batch_lp/src/lib.rs @@ -350,7 +350,7 @@ mod tests { "| | v1 | v2 | 1970-01-01T00:00:00Z | 2 |", "| | v4 | v1 | 1970-01-01T00:00:00Z | 2 |", "| | | v2 | 1970-01-01T00:00:00.000000001Z | 3 |", - "| 2 | v1 | v2 | 1970-01-01T00:00:00.000000005Z | |", + "| 2.0 | v1 | v2 | 1970-01-01T00:00:00.000000005Z | |", "+------+------+------+--------------------------------+-----+", ], &[batches["cpu"].to_arrow(Projection::All).unwrap()] diff --git a/mutable_batch_pb/src/decode.rs b/mutable_batch_pb/src/decode.rs index 15b7d5bae3..4c334b0ef3 100644 --- a/mutable_batch_pb/src/decode.rs +++ b/mutable_batch_pb/src/decode.rs @@ -628,10 +628,10 @@ mod tests { "| f64 | i64 | tag1 | tag2 | time | u64 |", "+-----+-----+------+------+--------------------------------+-----+", "| | 56 | v1 | | 1970-01-01T00:00:00.000000001Z | 4 |", - "| 3 | | v1 | v2 | 1970-01-01T00:00:00.000000002Z | 3 |", + "| 3.0 | | v1 | v2 | 1970-01-01T00:00:00.000000002Z | 3 |", "| | | v2 | | 1970-01-01T00:00:00.000000003Z | |", "| | | v2 | v3 | 1970-01-01T00:00:00.000000004Z | 2 |", - "| 5 | 2 | v1 | | 1970-01-01T00:00:00.000000005Z | 1 |", + "| 5.0 | 2 | v1 | | 1970-01-01T00:00:00.000000005Z | 1 |", "+-----+-----+------+------+--------------------------------+-----+", ]; diff --git a/mutable_batch_pb/tests/encode.rs b/mutable_batch_pb/tests/encode.rs index 9583cafff4..d269734182 100644 --- a/mutable_batch_pb/tests/encode.rs +++ b/mutable_batch_pb/tests/encode.rs @@ -20,9 +20,9 @@ fn test_encode_decode() { "+-------+------+-------+-----+------+--------------------------------+-----+", "| bv | fv | iv | sv | t1 | time | uv |", "+-------+------+-------+-----+------+--------------------------------+-----+", - "| true | 1 | 1 | hi | asdf | 1970-01-01T00:00:00.000000001Z | 774 |", - "| true | 32 | | | bar | 1970-01-01T00:00:00.000000002Z | 1 |", - "| | 1 | 1 | bye | bar | 1970-01-01T00:00:00.000000003Z | 1 |", + "| true | 1.0 | 1 | hi | asdf | 1970-01-01T00:00:00.000000001Z | 774 |", + "| true | 32.0 | | | bar | 1970-01-01T00:00:00.000000002Z | 1 |", + "| | 1.0 | 1 | bye | bar | 1970-01-01T00:00:00.000000003Z | 1 |", "| false | | -3405 | hi | | 1970-01-01T00:00:00.000000004Z | 566 |", "| true | 1.23 | 1 | hi | asdf | 1970-01-01T00:00:00.000000005Z | |", "+-------+------+-------+-----+------+--------------------------------+-----+", diff --git 
a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs index 5215521ca5..3f8b340cbb 100644 --- a/querier/src/namespace/query_access.rs +++ b/querier/src/namespace/query_access.rs @@ -335,10 +335,10 @@ mod tests { "+-----+------+------+--------------------------------+", "| foo | host | load | time |", "+-----+------+------+--------------------------------+", - "| | a | 1 | 1970-01-01T00:00:00.000000011Z |", - "| | a | 3 | 1970-01-01T00:00:00.000000033Z |", - "| | a | 4 | 1970-01-01T00:00:00.000010001Z |", - "| | b | 5 | 1970-01-01T00:00:00.000000011Z |", + "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |", + "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |", + "| | a | 4.0 | 1970-01-01T00:00:00.000010001Z |", + "| | b | 5.0 | 1970-01-01T00:00:00.000000011Z |", "+-----+------+------+--------------------------------+", ], Some(span_ctx), @@ -471,9 +471,9 @@ mod tests { "+------+------+--------------------------------+", "| host | perc | time |", "+------+------+--------------------------------+", - "| c | 50 | 1970-01-01T00:00:00.000000011Z |", - "| c | 51 | 1970-01-01T00:00:00.000000012Z |", - "| d | 53 | 1970-01-01T00:00:00.000000014Z |", + "| c | 50.0 | 1970-01-01T00:00:00.000000011Z |", + "| c | 51.0 | 1970-01-01T00:00:00.000000012Z |", + "| d | 53.0 | 1970-01-01T00:00:00.000000014Z |", "+------+------+--------------------------------+", ], ) @@ -551,11 +551,11 @@ mod tests { "+-----+------+------+--------------------------------+", "| foo | host | load | time |", "+-----+------+------+--------------------------------+", - "| | a | 1 | 1970-01-01T00:00:00.000000011Z |", - "| | a | 3 | 1970-01-01T00:00:00.000000033Z |", - "| | a | 14 | 1970-01-01T00:00:00.000010001Z |", // load has most recent value 14 - "| | b | 5 | 1970-01-01T00:00:00.000000011Z |", - "| | z | 0 | 1970-01-01T00:00:00Z |", + "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |", + "| | a | 14.0 | 1970-01-01T00:00:00.000010001Z |", + "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |", + "| | b | 5.0 | 1970-01-01T00:00:00.000000011Z |", + "| | z | 0.0 | 1970-01-01T00:00:00Z |", "+-----+------+------+--------------------------------+", ], ) diff --git a/querier/src/table/mod.rs b/querier/src/table/mod.rs index 6440c10bcf..ce384bdc6f 100644 --- a/querier/src/table/mod.rs +++ b/querier/src/table/mod.rs @@ -897,7 +897,7 @@ mod tests { "+-----+------+------+--------------------------------+", "| foo | tag1 | tag2 | time |", "+-----+------+------+--------------------------------+", - "| 3 | val1 | val2 | 1970-01-01T00:00:00.000000011Z |", + "| 3.0 | val1 | val2 | 1970-01-01T00:00:00.000000011Z |", "+-----+------+------+--------------------------------+", ]; assert_batches_eq!(&expected, &batches); diff --git a/query_functions/src/selectors.rs b/query_functions/src/selectors.rs index ee160562d0..f62748192f 100644 --- a/query_functions/src/selectors.rs +++ b/query_functions/src/selectors.rs @@ -655,7 +655,7 @@ mod test { "+------------------------------------------+-----------------------------------------+", "| selector_first_value(t.f64_value,t.time) | selector_first_time(t.f64_value,t.time) |", "+------------------------------------------+-----------------------------------------+", - "| 2 | 1970-01-01T00:00:00.000001 |", + "| 2.0 | 1970-01-01T00:00:00.000001 |", "+------------------------------------------+-----------------------------------------+", ], ), @@ -732,7 +732,7 @@ mod test { "+-----------------------------------------+----------------------------------------+", "| 
selector_last_value(t.f64_value,t.time) | selector_last_time(t.f64_value,t.time) |", "+-----------------------------------------+----------------------------------------+", - "| 3 | 1970-01-01T00:00:00.000006 |", + "| 3.0 | 1970-01-01T00:00:00.000006 |", "+-----------------------------------------+----------------------------------------+", ], ), @@ -809,7 +809,7 @@ mod test { "+----------------------------------------+---------------------------------------+", "| selector_min_value(t.f64_value,t.time) | selector_min_time(t.f64_value,t.time) |", "+----------------------------------------+---------------------------------------+", - "| 1 | 1970-01-01T00:00:00.000004 |", + "| 1.0 | 1970-01-01T00:00:00.000004 |", "+----------------------------------------+---------------------------------------+", ], ), @@ -886,7 +886,7 @@ mod test { "+----------------------------------------+---------------------------------------+", "| selector_max_value(t.f64_value,t.time) | selector_max_time(t.f64_value,t.time) |", "+----------------------------------------+---------------------------------------+", - "| 5 | 1970-01-01T00:00:00.000005 |", + "| 5.0 | 1970-01-01T00:00:00.000005 |", "+----------------------------------------+---------------------------------------+", ], ), @@ -959,11 +959,11 @@ mod test { run_case( struct_selector_first().call(vec![col("f64_value"), col("time")]), vec![ - "+--------------------------------------------------+", - "| selector_first(t.f64_value,t.time) |", - "+--------------------------------------------------+", - "| {\"value\": 2, \"time\": 1970-01-01T00:00:00.000001} |", - "+--------------------------------------------------+", + "+------------------------------------------------+", + "| selector_first(t.f64_value,t.time) |", + "+------------------------------------------------+", + "| {value: 2.0, time: 1970-01-01T00:00:00.000001} |", + "+------------------------------------------------+", ], ) .await; @@ -974,11 +974,11 @@ mod test { run_case( struct_selector_first().call(vec![col("i64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_first(t.i64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 20, \"time\": 1970-01-01T00:00:00.000001} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_first(t.i64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 20, time: 1970-01-01T00:00:00.000001} |", + "+-----------------------------------------------+", ], ) .await; @@ -989,11 +989,11 @@ mod test { run_case( struct_selector_first().call(vec![col("u64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_first(t.u64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 20, \"time\": 1970-01-01T00:00:00.000001} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_first(t.u64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 20, time: 1970-01-01T00:00:00.000001} |", + "+-----------------------------------------------+", ], ) .await; @@ -1004,11 +1004,11 @@ mod test { run_case( struct_selector_first().call(vec![col("string_value"), col("time")]), vec![ - "+------------------------------------------------------+", - "| selector_first(t.string_value,t.time) |", - 
"+------------------------------------------------------+", - "| {\"value\": \"two\", \"time\": 1970-01-01T00:00:00.000001} |", - "+------------------------------------------------------+", + "+------------------------------------------------+", + "| selector_first(t.string_value,t.time) |", + "+------------------------------------------------+", + "| {value: two, time: 1970-01-01T00:00:00.000001} |", + "+------------------------------------------------+", ], ) .await; @@ -1019,11 +1019,11 @@ mod test { run_case( struct_selector_first().call(vec![col("bool_value"), col("time")]), vec![ - "+-----------------------------------------------------+", - "| selector_first(t.bool_value,t.time) |", - "+-----------------------------------------------------+", - "| {\"value\": true, \"time\": 1970-01-01T00:00:00.000001} |", - "+-----------------------------------------------------+", + "+-------------------------------------------------+", + "| selector_first(t.bool_value,t.time) |", + "+-------------------------------------------------+", + "| {value: true, time: 1970-01-01T00:00:00.000001} |", + "+-------------------------------------------------+", ], ) .await; @@ -1036,11 +1036,11 @@ mod test { run_case( struct_selector_last().call(vec![col("f64_value"), col("time")]), vec![ - "+--------------------------------------------------+", - "| selector_last(t.f64_value,t.time) |", - "+--------------------------------------------------+", - "| {\"value\": 3, \"time\": 1970-01-01T00:00:00.000006} |", - "+--------------------------------------------------+", + "+------------------------------------------------+", + "| selector_last(t.f64_value,t.time) |", + "+------------------------------------------------+", + "| {value: 3.0, time: 1970-01-01T00:00:00.000006} |", + "+------------------------------------------------+", ], ) .await; @@ -1051,11 +1051,11 @@ mod test { run_case( struct_selector_last().call(vec![col("i64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_last(t.i64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 30, \"time\": 1970-01-01T00:00:00.000006} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_last(t.i64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 30, time: 1970-01-01T00:00:00.000006} |", + "+-----------------------------------------------+", ], ) .await; @@ -1066,11 +1066,11 @@ mod test { run_case( struct_selector_last().call(vec![col("u64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_last(t.u64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 30, \"time\": 1970-01-01T00:00:00.000006} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_last(t.u64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 30, time: 1970-01-01T00:00:00.000006} |", + "+-----------------------------------------------+", ], ) .await; @@ -1081,11 +1081,11 @@ mod test { run_case( struct_selector_last().call(vec![col("string_value"), col("time")]), vec![ - "+--------------------------------------------------------+", - "| selector_last(t.string_value,t.time) |", - "+--------------------------------------------------------+", - "| {\"value\": \"three\", \"time\": 
1970-01-01T00:00:00.000006} |", - "+--------------------------------------------------------+", + "+--------------------------------------------------+", + "| selector_last(t.string_value,t.time) |", + "+--------------------------------------------------+", + "| {value: three, time: 1970-01-01T00:00:00.000006} |", + "+--------------------------------------------------+", ], ) .await; @@ -1096,11 +1096,11 @@ mod test { run_case( struct_selector_last().call(vec![col("bool_value"), col("time")]), vec![ - "+------------------------------------------------------+", - "| selector_last(t.bool_value,t.time) |", - "+------------------------------------------------------+", - "| {\"value\": false, \"time\": 1970-01-01T00:00:00.000006} |", - "+------------------------------------------------------+", + "+--------------------------------------------------+", + "| selector_last(t.bool_value,t.time) |", + "+--------------------------------------------------+", + "| {value: false, time: 1970-01-01T00:00:00.000006} |", + "+--------------------------------------------------+", ], ) .await; @@ -1113,11 +1113,11 @@ mod test { run_case( struct_selector_min().call(vec![col("f64_value"), col("time")]), vec![ - "+--------------------------------------------------+", - "| selector_min(t.f64_value,t.time) |", - "+--------------------------------------------------+", - "| {\"value\": 1, \"time\": 1970-01-01T00:00:00.000004} |", - "+--------------------------------------------------+", + "+------------------------------------------------+", + "| selector_min(t.f64_value,t.time) |", + "+------------------------------------------------+", + "| {value: 1.0, time: 1970-01-01T00:00:00.000004} |", + "+------------------------------------------------+", ], ) .await; @@ -1128,11 +1128,11 @@ mod test { run_case( struct_selector_min().call(vec![col("i64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_min(t.i64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 10, \"time\": 1970-01-01T00:00:00.000004} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_min(t.i64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 10, time: 1970-01-01T00:00:00.000004} |", + "+-----------------------------------------------+", ], ) .await; @@ -1143,11 +1143,11 @@ mod test { run_case( struct_selector_min().call(vec![col("u64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_min(t.u64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 10, \"time\": 1970-01-01T00:00:00.000004} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_min(t.u64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 10, time: 1970-01-01T00:00:00.000004} |", + "+-----------------------------------------------+", ], ) .await; @@ -1158,11 +1158,11 @@ mod test { run_case( struct_selector_min().call(vec![col("string_value"), col("time")]), vec![ - "+--------------------------------------------------------+", - "| selector_min(t.string_value,t.time) |", - "+--------------------------------------------------------+", - "| {\"value\": \"a_one\", \"time\": 1970-01-01T00:00:00.000004} |", - "+--------------------------------------------------------+", + 
"+--------------------------------------------------+", + "| selector_min(t.string_value,t.time) |", + "+--------------------------------------------------+", + "| {value: a_one, time: 1970-01-01T00:00:00.000004} |", + "+--------------------------------------------------+", ], ) .await; @@ -1173,11 +1173,11 @@ mod test { run_case( struct_selector_min().call(vec![col("bool_value"), col("time")]), vec![ - "+------------------------------------------------------+", - "| selector_min(t.bool_value,t.time) |", - "+------------------------------------------------------+", - "| {\"value\": false, \"time\": 1970-01-01T00:00:00.000002} |", - "+------------------------------------------------------+", + "+--------------------------------------------------+", + "| selector_min(t.bool_value,t.time) |", + "+--------------------------------------------------+", + "| {value: false, time: 1970-01-01T00:00:00.000002} |", + "+--------------------------------------------------+", ], ) .await; @@ -1190,11 +1190,11 @@ mod test { run_case( struct_selector_max().call(vec![col("f64_value"), col("time")]), vec![ - "+--------------------------------------------------+", - "| selector_max(t.f64_value,t.time) |", - "+--------------------------------------------------+", - "| {\"value\": 5, \"time\": 1970-01-01T00:00:00.000005} |", - "+--------------------------------------------------+", + "+------------------------------------------------+", + "| selector_max(t.f64_value,t.time) |", + "+------------------------------------------------+", + "| {value: 5.0, time: 1970-01-01T00:00:00.000005} |", + "+------------------------------------------------+", ], ) .await; @@ -1205,11 +1205,11 @@ mod test { run_case( struct_selector_max().call(vec![col("i64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_max(t.i64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 50, \"time\": 1970-01-01T00:00:00.000005} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_max(t.i64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 50, time: 1970-01-01T00:00:00.000005} |", + "+-----------------------------------------------+", ], ) .await; @@ -1220,11 +1220,11 @@ mod test { run_case( struct_selector_max().call(vec![col("u64_value"), col("time")]), vec![ - "+---------------------------------------------------+", - "| selector_max(t.u64_value,t.time) |", - "+---------------------------------------------------+", - "| {\"value\": 50, \"time\": 1970-01-01T00:00:00.000005} |", - "+---------------------------------------------------+", + "+-----------------------------------------------+", + "| selector_max(t.u64_value,t.time) |", + "+-----------------------------------------------+", + "| {value: 50, time: 1970-01-01T00:00:00.000005} |", + "+-----------------------------------------------+", ], ) .await; @@ -1235,11 +1235,11 @@ mod test { run_case( struct_selector_max().call(vec![col("string_value"), col("time")]), vec![ - "+---------------------------------------------------------+", - "| selector_max(t.string_value,t.time) |", - "+---------------------------------------------------------+", - "| {\"value\": \"z_five\", \"time\": 1970-01-01T00:00:00.000005} |", - "+---------------------------------------------------------+", + "+---------------------------------------------------+", + "| selector_max(t.string_value,t.time) |", + 
"+---------------------------------------------------+", + "| {value: z_five, time: 1970-01-01T00:00:00.000005} |", + "+---------------------------------------------------+", ], ) .await; @@ -1250,11 +1250,11 @@ mod test { run_case( struct_selector_max().call(vec![col("bool_value"), col("time")]), vec![ - "+-----------------------------------------------------+", - "| selector_max(t.bool_value,t.time) |", - "+-----------------------------------------------------+", - "| {\"value\": true, \"time\": 1970-01-01T00:00:00.000001} |", - "+-----------------------------------------------------+", + "+-------------------------------------------------+", + "| selector_max(t.bool_value,t.time) |", + "+-------------------------------------------------+", + "| {value: true, time: 1970-01-01T00:00:00.000001} |", + "+-------------------------------------------------+", ], ) .await; diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index d67bd0e40f..ffb8f686d3 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -17,10 +17,10 @@ license.workspace = true ### BEGIN HAKARI SECTION [dependencies] ahash = { version = "0.8", default-features = false, features = ["getrandom", "runtime-rng"] } -arrow = { version = "32", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] } -arrow-flight = { version = "32", features = ["flight-sql-experimental"] } -arrow-ord = { version = "32", default-features = false, features = ["dyn_cmp_dict"] } -arrow-string = { version = "32", default-features = false, features = ["dyn_cmp_dict"] } +arrow = { version = "33", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] } +arrow-flight = { version = "33", features = ["flight-sql-experimental"] } +arrow-ord = { version = "33", default-features = false, features = ["dyn_cmp_dict"] } +arrow-string = { version = "33", default-features = false, features = ["dyn_cmp_dict"] } base64-594e8ee84c453af0 = { package = "base64", version = "0.13", features = ["std"] } base64-647d43efb71741da = { package = "base64", version = "0.21", features = ["std"] } bitflags = { version = "1" } @@ -29,7 +29,7 @@ bytes = { version = "1", features = ["std"] } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] } crossbeam-utils = { version = "0.8", features = ["std"] } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "253550c6c936f75f654dcdc9480025a9ef55d9fd", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f6e49ac7a027abb95d8b7fa755502dfa7d53c21c", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] } either = { version = "1", features = ["use_std"] } fixedbitset = { version = "0.4", features = ["std"] } @@ -58,7 +58,7 @@ num-traits = { version = "0.2", features = ["i128", "libm", "std"] } object_store = { version = "0.5", default-features = false, features = ["aws", "azure", "base64", "cloud", "gcp", "quick-xml", "rand", "reqwest", "ring", 
"rustls-pemfile", "serde", "serde_json"] } once_cell = { version = "1", features = ["alloc", "parking_lot", "parking_lot_core", "race", "std"] } parking_lot = { version = "0.12", features = ["arc_lock"] } -parquet = { version = "32", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } +parquet = { version = "33", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } phf_shared = { version = "0.11", features = ["std"] } predicates = { version = "2", features = ["diff", "float-cmp", "normalize-line-endings", "regex"] } prost = { version = "0.11", features = ["prost-derive", "std"] }
04f3296d7b9413496da09d10b3faa847d00b41bc
Marco Neumann
2023-02-28 08:57:19
add "remove de-duplication" optimizer pass (#7042)
For #6098.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: add "remove de-duplication" optimizer pass (#7042) For #6098. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query/src/physical_optimizer/dedup/mod.rs b/iox_query/src/physical_optimizer/dedup/mod.rs index af30321407..253f185794 100644 --- a/iox_query/src/physical_optimizer/dedup/mod.rs +++ b/iox_query/src/physical_optimizer/dedup/mod.rs @@ -1,6 +1,7 @@ //! Optimizer passes concering de-duplication. mod partition_split; +mod remove_dedup; #[cfg(test)] mod test_util; diff --git a/iox_query/src/physical_optimizer/dedup/remove_dedup.rs b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs new file mode 100644 index 0000000000..edda8ac6b9 --- /dev/null +++ b/iox_query/src/physical_optimizer/dedup/remove_dedup.rs @@ -0,0 +1,160 @@ +use std::sync::Arc; + +use datafusion::{ + config::ConfigOptions, + error::Result, + physical_optimizer::PhysicalOptimizerRule, + physical_plan::{rewrite::TreeNodeRewritable, ExecutionPlan}, +}; +use predicate::Predicate; + +use crate::{ + physical_optimizer::chunk_extraction::extract_chunks, + provider::{chunks_to_physical_nodes, DeduplicateExec}, +}; + +/// Removes de-duplication operation if there are at most 1 chunks and this chunk does NOT contain primary-key duplicates. +#[derive(Debug, Default)] +pub struct RemoveDedup; + +impl PhysicalOptimizerRule for RemoveDedup { + fn optimize( + &self, + plan: Arc<dyn ExecutionPlan>, + config: &ConfigOptions, + ) -> Result<Arc<dyn ExecutionPlan>> { + plan.transform_up(&|plan| { + let plan_any = plan.as_any(); + + if let Some(dedup_exec) = plan_any.downcast_ref::<DeduplicateExec>() { + let mut children = dedup_exec.children(); + assert_eq!(children.len(), 1); + let child = children.remove(0); + let Some((schema, chunks)) = extract_chunks(child.as_ref()) else { + return Ok(None); + }; + + if (chunks.len() < 2) && chunks.iter().all(|c| !c.may_contain_pk_duplicates()) { + return Ok(Some(chunks_to_physical_nodes( + &schema, + None, + chunks, + Predicate::new(), + config.execution.target_partitions, + ))); + } + } + + Ok(None) + }) + } + + fn name(&self) -> &str { + "remove_dedup" + } + + fn schema_check(&self) -> bool { + true + } +} + +#[cfg(test)] +mod tests { + use crate::{ + physical_optimizer::{ + dedup::test_util::{chunk, dedup_plan}, + test_util::OptimizationTest, + }, + QueryChunkMeta, + }; + + use super::*; + + #[test] + fn test_no_chunks() { + let schema = chunk(1).schema().clone(); + let plan = dedup_plan(schema, vec![]); + let opt = RemoveDedup::default(); + insta::assert_yaml_snapshot!( + OptimizationTest::new(plan, opt), + @r###" + --- + input: + - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]" + - " EmptyExec: produce_one_row=false" + output: + Ok: + - " EmptyExec: produce_one_row=false" + "### + ); + } + + #[test] + fn test_single_chunk_no_pk_dups() { + let chunk1 = chunk(1).with_may_contain_pk_duplicates(false); + let schema = chunk1.schema().clone(); + let plan = dedup_plan(schema, vec![chunk1]); + let opt = RemoveDedup::default(); + insta::assert_yaml_snapshot!( + OptimizationTest::new(plan, opt), + @r###" + --- + input: + - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]" + - " UnionExec" + - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0" + output: + Ok: + - " UnionExec" + - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0" + "### + ); + } + + #[test] + fn test_single_chunk_with_pk_dups() { + let chunk1 = chunk(1).with_may_contain_pk_duplicates(true); + let schema = chunk1.schema().clone(); + let plan = dedup_plan(schema, vec![chunk1]); + let opt = RemoveDedup::default(); + insta::assert_yaml_snapshot!( + OptimizationTest::new(plan, opt), + @r###" + --- + 
input: + - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]" + - " UnionExec" + - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0" + output: + Ok: + - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]" + - " UnionExec" + - " RecordBatchesExec: batches_groups=1 batches=0 total_rows=0" + "### + ); + } + + #[test] + fn test_multiple_chunks() { + let chunk1 = chunk(1).with_may_contain_pk_duplicates(false); + let chunk2 = chunk(2).with_may_contain_pk_duplicates(false); + let schema = chunk1.schema().clone(); + let plan = dedup_plan(schema, vec![chunk1, chunk2]); + let opt = RemoveDedup::default(); + insta::assert_yaml_snapshot!( + OptimizationTest::new(plan, opt), + @r###" + --- + input: + - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]" + - " UnionExec" + - " RecordBatchesExec: batches_groups=2 batches=0 total_rows=0" + output: + Ok: + - " DeduplicateExec: [tag1@1 ASC,tag2@2 ASC,time@3 ASC]" + - " UnionExec" + - " RecordBatchesExec: batches_groups=2 batches=0 total_rows=0" + "### + ); + } +}
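The pass added above is tied to IOx's own plan and chunk types, but the decision it encodes is simple: walk the plan bottom-up and drop a de-duplication node whenever its input is at most one chunk that cannot contain primary-key duplicates. Below is a minimal, self-contained Rust sketch of that rewrite using toy `Plan`/`Chunk` types; it is an illustration of the decision logic only, not the real `DeduplicateExec`/`PhysicalOptimizerRule` machinery shown in the diff.

```rust
// Toy stand-ins for IOx's plan nodes and query chunks (assumption: these are
// illustrative types, not the crate's real API).
#[derive(Debug)]
struct Chunk {
    may_contain_pk_duplicates: bool,
}

#[derive(Debug)]
enum Plan {
    Scan(Vec<Chunk>),
    Dedup(Box<Plan>),
}

/// Bottom-up rewrite: remove a Dedup node when its input is at most one chunk
/// that cannot contain primary-key duplicates (mirroring the `chunks.len() < 2`
/// and `may_contain_pk_duplicates` checks in the pass above).
fn remove_dedup(plan: Plan) -> Plan {
    match plan {
        Plan::Dedup(child) => {
            // Rewrite the subtree first, then decide whether Dedup is redundant.
            let child = remove_dedup(*child);
            let removable = match &child {
                Plan::Scan(chunks) => {
                    chunks.len() < 2
                        && chunks.iter().all(|c| !c.may_contain_pk_duplicates)
                }
                _ => false,
            };
            if removable {
                child
            } else {
                Plan::Dedup(Box::new(child))
            }
        }
        other => other,
    }
}

fn main() {
    let plan = Plan::Dedup(Box::new(Plan::Scan(vec![Chunk {
        may_contain_pk_duplicates: false,
    }])));
    // Prints a bare Scan: the redundant Dedup node has been removed.
    println!("{:?}", remove_dedup(plan));
}
```

The multi-chunk and "may contain duplicates" cases fall through to the `_ => false` branch, which is why the snapshot tests above keep the `DeduplicateExec` node in those plans.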
0e814f5d52509dafd382522f8c9dc846967e6f2b
Trevor Hilton
2024-10-25 13:49:02
SerdeVecMap type for serializing ID maps (#25492)
This PR introduces a new type `SerdeVecHashMap` that can be used in places where we need a HashMap with the following properties: 1. When serialized, it is serialized as a list of key-value pairs, instead of a map 2. When deserialized, it assumes the serialization format from (1.) and deserializes from a list of key-value pairs to a map 3. Does not allow for duplicate keys on deserialization This is useful in places where we need to create map types that map from an identifier (integer) to some value, and need to serialize that data. For example: in the WAL when serializing write batches, and in the catalog when serializing the database/table schema. This PR refactors the code in `influxdb3_wal` and `influxdb3_catalog` to use the new type for maps that use `DbId` and `TableId` as the key. Follow on work can give the same treatment to `ColumnId` based maps once that is fully worked out. ## Explanation If we have a `HashMap<u32, String>`, `serde_json` will serialize it in the following way: ```json {"0": "foo", "1": "bar"} ``` i.e., the integer keys are serialized as strings, since JSON doesn't support any other type of key in maps. `SerdeVecHashMap<u32, String>` will be serialized by `serde_json` in the following way: ```json, [[0, "foo"], [1, "bar"]] ``` and will deserialize from that vector-based structure back to the map. This allows serialization/deserialization to run directly off of the `HashMap`'s `Iterator`/`FromIterator` implementations. ## The Controversial Part One thing I also did in this PR was switch the catalog from using a `BTreeMap` for tables to using the new `HashMap` type. This breaks the deterministic ordering of the database schema's `tables` map and therefore wrecks the snapshot tests we were using. I had to comment those parts of their respective tests out, because there isn't an easy way to make the underlying hashmap have a deterministic ordering just in tests that I am aware of. If we think that using `BTreeMap` in the catalog is okay over a `HashMap`, then I think it would be okay to roll a similar `SerdeVecBTreeMap` type specifically for the catalog. Coincidentally, this may actually be a good use case for [`indexmap`](https://docs.rs/indexmap/latest/indexmap/), since it holds supposedly similar lookup performance characteristics to hashmap, while preserving order and _having faster iteration_ which could be a win for WAL serialization speed. It also accepts different hashing algorithms so could be swapped in with FNV like `HashMap` can. ## Follow-up work Use the `SerdeVecHashMap` for column data in the WAL following https://github.com/influxdata/influxdb/issues/25461
null
feat: SerdeVecMap type for serializing ID maps (#25492) This PR introduces a new type `SerdeVecHashMap` that can be used in places where we need a HashMap with the following properties: 1. When serialized, it is serialized as a list of key-value pairs, instead of a map 2. When deserialized, it assumes the serialization format from (1.) and deserializes from a list of key-value pairs to a map 3. Does not allow for duplicate keys on deserialization This is useful in places where we need to create map types that map from an identifier (integer) to some value, and need to serialize that data. For example: in the WAL when serializing write batches, and in the catalog when serializing the database/table schema. This PR refactors the code in `influxdb3_wal` and `influxdb3_catalog` to use the new type for maps that use `DbId` and `TableId` as the key. Follow on work can give the same treatment to `ColumnId` based maps once that is fully worked out. ## Explanation If we have a `HashMap<u32, String>`, `serde_json` will serialize it in the following way: ```json {"0": "foo", "1": "bar"} ``` i.e., the integer keys are serialized as strings, since JSON doesn't support any other type of key in maps. `SerdeVecHashMap<u32, String>` will be serialized by `serde_json` in the following way: ```json, [[0, "foo"], [1, "bar"]] ``` and will deserialize from that vector-based structure back to the map. This allows serialization/deserialization to run directly off of the `HashMap`'s `Iterator`/`FromIterator` implementations. ## The Controversial Part One thing I also did in this PR was switch the catalog from using a `BTreeMap` for tables to using the new `HashMap` type. This breaks the deterministic ordering of the database schema's `tables` map and therefore wrecks the snapshot tests we were using. I had to comment those parts of their respective tests out, because there isn't an easy way to make the underlying hashmap have a deterministic ordering just in tests that I am aware of. If we think that using `BTreeMap` in the catalog is okay over a `HashMap`, then I think it would be okay to roll a similar `SerdeVecBTreeMap` type specifically for the catalog. Coincidentally, this may actually be a good use case for [`indexmap`](https://docs.rs/indexmap/latest/indexmap/), since it holds supposedly similar lookup performance characteristics to hashmap, while preserving order and _having faster iteration_ which could be a win for WAL serialization speed. It also accepts different hashing algorithms so could be swapped in with FNV like `HashMap` can. ## Follow-up work Use the `SerdeVecHashMap` for column data in the WAL following https://github.com/influxdata/influxdb/issues/25461
diff --git a/Cargo.lock b/Cargo.lock index 5bdee84c16..7e00a60ddc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2680,7 +2680,9 @@ dependencies = [ name = "influxdb3_id" version = "0.1.0" dependencies = [ + "hashbrown 0.14.5", "serde", + "serde_json", ] [[package]] diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs index a5290eedbb..d3a7123a93 100644 --- a/influxdb3_catalog/src/catalog.rs +++ b/influxdb3_catalog/src/catalog.rs @@ -3,7 +3,7 @@ use crate::catalog::Error::TableNotFound; use arrow::datatypes::SchemaRef; use bimap::BiHashMap; -use influxdb3_id::{ColumnId, DbId, TableId}; +use influxdb3_id::{ColumnId, DbId, SerdeVecHashMap, TableId}; use influxdb3_wal::{ CatalogBatch, CatalogOp, FieldAdditions, LastCacheDefinition, LastCacheDelete, }; @@ -12,7 +12,7 @@ use observability_deps::tracing::info; use parking_lot::RwLock; use schema::{InfluxColumnType, InfluxFieldType, Schema, SchemaBuilder}; use serde::{Deserialize, Serialize, Serializer}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::sync::Arc; use thiserror::Error; @@ -284,8 +284,7 @@ impl Catalog { #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Default)] pub struct InnerCatalog { /// The catalog is a map of databases with their table schemas - #[serde_as(as = "DatabasesAsArray")] - databases: HashMap<DbId, Arc<DatabaseSchema>>, + databases: SerdeVecHashMap<DbId, Arc<DatabaseSchema>>, sequence: SequenceNumber, /// The host_id is the prefix that is passed in when starting up (`host_identifier_prefix`) host_id: Arc<str>, @@ -351,55 +350,10 @@ serde_with::serde_conv!( } ); -serde_with::serde_conv!( - DatabasesAsArray, - HashMap<DbId, Arc<DatabaseSchema>>, - |map: &HashMap<DbId, Arc<DatabaseSchema>>| { - map.values().fold(Vec::new(), |mut acc, db| { - acc.push(DatabasesSerialized { - id: db.id, - name: Arc::clone(&db.name), - tables: db.tables.values().cloned().collect(), - }); - acc - }) - }, - |vec: Vec<DatabasesSerialized>| -> Result<_, String> { - vec.into_iter().fold(Ok(HashMap::new()), |acc, db| { - let mut acc = acc?; - let mut table_map = BiHashMap::new(); - if let Some(_) = acc.insert(db.id, Arc::new(DatabaseSchema { - id: db.id, - name: Arc::clone(&db.name), - tables: db.tables.into_iter().fold(Ok(BTreeMap::new()), |acc, table| { - let mut acc = acc?; - let table_name = Arc::clone(&table.table_name); - table_map.insert(table.table_id, Arc::clone(&table_name)); - if let Some(_) = acc.insert(table.table_id, table) { - return Err(format!("found duplicate table: {}", table_name)); - } - Ok(acc) - })?, - table_map - })) { - return Err(format!("found duplicate db: {}", db.name)); - } - Ok(acc) - }) - } -); - -#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Default)] -struct DatabasesSerialized { - pub id: DbId, - pub name: Arc<str>, - pub tables: Vec<TableDefinition>, -} - impl InnerCatalog { pub(crate) fn new(host_id: Arc<str>, instance_id: Arc<str>) -> Self { Self { - databases: HashMap::new(), + databases: SerdeVecHashMap::new(), sequence: SequenceNumber::new(0), host_id, instance_id, @@ -466,7 +420,7 @@ pub struct DatabaseSchema { pub id: DbId, pub name: Arc<str>, /// The database is a map of tables - pub tables: BTreeMap<TableId, TableDefinition>, + pub tables: SerdeVecHashMap<TableId, TableDefinition>, #[serde_as(as = "TableMapAsArray")] pub table_map: BiHashMap<TableId, Arc<str>>, } @@ -476,7 +430,7 @@ impl DatabaseSchema { Self { id, name, - tables: BTreeMap::new(), + tables: Default::default(), table_map: BiHashMap::new(), } } 
@@ -485,7 +439,7 @@ impl DatabaseSchema { /// everything is compatible and there are no updates to the existing schema, None will be /// returned, otherwise a new `DatabaseSchema` will be returned with the updates applied. pub fn new_if_updated_from_batch(&self, catalog_batch: &CatalogBatch) -> Result<Option<Self>> { - let mut updated_or_new_tables = BTreeMap::new(); + let mut updated_or_new_tables = SerdeVecHashMap::new(); for catalog_op in &catalog_batch.ops { match catalog_op { @@ -1031,7 +985,6 @@ pub fn influx_column_type_from_field_value(fv: &FieldValue<'_>) -> InfluxColumnT #[cfg(test)] mod tests { - use insta::assert_json_snapshot; use pretty_assertions::assert_eq; use test_helpers::assert_contains; @@ -1048,7 +1001,7 @@ mod tests { let mut database = DatabaseSchema { id: DbId::from(0), name: "test_db".into(), - tables: BTreeMap::new(), + tables: SerdeVecHashMap::new(), table_map: { let mut map = BiHashMap::new(); map.insert(TableId::from(1), "test_table_1".into()); @@ -1104,10 +1057,6 @@ mod tests { .databases .insert(database.id, Arc::new(database)); - // Perform a snapshot test to check that the JSON serialized catalog does not change in an - // undesired way when introducing features etc. - assert_json_snapshot!(catalog); - // Serialize/deserialize to ensure roundtrip to/from JSON let serialized = serde_json::to_string(&catalog).unwrap(); let deserialized_inner: InnerCatalog = serde_json::from_str(&serialized).unwrap(); @@ -1122,85 +1071,116 @@ mod tests { { let json = r#"{ "databases": [ - { - "id": 0, - "name": "db1", - "tables": [] - }, - { - "id": 0, - "name": "db1", - "tables": [] - } - ] + [ + 0, + { + "id": 0, + "name": "db1", + "tables": [], + "table_map": [] + } + ], + [ + 0, + { + "id": 0, + "name": "db1", + "tables": [], + "table_map": [] + } + ] + ], + "sequence": 0, + "host_id": "test", + "instance_id": "test", + "db_map": [] }"#; let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err(); - assert_contains!(err.to_string(), "found duplicate db: db1"); + assert_contains!(err.to_string(), "duplicate key found"); } // Duplicate tables { let json = r#"{ "databases": [ - { - "id": 0, - "name": "db1", - "tables": [ - { - "table_id": 0, - "table_name": "tbl1", - "cols": {}, - "column_map": [], - "next_column_id": 0 - }, - { - "table_id": 0, - "table_name": "tbl1", - "cols": {}, - "column_map": [], - "next_column_id": 0 - } - ] - } - ] + [ + 0, + { + "id": 0, + "name": "db1", + "tables": [ + [ + 0, + { + "table_id": 0, + "table_name": "tbl1", + "cols": {}, + "column_map": [], + "next_column_id": 0 + } + ], + [ + 0, + { + "table_id": 0, + "table_name": "tbl1", + "cols": {}, + "column_map": [], + "next_column_id": 0 + } + ] + ] + } + ] + ], + "sequence": 0, + "host_id": "test", + "instance_id": "test", + "db_map": [] }"#; let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err(); - assert_contains!(err.to_string(), "found duplicate table: tbl1"); + assert_contains!(err.to_string(), "duplicate key found"); } // Duplicate columns { let json = r#"{ "databases": [ - { - "id": 0, - "name": "db1", - "tables": [ - { - "table_id": 0, - "table_name": "tbl1", - "cols": { - "col1": { - "column_id": 0, - "type": "i64", - "influx_type": "field", - "nullable": true - }, - "col1": { - "column_id": 0, - "type": "u64", - "influx_type": "field", - "nullable": true - } - }, - "column_map": [ + [ + 0, + { + "id": 0, + "name": "db1", + "tables": [ + [ + 0, { - "column_id": 0, - "name": "col1" + "table_id": 0, + "table_name": "tbl1", + "cols": { + "col1": { + "column_id": 0, + 
"type": "i64", + "influx_type": "field", + "nullable": true + }, + "col1": { + "column_id": 0, + "type": "u64", + "influx_type": "field", + "nullable": true + } + }, + "column_map": [ + { + "column_id": 0, + "name": "col1" + } + ], + "next_column_id": 1 } - ], - "next_column_id": 1 - } - ] - } + ] + ] + } + ] ] }"#; let err = serde_json::from_str::<InnerCatalog>(json).unwrap_err(); @@ -1213,7 +1193,7 @@ mod tests { let mut database = DatabaseSchema { id: DbId::from(0), name: "test".into(), - tables: BTreeMap::new(), + tables: SerdeVecHashMap::new(), table_map: BiHashMap::new(), }; database.tables.insert( @@ -1256,7 +1236,7 @@ mod tests { let mut database = DatabaseSchema { id: DbId::from(0), name: "test_db".into(), - tables: BTreeMap::new(), + tables: SerdeVecHashMap::new(), table_map: { let mut map = BiHashMap::new(); map.insert(TableId::from(1), "test_table_1".into()); @@ -1291,8 +1271,6 @@ mod tests { .databases .insert(database.id, Arc::new(database)); - assert_json_snapshot!(catalog); - let serialized = serde_json::to_string(&catalog).unwrap(); let deserialized_inner: InnerCatalog = serde_json::from_str(&serialized).unwrap(); let deserialized = Catalog::from_inner(deserialized_inner); @@ -1307,7 +1285,7 @@ mod tests { let mut database = DatabaseSchema { id: DbId::from(0), name: "test_db".into(), - tables: BTreeMap::new(), + tables: SerdeVecHashMap::new(), table_map: { let mut map = BiHashMap::new(); map.insert(TableId::from(0), "test".into()); @@ -1348,8 +1326,6 @@ mod tests { .databases .insert(database.id, Arc::new(database)); - assert_json_snapshot!(catalog); - let serialized = serde_json::to_string(&catalog).unwrap(); let deserialized_inner: InnerCatalog = serde_json::from_str(&serialized).unwrap(); let deserialized = Catalog::from_inner(deserialized_inner); diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap deleted file mode 100644 index 0d2887a4f6..0000000000 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap +++ /dev/null @@ -1,256 +0,0 @@ ---- -source: influxdb3_catalog/src/catalog.rs -expression: catalog ---- -{ - "databases": [ - { - "id": 0, - "name": "test_db", - "tables": [ - { - "table_id": 1, - "table_name": "test_table_1", - "cols": { - "bool_field": { - "column_id": 0, - "type": "bool", - "influx_type": "field", - "nullable": true - }, - "f64_field": { - "column_id": 1, - "type": "f64", - "influx_type": "field", - "nullable": true - }, - "i64_field": { - "column_id": 2, - "type": "i64", - "influx_type": "field", - "nullable": true - }, - "string_field": { - "column_id": 3, - "type": "str", - "influx_type": "field", - "nullable": true - }, - "tag_1": { - "column_id": 4, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "tag_2": { - "column_id": 5, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "tag_3": { - "column_id": 6, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "time": { - "column_id": 7, - "type": { - "time": [ - "ns", - null - ] - }, - "influx_type": "time", - "nullable": false - }, - "u64_field": { - "column_id": 8, - "type": "u64", - "influx_type": "field", - "nullable": true - } - }, - "column_map": [ - { - "column_id": 0, - "name": "bool_field" - }, - { - "column_id": 1, - "name": "f64_field" - }, - { - 
"column_id": 2, - "name": "i64_field" - }, - { - "column_id": 3, - "name": "string_field" - }, - { - "column_id": 4, - "name": "tag_1" - }, - { - "column_id": 5, - "name": "tag_2" - }, - { - "column_id": 6, - "name": "tag_3" - }, - { - "column_id": 7, - "name": "time" - }, - { - "column_id": 8, - "name": "u64_field" - } - ], - "next_column_id": 9 - }, - { - "table_id": 2, - "table_name": "test_table_2", - "cols": { - "bool_field": { - "column_id": 0, - "type": "bool", - "influx_type": "field", - "nullable": true - }, - "f64_field": { - "column_id": 1, - "type": "f64", - "influx_type": "field", - "nullable": true - }, - "i64_field": { - "column_id": 2, - "type": "i64", - "influx_type": "field", - "nullable": true - }, - "string_field": { - "column_id": 3, - "type": "str", - "influx_type": "field", - "nullable": true - }, - "tag_1": { - "column_id": 4, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "tag_2": { - "column_id": 5, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "tag_3": { - "column_id": 6, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "time": { - "column_id": 7, - "type": { - "time": [ - "ns", - null - ] - }, - "influx_type": "time", - "nullable": false - }, - "u64_field": { - "column_id": 8, - "type": "u64", - "influx_type": "field", - "nullable": true - } - }, - "column_map": [ - { - "column_id": 0, - "name": "bool_field" - }, - { - "column_id": 1, - "name": "f64_field" - }, - { - "column_id": 2, - "name": "i64_field" - }, - { - "column_id": 3, - "name": "string_field" - }, - { - "column_id": 4, - "name": "tag_1" - }, - { - "column_id": 5, - "name": "tag_2" - }, - { - "column_id": 6, - "name": "tag_3" - }, - { - "column_id": 7, - "name": "time" - }, - { - "column_id": 8, - "name": "u64_field" - } - ], - "next_column_id": 9 - } - ] - } - ], - "sequence": 0, - "host_id": "sample-host-id", - "instance_id": "instance-id", - "db_map": [] -} diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap deleted file mode 100644 index c99f4daec9..0000000000 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap +++ /dev/null @@ -1,113 +0,0 @@ ---- -source: influxdb3_catalog/src/catalog.rs -expression: catalog ---- -{ - "databases": [ - { - "id": 0, - "name": "test_db", - "tables": [ - { - "table_id": 0, - "table_name": "test", - "cols": { - "field": { - "column_id": 0, - "type": "str", - "influx_type": "field", - "nullable": true - }, - "tag_1": { - "column_id": 1, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "tag_2": { - "column_id": 2, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "tag_3": { - "column_id": 3, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": true - }, - "time": { - "column_id": 4, - "type": { - "time": [ - "ns", - null - ] - }, - "influx_type": "time", - "nullable": false - } - }, - "last_caches": [ - { - "table_id": 0, - "table": "test", - "name": "test_table_last_cache", - "keys": [ - "tag_2", - "tag_3" - ], - "vals": [ - "field" - ], - "n": 1, - "ttl": 600 - } - ], - "column_map": [ - { - "column_id": 0, - "name": "field" - }, - { - "column_id": 1, - "name": "tag_1" - }, - { - 
"column_id": 2, - "name": "tag_2" - }, - { - "column_id": 3, - "name": "tag_3" - }, - { - "column_id": 4, - "name": "time" - } - ], - "next_column_id": 5 - } - ] - } - ], - "sequence": 0, - "host_id": "sample-host-id", - "instance_id": "instance-id", - "db_map": [] -} diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap deleted file mode 100644 index a8a8004bc3..0000000000 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap +++ /dev/null @@ -1,102 +0,0 @@ ---- -source: influxdb3_catalog/src/catalog.rs -expression: catalog ---- -{ - "databases": [ - { - "id": 0, - "name": "test_db", - "tables": [ - { - "table_id": 1, - "table_name": "test_table_1", - "key": [ - "tag_1", - "tag_2", - "tag_3" - ], - "cols": { - "field": { - "column_id": 0, - "type": "str", - "influx_type": "field", - "nullable": true - }, - "tag_1": { - "column_id": 1, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": false - }, - "tag_2": { - "column_id": 2, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": false - }, - "tag_3": { - "column_id": 3, - "type": { - "dict": [ - "i32", - "str" - ] - }, - "influx_type": "tag", - "nullable": false - }, - "time": { - "column_id": 4, - "type": { - "time": [ - "ns", - null - ] - }, - "influx_type": "time", - "nullable": false - } - }, - "column_map": [ - { - "column_id": 0, - "name": "field" - }, - { - "column_id": 1, - "name": "tag_1" - }, - { - "column_id": 2, - "name": "tag_2" - }, - { - "column_id": 3, - "name": "tag_3" - }, - { - "column_id": 4, - "name": "time" - } - ], - "next_column_id": 5 - } - ] - } - ], - "sequence": 0, - "host_id": "sample-host-id", - "instance_id": "instance-id", - "db_map": [] -} diff --git a/influxdb3_id/Cargo.toml b/influxdb3_id/Cargo.toml index 0d408e5579..4cf2c9cf2f 100644 --- a/influxdb3_id/Cargo.toml +++ b/influxdb3_id/Cargo.toml @@ -6,7 +6,11 @@ edition.workspace = true license.workspace = true [dependencies] +hashbrown.workspace = true serde.workspace = true +[dev-dependencies] +serde_json.workspace = true + [lints] workspace = true diff --git a/influxdb3_id/src/lib.rs b/influxdb3_id/src/lib.rs index aefa812d58..bc9defd816 100644 --- a/influxdb3_id/src/lib.rs +++ b/influxdb3_id/src/lib.rs @@ -5,6 +5,9 @@ use std::sync::atomic::AtomicU32; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; +mod serialize; +pub use serialize::SerdeVecHashMap; + #[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize, Hash)] pub struct DbId(u32); diff --git a/influxdb3_id/src/serialize.rs b/influxdb3_id/src/serialize.rs new file mode 100644 index 0000000000..bdaf522f2b --- /dev/null +++ b/influxdb3_id/src/serialize.rs @@ -0,0 +1,207 @@ +use std::{ + marker::PhantomData, + ops::{Deref, DerefMut}, +}; + +use hashbrown::{ + hash_map::{IntoIter, Iter, IterMut}, + HashMap, +}; +use serde::{ + de::{self, SeqAccess, Visitor}, + ser::SerializeSeq, + Deserialize, Deserializer, Serialize, Serializer, +}; + +/// A new-type around a `HashMap` that provides special serialization and deserialization behaviour. +/// +/// Specifically, it will be serialized as a vector of tuples, each tuple containing a key-value +/// pair from the map. Deserialization assumes said serialization, and deserializes from the vector +/// of tuples back into the map. 
Traits like `Deref`, `From`, etc. are implemented on this type such +/// that it can be used as a `HashMap`. +/// +/// During deserialization, there are no duplicate keys allowed. If duplicates are found, an error +/// will be thrown. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SerdeVecHashMap<K: Eq + std::hash::Hash, V>(HashMap<K, V>); + +impl<K, V> SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + pub fn new() -> Self { + Self::default() + } +} + +impl<K, V> Default for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + fn default() -> Self { + Self(Default::default()) + } +} + +impl<K, V, T> From<T> for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, + T: Into<HashMap<K, V>>, +{ + fn from(value: T) -> Self { + Self(value.into()) + } +} + +impl<K, V> IntoIterator for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + type Item = (K, V); + + type IntoIter = IntoIter<K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, K, V> IntoIterator for &'a SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + type Item = (&'a K, &'a V); + + type IntoIter = Iter<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, K, V> IntoIterator for &'a mut SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + type Item = (&'a K, &'a mut V); + + type IntoIter = IterMut<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl<K, V> Deref for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + type Target = HashMap<K, V>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<K, V> DerefMut for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<K, V> Serialize for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash + Serialize, + V: Serialize, +{ + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let mut seq = serializer.serialize_seq(Some(self.len()))?; + for ele in self.iter() { + seq.serialize_element(&ele)?; + } + seq.end() + } +} + +impl<'de, K, V> Deserialize<'de> for SerdeVecHashMap<K, V> +where + K: Eq + std::hash::Hash + Deserialize<'de>, + V: Deserialize<'de>, +{ + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let v = deserializer.deserialize_seq(VecVisitor::new())?; + let mut map = HashMap::with_capacity(v.len()); + for (k, v) in v.into_iter() { + if map.insert(k, v).is_some() { + return Err(de::Error::custom("duplicate key found")); + } + } + Ok(Self(map)) + } +} + +type Output<K, V> = fn() -> Vec<(K, V)>; + +struct VecVisitor<K, V> { + marker: PhantomData<Output<K, V>>, +} + +impl<K, V> VecVisitor<K, V> { + fn new() -> Self { + Self { + marker: PhantomData, + } + } +} + +impl<'de, K, V> Visitor<'de> for VecVisitor<K, V> +where + K: Deserialize<'de>, + V: Deserialize<'de>, +{ + type Value = Vec<(K, V)>; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("a vector of key value pairs") + } + + fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> + where + A: SeqAccess<'de>, + { + let mut v = Vec::with_capacity(seq.size_hint().unwrap_or(0)); + while let Some(ele) = seq.next_element()? 
{ + v.push(ele); + } + Ok(v) + } +} + +#[cfg(test)] +mod tests { + use hashbrown::HashMap; + + use super::SerdeVecHashMap; + + #[test] + fn serde_vec_map_with_json() { + let map = HashMap::<u32, &str>::from_iter([(0, "foo"), (1, "bar"), (2, "baz")]); + let serde_vec_map = SerdeVecHashMap::from(map); + // test round-trip to JSON: + let s = serde_json::to_string(&serde_vec_map).unwrap(); + // with using a hashmap the order changes so asserting on the JSON itself is flaky, so if + // you want to see it working use --nocapture on the test... + println!("{s}"); + let d: SerdeVecHashMap<u32, &str> = serde_json::from_str(&s).unwrap(); + assert_eq!(d, serde_vec_map); + } +} diff --git a/influxdb3_telemetry/src/stats.rs b/influxdb3_telemetry/src/stats.rs index 9e5c751f52..b7131cebd4 100644 --- a/influxdb3_telemetry/src/stats.rs +++ b/influxdb3_telemetry/src/stats.rs @@ -21,10 +21,6 @@ pub(crate) struct RollingStats<T> { } impl<T: Default + Num + Copy + NumCast + PartialOrd> RollingStats<T> { - pub fn new() -> RollingStats<T> { - RollingStats::default() - } - /// Update the rolling stats [`Self::min`]/[`Self::max`]/[`Self::avg`] using /// reference to an higher precision stats that is passed in. This is usually a /// per minute interval stats. One thing to note here is the [`Self::num_samples`] @@ -70,10 +66,6 @@ pub(crate) struct Stats<T> { } impl<T: Default + Num + Copy + NumCast + PartialOrd> Stats<T> { - pub fn new() -> Stats<T> { - Stats::default() - } - /// Update the [`Self::min`]/[`Self::max`]/[`Self::avg`] from a /// new value that is sampled. pub fn update(&mut self, new_val: T) -> Option<()> { diff --git a/influxdb3_wal/src/lib.rs b/influxdb3_wal/src/lib.rs index 6873074d27..ed4599e2bc 100644 --- a/influxdb3_wal/src/lib.rs +++ b/influxdb3_wal/src/lib.rs @@ -11,7 +11,7 @@ use crate::snapshot_tracker::SnapshotInfo; use async_trait::async_trait; use data_types::Timestamp; use hashbrown::HashMap; -use influxdb3_id::{DbId, TableId}; +use influxdb3_id::{DbId, SerdeVecHashMap, TableId}; use influxdb_line_protocol::v3::SeriesValue; use influxdb_line_protocol::FieldValue; use iox_time::Time; @@ -442,46 +442,11 @@ pub struct LastCacheDelete { pub struct WriteBatch { pub database_id: DbId, pub database_name: Arc<str>, - #[serde_as(as = "TableChunksMapAsVec")] - pub table_chunks: HashMap<TableId, TableChunks>, + pub table_chunks: SerdeVecHashMap<TableId, TableChunks>, pub min_time_ns: i64, pub max_time_ns: i64, } -#[derive(Debug, Serialize, Deserialize)] -pub struct TableChunksMap { - table_id: TableId, - min_time: i64, - max_time: i64, - chunk_time_to_chunk: HashMap<i64, TableChunk>, -} - -serde_with::serde_conv!( - TableChunksMapAsVec, - HashMap<TableId,TableChunks>, - |map: &HashMap<TableId, TableChunks>| - map.iter() - .map(|(table_id, chunk)| { - TableChunksMap { - table_id: *table_id, - min_time: chunk.min_time, - max_time: chunk.max_time, - chunk_time_to_chunk: chunk.chunk_time_to_chunk.clone() - } - }) - .collect::<Vec<TableChunksMap>>(), - |vec: Vec<TableChunksMap>| -> Result<_, std::convert::Infallible> { - Ok(vec.into_iter().fold(HashMap::new(), |mut acc, chunk| { - acc.insert(chunk.table_id, TableChunks{ - min_time: chunk.min_time, - max_time: chunk.max_time, - chunk_time_to_chunk: chunk.chunk_time_to_chunk - }); - acc - })) - } -); - impl WriteBatch { pub fn new( database_id: DbId, @@ -502,7 +467,7 @@ impl WriteBatch { Self { database_id, database_name, - table_chunks, + table_chunks: table_chunks.into(), min_time_ns, max_time_ns, } @@ -510,7 +475,7 @@ impl WriteBatch { pub fn 
add_write_batch( &mut self, - new_table_chunks: HashMap<TableId, TableChunks>, + new_table_chunks: SerdeVecHashMap<TableId, TableChunks>, min_time_ns: i64, max_time_ns: i64, ) { diff --git a/influxdb3_wal/src/object_store.rs b/influxdb3_wal/src/object_store.rs index eeb2d005fa..14dbe4c02f 100644 --- a/influxdb3_wal/src/object_store.rs +++ b/influxdb3_wal/src/object_store.rs @@ -681,7 +681,8 @@ mod tests { }, )]), }, - )]), + )]) + .into(), min_time_ns: 1, max_time_ns: 3, }); @@ -714,7 +715,8 @@ mod tests { }, )]), }, - )]), + )]) + .into(), min_time_ns: 62_000000000, max_time_ns: 62_000000000, }); @@ -782,7 +784,8 @@ mod tests { }, )]), }, - )]), + )]) + .into(), min_time_ns: 1, max_time_ns: 62_000000000, })], @@ -824,7 +827,8 @@ mod tests { }, )]), }, - )]), + )]) + .into(), min_time_ns: 62_000000000, max_time_ns: 62_000000000, })], @@ -896,7 +900,8 @@ mod tests { }, )]), }, - )]), + )]) + .into(), min_time_ns: 128_000000000, max_time_ns: 128_000000000, }); @@ -956,7 +961,8 @@ mod tests { }, )]), }, - )]), + )]) + .into(), min_time_ns: 128_000000000, max_time_ns: 128_000000000, })], diff --git a/influxdb3_wal/src/serialize.rs b/influxdb3_wal/src/serialize.rs index da52c13273..1b61439383 100644 --- a/influxdb3_wal/src/serialize.rs +++ b/influxdb3_wal/src/serialize.rs @@ -91,8 +91,7 @@ mod tests { use crate::{ Field, FieldData, Row, TableChunk, TableChunks, WalFileSequenceNumber, WalOp, WriteBatch, }; - use hashbrown::HashMap; - use influxdb3_id::{DbId, TableId}; + use influxdb3_id::{DbId, SerdeVecHashMap, TableId}; #[test] fn test_serialize_deserialize() { @@ -117,7 +116,7 @@ mod tests { chunk_time_to_chunk: [(1, chunk)].iter().cloned().collect(), }; let table_id = TableId::from(2); - let mut table_chunks = HashMap::new(); + let mut table_chunks = SerdeVecHashMap::new(); table_chunks.insert(table_id, chunks); let contents = WalContents { diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs index e26cef2f18..b60c6c2583 100644 --- a/influxdb3_write/src/last_cache/mod.rs +++ b/influxdb3_write/src/last_cache/mod.rs @@ -1586,7 +1586,7 @@ fn data_type_from_buffer_field(field: &Field) -> DataType { #[cfg(test)] mod tests { - use std::{cmp::Ordering, collections::BTreeMap, sync::Arc, time::Duration}; + use std::{cmp::Ordering, sync::Arc, time::Duration}; use crate::{ last_cache::{KeyValue, LastCacheProvider, Predicate, DEFAULT_CACHE_TTL}, @@ -1600,7 +1600,7 @@ mod tests { use bimap::BiHashMap; use data_types::NamespaceName; use influxdb3_catalog::catalog::{Catalog, DatabaseSchema, TableDefinition}; - use influxdb3_id::{DbId, TableId}; + use influxdb3_id::{DbId, SerdeVecHashMap, TableId}; use influxdb3_wal::{LastCacheDefinition, WalConfig}; use insta::assert_json_snapshot; use iox_time::{MockProvider, Time, TimeProvider}; @@ -3114,7 +3114,7 @@ mod tests { let mut database = DatabaseSchema { id: DbId::from(0), name: db_name.into(), - tables: BTreeMap::new(), + tables: SerdeVecHashMap::new(), table_map: { let mut map = BiHashMap::new(); map.insert(TableId::from(0), "test_table_1".into()); diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap index b57528e66a..fcacd3e0fc 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap +++ 
b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap @@ -4,84 +4,96 @@ expression: catalog_json --- { "databases": [ - { - "id": 0, - "name": "db", - "tables": [ - { - "cols": { - "f1": { - "column_id": 0, - "influx_type": "field", - "nullable": true, - "type": "bool" - }, - "f2": { - "column_id": 3, - "influx_type": "field", - "nullable": true, - "type": "i64" - }, - "t1": { - "column_id": 1, - "influx_type": "tag", - "nullable": true, - "type": { - "dict": [ - "i32", - "str" - ] - } - }, - "time": { - "column_id": 2, - "influx_type": "time", - "nullable": false, - "type": { - "time": [ - "ns", - null - ] - } - } - }, - "column_map": [ - { - "column_id": 0, - "name": "f1" - }, - { - "column_id": 1, - "name": "t1" - }, - { - "column_id": 2, - "name": "time" - }, + [ + 0, + { + "id": 0, + "name": "db", + "table_map": [ + { + "name": "table", + "table_id": 0 + } + ], + "tables": [ + [ + 0, { - "column_id": 3, - "name": "f2" - } - ], - "last_caches": [ - { - "keys": [ - "t1" + "cols": { + "f1": { + "column_id": 0, + "influx_type": "field", + "nullable": true, + "type": "bool" + }, + "f2": { + "column_id": 3, + "influx_type": "field", + "nullable": true, + "type": "i64" + }, + "t1": { + "column_id": 1, + "influx_type": "tag", + "nullable": true, + "type": { + "dict": [ + "i32", + "str" + ] + } + }, + "time": { + "column_id": 2, + "influx_type": "time", + "nullable": false, + "type": { + "time": [ + "ns", + null + ] + } + } + }, + "column_map": [ + { + "column_id": 0, + "name": "f1" + }, + { + "column_id": 1, + "name": "t1" + }, + { + "column_id": 2, + "name": "time" + }, + { + "column_id": 3, + "name": "f2" + } ], - "n": 1, - "name": "cache", - "table": "table", + "last_caches": [ + { + "keys": [ + "t1" + ], + "n": 1, + "name": "cache", + "table": "table", + "table_id": 0, + "ttl": 14400, + "vals": null + } + ], + "next_column_id": 4, "table_id": 0, - "ttl": 14400, - "vals": null + "table_name": "table" } - ], - "next_column_id": 4, - "table_id": 0, - "table_name": "table" - } - ] - } + ] + ] + } + ] ], "db_map": [ { diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap index 8c1c4c05db..0512ccd036 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap @@ -4,74 +4,86 @@ expression: catalog_json --- { "databases": [ - { - "id": 0, - "name": "db", - "tables": [ - { - "cols": { - "f1": { - "column_id": 0, - "influx_type": "field", - "nullable": true, - "type": "bool" - }, - "t1": { - "column_id": 1, - "influx_type": "tag", - "nullable": true, - "type": { - "dict": [ - "i32", - "str" - ] - } - }, - "time": { - "column_id": 2, - "influx_type": "time", - "nullable": false, - "type": { - "time": [ - "ns", - null - ] - } - } - }, - "column_map": [ - { - "column_id": 0, - "name": "f1" - }, - { - "column_id": 1, - "name": "t1" - }, + [ + 0, + { + "id": 0, + "name": "db", + "table_map": [ + { + "name": "table", + "table_id": 0 + } + ], + "tables": [ + [ + 0, { - "column_id": 2, - "name": "time" - } - ], - "last_caches": [ - { - "keys": [ - "t1" + "cols": { + "f1": { + "column_id": 0, + 
"influx_type": "field", + "nullable": true, + "type": "bool" + }, + "t1": { + "column_id": 1, + "influx_type": "tag", + "nullable": true, + "type": { + "dict": [ + "i32", + "str" + ] + } + }, + "time": { + "column_id": 2, + "influx_type": "time", + "nullable": false, + "type": { + "time": [ + "ns", + null + ] + } + } + }, + "column_map": [ + { + "column_id": 0, + "name": "f1" + }, + { + "column_id": 1, + "name": "t1" + }, + { + "column_id": 2, + "name": "time" + } ], - "n": 1, - "name": "cache", - "table": "table", + "last_caches": [ + { + "keys": [ + "t1" + ], + "n": 1, + "name": "cache", + "table": "table", + "table_id": 0, + "ttl": 14400, + "vals": null + } + ], + "next_column_id": 3, "table_id": 0, - "ttl": 14400, - "vals": null + "table_name": "table" } - ], - "next_column_id": 3, - "table_id": 0, - "table_name": "table" - } - ] - } + ] + ] + } + ] ], "db_map": [ { diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap index 4704183b76..9925b2f06a 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap @@ -4,71 +4,83 @@ expression: catalog_json --- { "databases": [ - { - "id": 0, - "name": "db", - "tables": [ - { - "cols": { - "f1": { - "column_id": 0, - "influx_type": "field", - "nullable": true, - "type": "bool" - }, - "f2": { - "column_id": 3, - "influx_type": "field", - "nullable": true, - "type": "i64" - }, - "t1": { - "column_id": 1, - "influx_type": "tag", - "nullable": true, - "type": { - "dict": [ - "i32", - "str" - ] - } - }, - "time": { - "column_id": 2, - "influx_type": "time", - "nullable": false, - "type": { - "time": [ - "ns", - null - ] - } - } - }, - "column_map": [ - { - "column_id": 0, - "name": "f1" - }, + [ + 0, + { + "id": 0, + "name": "db", + "table_map": [ + { + "name": "table", + "table_id": 0 + } + ], + "tables": [ + [ + 0, { - "column_id": 1, - "name": "t1" - }, - { - "column_id": 2, - "name": "time" - }, - { - "column_id": 3, - "name": "f2" + "cols": { + "f1": { + "column_id": 0, + "influx_type": "field", + "nullable": true, + "type": "bool" + }, + "f2": { + "column_id": 3, + "influx_type": "field", + "nullable": true, + "type": "i64" + }, + "t1": { + "column_id": 1, + "influx_type": "tag", + "nullable": true, + "type": { + "dict": [ + "i32", + "str" + ] + } + }, + "time": { + "column_id": 2, + "influx_type": "time", + "nullable": false, + "type": { + "time": [ + "ns", + null + ] + } + } + }, + "column_map": [ + { + "column_id": 0, + "name": "f1" + }, + { + "column_id": 1, + "name": "t1" + }, + { + "column_id": 2, + "name": "time" + }, + { + "column_id": 3, + "name": "f2" + } + ], + "next_column_id": 4, + "table_id": 0, + "table_name": "table" } - ], - "next_column_id": 4, - "table_id": 0, - "table_name": "table" - } - ] - } + ] + ] + } + ] ], "db_map": [ { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index dc8d20d361..b7a0050e19 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.80.0" +channel = "1.82.0" components = ["rustfmt", "clippy", "rust-analyzer"]
defd20c7ed5ff81174d1a6888cf0826a65f1bf55
Carol (Nichols || Goulding)
2023-08-07 17:21:03
Reduce duplication and increase consistency in namespace names in tests
Create consts outside the test steps when possible to share values that need to be the same. Call all of these namespace_name to distinguish them from a namespace object or command.
null
refactor: Reduce duplication and increase consistency in namespace names in tests Create consts outside the test steps when possible to share values that need to be the same. Call all of these namespace_name to distinguish them from a namespace object or command.
diff --git a/influxdb_iox/tests/end_to_end_cases/cli/namespace.rs b/influxdb_iox/tests/end_to_end_cases/cli/namespace.rs index 2184128898..ec92ece108 100644 --- a/influxdb_iox/tests/end_to_end_cases/cli/namespace.rs +++ b/influxdb_iox/tests/end_to_end_cases/cli/namespace.rs @@ -228,6 +228,8 @@ async fn deletion() { let database_url = maybe_skip_integration!(); let mut cluster = MiniCluster::create_shared(database_url).await; + const NAMESPACE_NAME: &str = "bananas_namespace"; + StepTest::new( &mut cluster, vec![ @@ -236,7 +238,6 @@ async fn deletion() { async { let addr = state.cluster().router().router_grpc_base().to_string(); let retention_period_hours = 0; - let namespace = "bananas_namespace"; // Validate the output of the namespace retention command // @@ -252,11 +253,11 @@ async fn deletion() { .arg("create") .arg("--retention-hours") .arg(retention_period_hours.to_string()) - .arg(namespace) + .arg(NAMESPACE_NAME) .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(NAMESPACE_NAME) .and(predicate::str::contains("retentionPeriodNs".to_string())) .not(), ); @@ -267,7 +268,6 @@ async fn deletion() { Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "bananas_namespace"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -275,12 +275,12 @@ async fn deletion() { .arg(&addr) .arg("namespace") .arg("delete") - .arg(namespace) + .arg(NAMESPACE_NAME) .assert() .success() .stdout( predicate::str::contains("Deleted namespace") - .and(predicate::str::contains(namespace)), + .and(predicate::str::contains(NAMESPACE_NAME)), ); } .boxed() @@ -288,7 +288,6 @@ async fn deletion() { Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "bananas_namespace"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -298,7 +297,7 @@ async fn deletion() { .arg("list") .assert() .success() - .stdout(predicate::str::contains(namespace).not()); + .stdout(predicate::str::contains(NAMESPACE_NAME).not()); } .boxed() })), @@ -320,7 +319,7 @@ async fn create_service_limits() { Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns1"; + let namespace_name = "ns1"; // { // "id": <foo>, @@ -336,13 +335,13 @@ async fn create_service_limits() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(namespace_name) .arg("--max-tables") .arg("123") .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(namespace_name) .and(predicate::str::contains(r#""maxTables": 123"#)) .and(predicate::str::contains(r#""maxColumnsPerTable": 200"#)), ); @@ -352,7 +351,7 @@ async fn create_service_limits() { Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns2"; + let namespace_name = "ns2"; // { // "id": <foo>, @@ -368,13 +367,13 @@ async fn create_service_limits() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(namespace_name) .arg("--max-columns-per-table") .arg("321") .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(namespace_name) .and(predicate::str::contains(r#""maxTables": 500"#)) .and(predicate::str::contains(r#""maxColumnsPerTable": 321"#)), ); @@ -384,7 +383,7 @@ async fn create_service_limits() { 
Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns3"; + let namespace_name = "ns3"; // { // "id": <foo>, @@ -400,7 +399,7 @@ async fn create_service_limits() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(namespace_name) .arg("--max-tables") .arg("123") .arg("--max-columns-per-table") @@ -408,7 +407,7 @@ async fn create_service_limits() { .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(namespace_name) .and(predicate::str::contains(r#""maxTables": 123"#)) .and(predicate::str::contains(r#""maxColumnsPerTable": 321"#)), ); @@ -427,12 +426,13 @@ async fn update_service_limit() { let database_url = maybe_skip_integration!(); let mut cluster = MiniCluster::create_shared(database_url).await; + const NAMESPACE_NAME: &str = "service_limiter_namespace"; + StepTest::new( &mut cluster, vec![ Step::Custom(Box::new(|state: &mut StepTestState| { async { - let namespace = "service_limiter_namespace"; let addr = state.cluster().router().router_grpc_base().to_string(); // { @@ -449,11 +449,11 @@ async fn update_service_limit() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(NAMESPACE_NAME) .and(predicate::str::contains(r#""maxTables": 500"#)) .and(predicate::str::contains(r#""maxColumnsPerTable": 200"#)), ); @@ -462,7 +462,6 @@ async fn update_service_limit() { })), Step::Custom(Box::new(|state: &mut StepTestState| { async { - let namespace = "service_limiter_namespace"; let addr = state.cluster().router().router_grpc_base().to_string(); // { @@ -481,11 +480,11 @@ async fn update_service_limit() { .arg("update-limit") .arg("--max-tables") .arg("1337") - .arg(namespace) + .arg(NAMESPACE_NAME) .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(NAMESPACE_NAME) .and(predicate::str::contains(r#""maxTables": 1337"#)) .and(predicate::str::contains(r#""maxColumnsPerTable": 200"#)), ); @@ -494,7 +493,6 @@ async fn update_service_limit() { })), Step::Custom(Box::new(|state: &mut StepTestState| { async { - let namespace = "service_limiter_namespace"; let addr = state.cluster().router().router_grpc_base().to_string(); // { @@ -513,11 +511,11 @@ async fn update_service_limit() { .arg("update-limit") .arg("--max-columns-per-table") .arg("42") - .arg(namespace) + .arg(NAMESPACE_NAME) .assert() .success() .stdout( - predicate::str::contains(namespace) + predicate::str::contains(NAMESPACE_NAME) .and(predicate::str::contains(r#""maxTables": 1337"#)) .and(predicate::str::contains(r#""maxColumnsPerTable": 42"#)), ); @@ -536,13 +534,14 @@ async fn create_partition_template_negative() { let database_url = maybe_skip_integration!(); let mut cluster = MiniCluster::create_shared(database_url).await; + const NAMESPACE_NAME: &str = "ns_negative"; + StepTest::new( &mut cluster, vec![ Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns_negative"; // No partition tempplate specified Command::cargo_bin("influxdb_iox") @@ -551,7 +550,7 @@ async fn create_partition_template_negative() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .assert() .failure() @@ -568,7 +567,7 @@ async fn create_partition_template_negative() { .arg(&addr) 
.arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg("{\"prts\": [{\"tagValue\": \"location\"}, {\"tagValue\": \"state\"}, {\"timeFormat\": \"%Y-%m\"}] }") .assert() @@ -584,7 +583,7 @@ async fn create_partition_template_negative() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg("{\"parts\": [{\"tagValue\": \"location\"}, {\"tagValue\": \"time\"}, {\"timeFormat\": \"%Y-%m\"}] }") .assert() @@ -600,7 +599,7 @@ async fn create_partition_template_negative() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg("{\"parts\": [{\"tagValue\": \"location\"}, {\"timeFormat\": \"%42\"}] }") .assert() @@ -616,7 +615,7 @@ async fn create_partition_template_negative() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg("{\"parts\": [{\"tagValue\": \"1\"},{\"tagValue\": \"2\"},{\"timeFormat\": \"%Y-%m\"},{\"tagValue\": \"4\"},{\"tagValue\": \"5\"},{\"tagValue\": \"6\"},{\"tagValue\": \"7\"},{\"tagValue\": \"8\"},{\"tagValue\": \"9\"}]}") .assert() @@ -646,6 +645,7 @@ async fn create_partition_template_positive() { async { let addr = state.cluster().router().router_grpc_base().to_string(); + let namespace_name_1 = "ns_partition_template_1"; // No partition template specified Command::cargo_bin("influxdb_iox") .unwrap() @@ -653,52 +653,55 @@ async fn create_partition_template_positive() { .arg(&addr) .arg("namespace") .arg("create") - .arg("ns_partition_template_1") + .arg(namespace_name_1) .assert() .success() - .stdout(predicate::str::contains("ns_partition_template_1")); + .stdout(predicate::str::contains(namespace_name_1)); // Partition template with time format + let namespace_name_2 = "ns_partition_template_2"; Command::cargo_bin("influxdb_iox") .unwrap() .arg("-h") .arg(&addr) .arg("namespace") .arg("create") - .arg("ns_partition_template_2") + .arg(namespace_name_2) .arg("--partition-template") .arg("{\"parts\":[{\"timeFormat\":\"%Y-%m\"}] }") .assert() .success() - .stdout(predicate::str::contains("ns_partition_template_2")); + .stdout(predicate::str::contains(namespace_name_2)); // Partition template with tag value + let namespace_name_3 = "ns_partition_template_3"; Command::cargo_bin("influxdb_iox") .unwrap() .arg("-h") .arg(&addr) .arg("namespace") .arg("create") - .arg("ns_partition_template_3") + .arg(namespace_name_3) .arg("--partition-template") .arg("{\"parts\":[{\"tagValue\":\"col1\"}] }") .assert() .success() - .stdout(predicate::str::contains("ns_partition_template_3")); + .stdout(predicate::str::contains(namespace_name_3)); // Partition template with time format, tag value, and tag of unsual column name + let namespace_name_4 = "ns_partition_template_4"; Command::cargo_bin("influxdb_iox") .unwrap() .arg("-h") .arg(&addr) .arg("namespace") .arg("create") - .arg("ns_partition_template_4") + .arg(namespace_name_4) .arg("--partition-template") .arg("{\"parts\":[{\"tagValue\":\"col1\"},{\"timeFormat\":\"%Y-%d\"},{\"tagValue\":\"yes,col name\"}] }") .assert() .success() - .stdout(predicate::str::contains("ns_partition_template_4")); + .stdout(predicate::str::contains(namespace_name_4)); // Update an existing namespace Command::cargo_bin("influxdb_iox") @@ -707,7 +710,7 @@ async fn create_partition_template_positive() { .arg(&addr) .arg("namespace") .arg("update") - .arg("ns_partition_template_4") + .arg(namespace_name_4) 
.arg("--partition-template") .arg("{\"parts\":[{\"tagValue\":\"col1\"}] }") .assert() @@ -733,7 +736,8 @@ async fn create_partition_template_implicit_table_creation() { test_helpers::maybe_start_logging(); let database_url = maybe_skip_integration!(); let mut cluster = MiniCluster::create_shared(database_url).await; - let namespace = "ns_createtableimplicit"; + + const NAMESPACE_NAME: &str = "ns_createtableimplicit"; StepTest::new( &mut cluster, @@ -742,7 +746,6 @@ async fn create_partition_template_implicit_table_creation() { Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns_createtableimplicit"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -750,14 +753,14 @@ async fn create_partition_template_implicit_table_creation() { .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg( "{\"parts\":[{\"timeFormat\":\"%Y-%m\"}, {\"tagValue\":\"location\"}]}", ) .assert() .success() - .stdout(predicate::str::contains(namespace)); + .stdout(predicate::str::contains(NAMESPACE_NAME)); } .boxed() })), @@ -765,7 +768,6 @@ async fn create_partition_template_implicit_table_creation() { Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_http_base().to_string(); - let namespace = "ns_createtableimplicit"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -773,7 +775,7 @@ async fn create_partition_template_implicit_table_creation() { .arg("-h") .arg(&addr) .arg("write") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("../test_fixtures/lineproto/temperature.lp") .assert() .success() @@ -786,7 +788,7 @@ async fn create_partition_template_implicit_table_creation() { async { // data from 'air_and_water.lp' wait_for_query_result_with_namespace( - namespace, + NAMESPACE_NAME, state, "SELECT * from h2o_temperature order by time desc limit 10", None, @@ -798,7 +800,7 @@ async fn create_partition_template_implicit_table_creation() { // Check partition keys that use the namespace's partition template Step::PartitionKeys { table_name: "h2o_temperature".to_string(), - namespace_name: Some(namespace.to_string()), + namespace_name: Some(NAMESPACE_NAME.to_string()), expected: vec![ "1970-01|coyote_creek", "1970-01|puget_sound", @@ -820,7 +822,8 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem test_helpers::maybe_start_logging(); let database_url = maybe_skip_integration!(); let mut cluster = MiniCluster::create_shared(database_url).await; - let namespace = "ns_createtableexplicitwithout"; + + const NAMESPACE_NAME: &str = "ns_createtableexplicitwithout"; StepTest::new( &mut cluster, @@ -829,7 +832,6 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns_createtableexplicitwithout"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -837,12 +839,12 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg("{\"parts\":[{\"timeFormat\":\"%Y-%m\"}, {\"tagValue\":\"state\"}]}") .assert() .success() - .stdout(predicate::str::contains(namespace)); + .stdout(predicate::str::contains(NAMESPACE_NAME)); } .boxed() })), @@ -850,7 +852,6 @@ async fn 
create_partition_template_explicit_table_creation_without_partition_tem Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns_createtableexplicitwithout"; let table_name = "h2o_temperature"; Command::cargo_bin("influxdb_iox") @@ -860,7 +861,7 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem .arg(&addr) .arg("table") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg(table_name) .assert() .success() @@ -872,7 +873,6 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_http_base().to_string(); - let namespace = "ns_createtableexplicitwithout"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -880,7 +880,7 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem .arg("-h") .arg(&addr) .arg("write") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("../test_fixtures/lineproto/temperature.lp") .assert() .success() @@ -893,7 +893,7 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem async { // data from 'air_and_water.lp' wait_for_query_result_with_namespace( - namespace, + NAMESPACE_NAME, state, "SELECT * from h2o_temperature order by time desc limit 10", None, @@ -903,7 +903,7 @@ async fn create_partition_template_explicit_table_creation_without_partition_tem .boxed() })), // Check partition keys that use the namespace's partition template - Step::PartitionKeys{table_name: "h2o_temperature".to_string(), namespace_name: Some(namespace.to_string()), expected: vec!["1970-01|CA", "1970-01|WA"]}, + Step::PartitionKeys{table_name: "h2o_temperature".to_string(), namespace_name: Some(NAMESPACE_NAME.to_string()), expected: vec!["1970-01|CA", "1970-01|WA"]}, ], ) .run() @@ -919,7 +919,8 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa test_helpers::maybe_start_logging(); let database_url = maybe_skip_integration!(); let mut cluster = MiniCluster::create_shared(database_url).await; - let namespace = "ns_createtableexplicitwith"; + + const NAMESPACE_NAME: &str = "ns_createtableexplicitwith"; StepTest::new( &mut cluster, @@ -928,7 +929,6 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns_createtableexplicitwith"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -936,12 +936,12 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa .arg(&addr) .arg("namespace") .arg("create") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("--partition-template") .arg("{\"parts\":[{\"timeFormat\":\"%Y-%m\"}, {\"tagValue\":\"state\"}]}") .assert() .success() - .stdout(predicate::str::contains(namespace)); + .stdout(predicate::str::contains(NAMESPACE_NAME)); } .boxed() })), @@ -949,7 +949,6 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_grpc_base().to_string(); - let namespace = "ns_createtableexplicitwith"; let table_name = "h2o_temperature"; Command::cargo_bin("influxdb_iox") @@ -959,7 +958,7 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa .arg(&addr) .arg("table") .arg("create") - 
.arg(namespace) + .arg(NAMESPACE_NAME) .arg(table_name) .arg("--partition-template") .arg("{\"parts\":[{\"tagValue\":\"location\"}, {\"timeFormat\":\"%Y-%m\"}]}") @@ -973,7 +972,6 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa Step::Custom(Box::new(|state: &mut StepTestState| { async { let addr = state.cluster().router().router_http_base().to_string(); - let namespace = "ns_createtableexplicitwith"; Command::cargo_bin("influxdb_iox") .unwrap() @@ -981,7 +979,7 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa .arg("-h") .arg(&addr) .arg("write") - .arg(namespace) + .arg(NAMESPACE_NAME) .arg("../test_fixtures/lineproto/temperature.lp") .assert() .success() @@ -994,7 +992,7 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa async { // data from 'air_and_water.lp' wait_for_query_result_with_namespace( - namespace, + NAMESPACE_NAME, state, "SELECT * from h2o_temperature order by time desc limit 10", None, @@ -1004,7 +1002,7 @@ async fn create_partition_template_explicit_table_creation_with_partition_templa .boxed() })), // Check partition keys that use the table's partition template - Step::PartitionKeys{table_name: "h2o_temperature".to_string(), namespace_name: Some(namespace.to_string()), expected: vec!["coyote_creek|1970-01", "puget_sound|1970-01", "santa_monica|1970-01"]}, + Step::PartitionKeys{table_name: "h2o_temperature".to_string(), namespace_name: Some(NAMESPACE_NAME.to_string()), expected: vec!["coyote_creek|1970-01", "puget_sound|1970-01", "santa_monica|1970-01"]}, ], ) .run()
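For context on the partition keys the tests above assert (e.g. `"1970-01|coyote_creek"` for the namespace template `{"parts":[{"timeFormat":"%Y-%m"}, {"tagValue":"location"}]}`), here is a minimal, illustrative sketch of that derivation. It only reproduces the expected test output, assuming parts are evaluated in order and joined with `|` as the expected keys suggest; the function name is made up and this is not the IOx implementation.

```rust
// Illustrative only: reproduces the partition keys the tests above expect for
// the template {"parts":[{"timeFormat":"%Y-%m"},{"tagValue":"location"}]}.
// Assumes parts are evaluated in order and joined with '|'; not IOx code.
use chrono::{TimeZone, Utc};

fn example_partition_key(timestamp_ns: i64, location: &str) -> String {
    let time_part = Utc.timestamp_nanos(timestamp_ns).format("%Y-%m");
    format!("{time_part}|{location}")
}

fn main() {
    // The line protocol fixture has timestamps near the epoch, hence "1970-01".
    assert_eq!(
        example_partition_key(1_000_000_000, "coyote_creek"),
        "1970-01|coyote_creek"
    );
}
```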
6f92bccc99c7aba551dc1c35fc25ad7ed0ef50bc
Carol (Nichols || Goulding)
2023-05-04 17:08:19
Use protobuf for PartitionTemplate in CreateNamespace gRPC API
The service implementation doesn't use this field yet.
null
feat: Use protobuf for PartitionTemplate in CreateNamespace gRPC API The service implementation doesn't use this field yet.
diff --git a/generated_types/protos/influxdata/iox/namespace/v1/service.proto b/generated_types/protos/influxdata/iox/namespace/v1/service.proto index e586bece07..ce75731da9 100644 --- a/generated_types/protos/influxdata/iox/namespace/v1/service.proto +++ b/generated_types/protos/influxdata/iox/namespace/v1/service.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package influxdata.iox.namespace.v1; option go_package = "github.com/influxdata/iox/namespace/v1"; +import "influxdata/iox/partition_template/v1/template.proto"; + service NamespaceService { // Get all namespaces rpc GetNamespaces(GetNamespacesRequest) returns (GetNamespacesResponse); @@ -36,6 +38,9 @@ message CreateNamespaceRequest { // NULL means "infinite retention", and 0 is mapped to NULL. Negative values // are rejected. optional int64 retention_period_ns = 2; + + // Partitioning scheme to use for tables created in this namespace + optional influxdata.iox.partition_template.v1.PartitionTemplate partition_template = 3; } message CreateNamespaceResponse { diff --git a/generated_types/protos/influxdata/iox/partition_template/v1/template.proto b/generated_types/protos/influxdata/iox/partition_template/v1/template.proto index 61d7f89835..976950ddba 100644 --- a/generated_types/protos/influxdata/iox/partition_template/v1/template.proto +++ b/generated_types/protos/influxdata/iox/partition_template/v1/template.proto @@ -12,14 +12,17 @@ message PartitionTemplate { // // For example, given the following template: // - // [ TemplatePart::time_format("%Y.%j") TemplatePart::tag_value("region") ] + // ```text + // [ TemplatePart::time_format("%Y.%j") TemplatePart::tag_value("region") ] + // ``` // // The below example rows would have the specified partition key derived: // - // time=2023-03-10T13:00:00, region=EMEA, x=42 => "2023.69-EMEA" - // time=2023-03-10T13:00:00, region=EMEA-bananas => "2023.69-EMEA-bananas" - // time=2023-03-10T13:00:00, x=42 => "2023.69-region" - // + // ```text + // time=2023-03-10T13:00:00, region=EMEA, x=42 => "2023.69-EMEA" + // time=2023-03-10T13:00:00, region=EMEA-bananas => "2023.69-EMEA-bananas" + // time=2023-03-10T13:00:00, x=42 => "2023.69-region" + // ``` repeated TemplatePart parts = 1; } diff --git a/generated_types/src/lib.rs b/generated_types/src/lib.rs index 8c6d5451ac..c475531bb1 100644 --- a/generated_types/src/lib.rs +++ b/generated_types/src/lib.rs @@ -122,6 +122,19 @@ pub mod influxdata { } } + pub mod partition_template { + pub mod v1 { + include!(concat!( + env!("OUT_DIR"), + "/influxdata.iox.partition_template.v1.rs" + )); + include!(concat!( + env!("OUT_DIR"), + "/influxdata.iox.partition_template.v1.serde.rs" + )); + } + } + pub mod predicate { pub mod v1 { include!(concat!(env!("OUT_DIR"), "/influxdata.iox.predicate.v1.rs")); diff --git a/influxdb_iox_client/src/client/namespace.rs b/influxdb_iox_client/src/client/namespace.rs index fb88dca097..92102a89de 100644 --- a/influxdb_iox_client/src/client/namespace.rs +++ b/influxdb_iox_client/src/client/namespace.rs @@ -50,6 +50,7 @@ impl Client { .create_namespace(CreateNamespaceRequest { name: namespace.to_string(), retention_period_ns, + partition_template: None, }) .await?; diff --git a/router/tests/grpc.rs b/router/tests/grpc.rs index ab26989bfd..bbb555a831 100644 --- a/router/tests/grpc.rs +++ b/router/tests/grpc.rs @@ -79,6 +79,7 @@ async fn test_namespace_create() { let req = CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(RETENTION), + partition_template: None, }; let got = ctx .grpc_delegate() @@ -151,6 
+152,7 @@ async fn test_namespace_delete() { let req = CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(RETENTION), + partition_template: None, }; let got = ctx .grpc_delegate() @@ -280,6 +282,7 @@ async fn test_create_namespace_0_retention_period() { let req = CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(0), // A zero! + partition_template: None, }; let got = ctx .grpc_delegate() @@ -344,6 +347,7 @@ async fn test_create_namespace_negative_retention_period() { let req = CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(-42), + partition_template: None, }; let err = ctx .grpc_delegate() @@ -407,6 +411,7 @@ async fn test_update_namespace_0_retention_period() { .create_namespace(Request::new(CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(42), + partition_template: None, })) .await .expect("failed to create namespace") @@ -512,6 +517,7 @@ async fn test_update_namespace_negative_retention_period() { .create_namespace(Request::new(CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(42), + partition_template: None, })) .await .expect("failed to create namespace") @@ -781,6 +787,7 @@ async fn test_update_namespace_limit_0_max_tables_max_columns() { .create_namespace(Request::new(CreateNamespaceRequest { name: "bananas_test".to_string(), retention_period_ns: Some(0), + partition_template: None, })) .await .expect("failed to create namespace") diff --git a/service_grpc_namespace/src/lib.rs b/service_grpc_namespace/src/lib.rs index 84f754a89c..d8f039ddc2 100644 --- a/service_grpc_namespace/src/lib.rs +++ b/service_grpc_namespace/src/lib.rs @@ -70,6 +70,7 @@ impl namespace_service_server::NamespaceService for NamespaceService { let CreateNamespaceRequest { name: namespace_name, retention_period_ns, + partition_template: _, } = request.into_inner(); // Ensure the namespace name is consistently processed within IOx - this @@ -363,6 +364,7 @@ mod tests { let req = CreateNamespaceRequest { name: NS_NAME.to_string(), retention_period_ns: Some(RETENTION), + partition_template: None, }; let created_ns = handler .create_namespace(Request::new(req)) @@ -486,6 +488,7 @@ mod tests { let req = CreateNamespaceRequest { name: NS_NAME.to_string(), retention_period_ns: Some(RETENTION), + partition_template: None, }; let created_ns = handler .create_namespace(Request::new(req)) @@ -545,6 +548,7 @@ mod tests { let req = CreateNamespaceRequest { name: String::from($name), retention_period_ns: Some(RETENTION), + partition_template: None, }; let got = handler.create_namespace(Request::new(req)).await;
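For reference, a minimal sketch of how a caller builds the request after this change. It mirrors the updated call sites in the diff, which all pass `partition_template: None` for now, and assumes the prost-generated type is exposed under the module layout added to `generated_types/src/lib.rs` above.

```rust
// Minimal sketch. Assumes the prost-generated request type is exposed as
// generated_types::influxdata::iox::namespace::v1::CreateNamespaceRequest,
// matching the module layout added to generated_types/src/lib.rs above.
use generated_types::influxdata::iox::namespace::v1::CreateNamespaceRequest;

fn example_create_request(namespace: &str) -> CreateNamespaceRequest {
    CreateNamespaceRequest {
        name: namespace.to_string(),
        // None means "infinite retention"; zero is mapped to NULL and negative
        // values are rejected by the service.
        retention_period_ns: None,
        // New field in this commit; the service implementation ignores it for
        // now, so existing callers simply pass None.
        partition_template: None,
    }
}
```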
bc33ad1548c16a9c7704b9475a45635079f5d3c3
Dom Dwyer
2023-05-16 16:29:33
PartitionTemplate proto definition
Defines the PartitionTemplate as a re-usable proto type.
null
feat: PartitionTemplate proto definition

Defines the PartitionTemplate as a re-usable proto type.
diff --git a/generated_types/build.rs b/generated_types/build.rs index 3570f1b9d8..362dafcd26 100644 --- a/generated_types/build.rs +++ b/generated_types/build.rs @@ -39,6 +39,7 @@ fn generate_grpc_types(root: &Path) -> Result<()> { let ingester_path = root.join("influxdata/iox/ingester/v1"); let namespace_path = root.join("influxdata/iox/namespace/v1"); let object_store_path = root.join("influxdata/iox/object_store/v1"); + let partition_template_path = root.join("influxdata/iox/partition_template/v1"); let predicate_path = root.join("influxdata/iox/predicate/v1"); let querier_path = root.join("influxdata/iox/querier/v1"); let schema_path = root.join("influxdata/iox/schema/v1"); @@ -58,6 +59,7 @@ fn generate_grpc_types(root: &Path) -> Result<()> { ingester_path.join("write.proto"), namespace_path.join("service.proto"), object_store_path.join("service.proto"), + partition_template_path.join("template.proto"), predicate_path.join("predicate.proto"), querier_path.join("flight.proto"), root.join("google/longrunning/operations.proto"), diff --git a/generated_types/protos/influxdata/iox/partition_template/v1/template.proto b/generated_types/protos/influxdata/iox/partition_template/v1/template.proto new file mode 100644 index 0000000000..9487e8228e --- /dev/null +++ b/generated_types/protos/influxdata/iox/partition_template/v1/template.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; +package influxdata.iox.partition_template.v1; +option go_package = "github.com/influxdata/iox/partition_template/v1"; + +// A partitioning template describes how data is split into IOx partitions in +// the ingest pipeline. +message PartitionTemplate { + // One or more partitioning template parts. + // + // Each template part is evaluated in sequence, concatinating the final + // partition key from the output of each part, delimited by hyphens. + // + // For example, given the following template: + // + // [ + // TemplatePart::time_format("%Y.%j") + // TemplatePart::column_value("region") + // ] + // + // The below example rows would have the specified partition key derived: + // + // time=2023-03-10T13:00:00, region=EMEA, x=42 => "2023.69-EMEA" + // time=2023-03-10T13:00:00, region=EMEA-bananas => "2023.69-EMEA-bananas" + // time=2023-03-10T13:00:00, x=42 => "2023.69-region" + // + repeated TemplatePart parts = 1; +} + +// A sub-part of a PartitionTemplate. +message TemplatePart { + oneof part { + // A column value matcher extracts a string value from the column with the + // specified name. + // + // If a row does not contain the specified column, the provided column name + // is rendered instead of the (missing) value. + string column_value = 1; + + // A time format matcher accepts a "strftime"-like format string and + // evaluates it against the "time" column. + string time_format = 2; + } +}
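A small sketch of constructing the template from the proto's own example (a `%Y.%j` time format followed by a `region` column matcher), assuming prost's standard code generation for the oneof (module `template_part`, enum `Part`); the exact generated module path may differ.

```rust
// Sketch only: builds the example template described in the proto comments.
// Assumes prost's standard naming for the generated types; the real module
// path in generated_types may differ slightly.
use generated_types::influxdata::iox::partition_template::v1::{
    template_part::Part, PartitionTemplate, TemplatePart,
};

fn example_template() -> PartitionTemplate {
    PartitionTemplate {
        parts: vec![
            // Evaluated first: renders the "time" column with a strftime-like format.
            TemplatePart {
                part: Some(Part::TimeFormat("%Y.%j".to_string())),
            },
            // Evaluated second: extracts the value of the "region" column, or the
            // column name itself when a row does not contain that column.
            TemplatePart {
                part: Some(Part::ColumnValue("region".to_string())),
            },
        ],
    }
}
```

Per the comment in `template.proto`, a row with `time=2023-03-10T13:00:00, region=EMEA` would then yield the partition key `2023.69-EMEA`.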
c05739ff20ae5772ba489b56aa8ba62bd628b421
Joe-Blount
2023-09-19 11:53:36
move CompactRange up to RoundInfo (#8736)
* chore(compactor): move CompactRange up to RoundInfo
* chore: insta updates from compactor CompactRange refactor
* chore: lint cleanup
* chore: addressing some of the comments
* chore: remove duplicated done check
* chore: variable renaming
null
chore(compactor): move CompactRange up to RoundInfo (#8736)

* chore(compactor): move CompactRange up to RoundInfo
* chore: insta updates from compactor CompactRange refactor
* chore: lint cleanup
* chore: addressing some of the comments
* chore: remove duplicated done check
* chore: variable renaming
diff --git a/compactor/src/components/divide_initial/mod.rs b/compactor/src/components/divide_initial/mod.rs index cebd875082..6d9c2a9ccb 100644 --- a/compactor/src/components/divide_initial/mod.rs +++ b/compactor/src/components/divide_initial/mod.rs @@ -2,7 +2,7 @@ use std::fmt::{Debug, Display}; use data_types::{ParquetFile, TransitionPartitionId}; -use crate::RoundInfo; +use crate::round_info::CompactType; pub mod multiple_branches; @@ -15,7 +15,7 @@ pub trait DivideInitial: Debug + Display + Send + Sync { fn divide( &self, files: Vec<ParquetFile>, - round_info: RoundInfo, + op: CompactType, partition: TransitionPartitionId, ) -> (Vec<Vec<ParquetFile>>, Vec<ParquetFile>); } diff --git a/compactor/src/components/divide_initial/multiple_branches.rs b/compactor/src/components/divide_initial/multiple_branches.rs index 5232466115..4e797a30d3 100644 --- a/compactor/src/components/divide_initial/multiple_branches.rs +++ b/compactor/src/components/divide_initial/multiple_branches.rs @@ -3,12 +3,7 @@ use std::fmt::Display; use data_types::{CompactionLevel, ParquetFile, Timestamp, TransitionPartitionId}; use observability_deps::tracing::warn; -use crate::{ - components::split_or_compact::start_level_files_to_split::{ - merge_small_l0_chains, split_into_chains, - }, - RoundInfo, -}; +use crate::round_info::CompactType; use super::DivideInitial; @@ -44,129 +39,85 @@ impl DivideInitial for MultipleBranchesDivideInitial { fn divide( &self, files: Vec<ParquetFile>, - round_info: RoundInfo, + op: CompactType, partition: TransitionPartitionId, ) -> (Vec<Vec<ParquetFile>>, Vec<ParquetFile>) { let mut more_for_later = vec![]; - match round_info { - RoundInfo::ManySmallFiles { + match op { + CompactType::ManySmallFiles { start_level, max_num_files_to_group, max_total_file_size_to_group, } => { - // Files must be sorted by `max_l0_created_at` when there are overlaps to resolve. - // If the `start_level` is greater than 0, there cannot be overlaps within the level, - // so sorting by `max_l0_created_at` is not necessary (however, sorting by `min_time` - // is needed to avoid introducing overlaps within their levels). When the `start_level` - // is 0, we have to sort by `max_l0_created_at` if a chain of overlaps is too big for - // a single compaction. - // - // See tests many_l0_files_different_created_order and many_l1_files_different_created_order for examples - - let start_level_files = files - .into_iter() - .filter(|f| f.compaction_level == start_level) - .collect::<Vec<_>>(); - let mut branches = Vec::with_capacity(start_level_files.len()); - - let mut chains = Vec::with_capacity(start_level_files.len()); - if start_level == CompactionLevel::Initial { - // L0 files can be highly overlapping, requiring 'vertical splitting' (see high_l0_overlap_split). - // Achieving `vertical splitting` requires we tweak the grouping here for two reasons: - // 1) Allow the large highly overlapped groups of L0s to remain in a single branch, so they trigger the split - // 2) Prevent the output of a prior split from being grouped together to undo the previous veritacal split. - - // Both of these objectives need to consider the L0s as a chains of overlapping files. The chains are - // each a set of L0s that overlap each other, but do not overlap the other chains. - // Chains can be created based on min_time/max_time without regard for max_l0_created_at because there - // are no overlaps between chains. 
- let initial_chains = split_into_chains(start_level_files); - - // Reason 1) above - keep the large groups of L0s in a single branch to facilitate later splitting. - for chain in initial_chains { - let this_chain_bytes: usize = - chain.iter().map(|f| f.file_size_bytes as usize).sum(); - if this_chain_bytes > 2 * max_total_file_size_to_group { - // This is a very large set of overlapping L0s, its needs vertical splitting, so keep the branch intact - // to trigger the split. - branches.push(chain); - } else { - chains.push(chain); - } - } - - // If the chains are smaller than the max compact size, combine them to get better compaction group sizes. - // This combining of chains must happen based on max_l0_created_at (it can only join adjacent chains, when - // sorted by max_l0_created_at). - chains = merge_small_l0_chains(chains, max_total_file_size_to_group); - } else { - chains = vec![start_level_files]; - } - - // Reason 2) above - ensure the grouping in branches doesn't undo the vertical splitting. - // Assume we start with 30 files (A,B,C,...), that were each split into 3 files (A1, A2, A3, B1, ..). If we create branches - // from sorting all files by max_l0_created_at we'd undo the vertical splitting (A1-A3 would get compacted back into one file). - // Currently the contents of each chain is more like A1, B1, C1, so by grouping chains together we can preserve the previous - // vertical splitting. - for chain in chains { - let start_level_files = order_files(chain, start_level); - - let capacity = start_level_files.len(); - - // Split L0s into many small groups, each has max_num_files_to_group but not exceed max_total_file_size_to_group - // Collect files until either limit is reached - let mut current_branch = Vec::with_capacity(capacity); - let mut current_branch_size = 0; - for f in start_level_files { - if current_branch.len() == max_num_files_to_group - || current_branch_size + f.file_size_bytes as usize - > max_total_file_size_to_group - { - if current_branch.is_empty() { - warn!( - "Size of a file {} is larger than the max size limit to compact on partition {}.", - f.file_size_bytes, - partition - ); - } - if current_branch.len() == 1 { - // Compacting a branch of 1 won't help us reduce the L0 file count. Put it on the ignore list. - more_for_later.push(current_branch.pop().unwrap()); - } else if !current_branch.is_empty() { - branches.push(current_branch); - } - current_branch = Vec::with_capacity(capacity); - current_branch_size = 0; + // Since its ManySmallFiles, we know the files are L0s, and the total bytes is under our limit. + // We just need to split them up into branches for compaction. + // TODO: it would be nice to pick some good split times, store them in the ManySmallFiles op, and use them consistently across all the branches. That should make the later round more efficient. 
+ let mut branches = Vec::with_capacity(files.len() / max_num_files_to_group); + let files = order_files(files, start_level); + let capacity = files.len(); + + let mut current_branch = Vec::with_capacity(capacity.min(max_num_files_to_group)); + let mut current_branch_size = 0; + for f in files { + if current_branch.len() == max_num_files_to_group + || current_branch_size + f.file_size_bytes as usize + > max_total_file_size_to_group + { + if current_branch.is_empty() { + warn!( + "Size of a file {} is larger than the max size limit to compact on partition {}.", + f.file_size_bytes, + partition + ); } - current_branch_size += f.file_size_bytes as usize; - current_branch.push(f); - } - // push the last branch - if !current_branch.is_empty() { if current_branch.len() == 1 { // Compacting a branch of 1 won't help us reduce the L0 file count. Put it on the ignore list. more_for_later.push(current_branch.pop().unwrap()); - } else { + } else if !current_branch.is_empty() { branches.push(current_branch); } + current_branch = Vec::with_capacity(capacity); + current_branch_size = 0; + } + current_branch_size += f.file_size_bytes as usize; + current_branch.push(f); + } + + // push the last branch + if !current_branch.is_empty() { + if current_branch.len() == 1 { + // Compacting a branch of 1 won't help us reduce the L0 file count. Put it on the ignore list. + more_for_later.push(current_branch.pop().unwrap()); + } else { + branches.push(current_branch); } } (branches, more_for_later) } - RoundInfo::TargetLevel { + CompactType::TargetLevel { target_level, max_total_file_size_to_group, } => { + let start_level = target_level.prev(); + let total_bytes: usize = files.iter().map(|f| f.file_size_bytes as usize).sum(); - if total_bytes < max_total_file_size_to_group { + let start_file_cnt = files + .iter() + .filter(|f| f.compaction_level == start_level) + .count(); + + if start_file_cnt == 0 { + // No files to compact + (vec![], files) + } else if total_bytes < max_total_file_size_to_group { (vec![files], more_for_later) } else { let (mut for_now, rest): (Vec<ParquetFile>, Vec<ParquetFile>) = files .into_iter() - .partition(|f| f.compaction_level == target_level.prev()); + .partition(|f| f.compaction_level == start_level); let min_time = for_now.iter().map(|f| f.min_time).min().unwrap(); let max_time = for_now.iter().map(|f| f.max_time).max().unwrap(); @@ -181,7 +132,7 @@ impl DivideInitial for MultipleBranchesDivideInitial { } } - RoundInfo::SimulatedLeadingEdge { + CompactType::SimulatedLeadingEdge { max_num_files_to_group, max_total_file_size_to_group, } => { @@ -248,31 +199,10 @@ impl DivideInitial for MultipleBranchesDivideInitial { } // RoundSplit already eliminated all the files we don't need to work on. - RoundInfo::VerticalSplit { .. } => (vec![files], more_for_later), + CompactType::VerticalSplit { .. } => (vec![files], more_for_later), - RoundInfo::CompactRanges { ranges, .. } => { - // Each range describes what can be a distinct branch, concurrently compacted. 
- - let mut branches = Vec::with_capacity(ranges.len()); - let mut this_branch: Vec<ParquetFile>; - let mut files = files; - - for range in &ranges { - (this_branch, files) = files.into_iter().partition(|f2| { - f2.overlaps_time_range(Timestamp::new(range.min), Timestamp::new(range.max)) - }); - - if !this_branch.is_empty() { - branches.push(this_branch) - } - } - assert!( - files.is_empty(), - "all files should map to a range, instead partition {} had unmapped files", - partition - ); - (branches, vec![]) - } + // Deferred does nothing now, everything is for later + CompactType::Deferred { .. } => (vec![], files), } } } @@ -317,7 +247,7 @@ mod tests { #[test] fn test_divide_num_file() { - let round_info = RoundInfo::ManySmallFiles { + let op = CompactType::ManySmallFiles { start_level: CompactionLevel::Initial, max_num_files_to_group: 2, max_total_file_size_to_group: 100, @@ -328,7 +258,7 @@ mod tests { assert_eq!( divide.divide( vec![], - round_info.clone(), + op.clone(), TransitionPartitionId::Deprecated(PartitionId::new(0)) ), (Vec::<Vec<_>>::new(), Vec::new()) @@ -353,7 +283,7 @@ mod tests { let (branches, more_for_later) = divide.divide( files, - round_info.clone(), + op.clone(), TransitionPartitionId::Deprecated(PartitionId::new(0)), ); // output must be split into their max_l0_created_at @@ -364,7 +294,7 @@ mod tests { #[test] fn test_divide_size_limit() { - let round_info = RoundInfo::ManySmallFiles { + let op = CompactType::ManySmallFiles { start_level: CompactionLevel::Initial, max_num_files_to_group: 10, max_total_file_size_to_group: 100, @@ -392,7 +322,7 @@ mod tests { let (branches, more_for_later) = divide.divide( files, - round_info, + op, TransitionPartitionId::Deprecated(PartitionId::new(0)), ); // output must be split into their max_l0_created_at diff --git a/compactor/src/components/file_classifier/logging.rs b/compactor/src/components/file_classifier/logging.rs index 96265b7583..62f8e9992b 100644 --- a/compactor/src/components/file_classifier/logging.rs +++ b/compactor/src/components/file_classifier/logging.rs @@ -3,7 +3,9 @@ use std::fmt::Display; use data_types::ParquetFile; use observability_deps::tracing::info; -use crate::{file_classification::FileClassification, partition_info::PartitionInfo, RoundInfo}; +use crate::{ + file_classification::FileClassification, partition_info::PartitionInfo, round_info::CompactType, +}; use super::FileClassifier; @@ -40,15 +42,15 @@ where fn classify( &self, partition_info: &PartitionInfo, - round_info: &RoundInfo, + op: &CompactType, files: Vec<ParquetFile>, ) -> FileClassification { - let classification = self.inner.classify(partition_info, round_info, files); + let classification = self.inner.classify(partition_info, op, files); info!( partition_id = partition_info.partition_id.get(), target_level = %classification.target_level, - round_info = %round_info, + op = %op, files_to_compact = classification.num_files_to_compact(), files_to_split = classification.num_files_to_split(), files_to_upgrade = classification.num_files_to_upgrade(), diff --git a/compactor/src/components/file_classifier/mod.rs b/compactor/src/components/file_classifier/mod.rs index 87361dfe74..75e953d1a2 100644 --- a/compactor/src/components/file_classifier/mod.rs +++ b/compactor/src/components/file_classifier/mod.rs @@ -5,7 +5,9 @@ use std::{ use data_types::ParquetFile; -use crate::{file_classification::FileClassification, partition_info::PartitionInfo, RoundInfo}; +use crate::{ + file_classification::FileClassification, partition_info::PartitionInfo, 
round_info::CompactType, +}; pub mod logging; pub mod split_based; @@ -14,7 +16,7 @@ pub trait FileClassifier: Debug + Display + Send + Sync { fn classify( &self, partition_info: &PartitionInfo, - round_info: &RoundInfo, + op: &CompactType, files: Vec<ParquetFile>, ) -> FileClassification; } @@ -26,9 +28,9 @@ where fn classify( &self, partition_info: &PartitionInfo, - round_info: &RoundInfo, + op: &CompactType, files: Vec<ParquetFile>, ) -> FileClassification { - self.as_ref().classify(partition_info, round_info, files) + self.as_ref().classify(partition_info, op, files) } } diff --git a/compactor/src/components/file_classifier/split_based.rs b/compactor/src/components/file_classifier/split_based.rs index 9707d7df6c..d84a7ee1ce 100644 --- a/compactor/src/components/file_classifier/split_based.rs +++ b/compactor/src/components/file_classifier/split_based.rs @@ -9,10 +9,10 @@ use crate::{ }, file_classification::{ CompactReason, FileClassification, FileToSplit, FilesForProgress, FilesToSplitOrCompact, - SplitReason, + NoneReason, SplitReason, }, partition_info::PartitionInfo, - RoundInfo, + round_info::CompactType, }; use super::FileClassifier; @@ -131,13 +131,13 @@ where fn classify( &self, partition_info: &PartitionInfo, - round_info: &RoundInfo, + op: &CompactType, files: Vec<ParquetFile>, ) -> FileClassification { let files_to_compact = files; - match round_info { - RoundInfo::ManySmallFiles { + match op { + CompactType::ManySmallFiles { start_level, max_num_files_to_group, max_total_file_size_to_group, @@ -149,10 +149,10 @@ where *start_level, ), - RoundInfo::SimulatedLeadingEdge { .. } => { + CompactType::SimulatedLeadingEdge { .. } => { // file division already done in round_info_source FileClassification { - target_level: round_info.target_level(), + target_level: op.target_level(), files_to_make_progress_on: FilesForProgress { upgrade: vec![], split_or_compact: FilesToSplitOrCompact::Compact( @@ -164,13 +164,13 @@ where } } - RoundInfo::VerticalSplit { split_times } => file_classification_for_vertical_split( + CompactType::VerticalSplit { split_times } => file_classification_for_vertical_split( split_times, files_to_compact, partition_info.partition_id(), ), - RoundInfo::TargetLevel { target_level, .. } => { + CompactType::TargetLevel { target_level, .. } => { let partition_id = partition_info.partition_id(); // Split files into files_to_compact, files_to_upgrade, and files_to_keep @@ -219,90 +219,14 @@ where } } - RoundInfo::CompactRanges { - max_num_files_to_group, - max_total_file_size_to_group, - .. - } => { - let partition_id = partition_info.partition_id(); - - let l0_count = files_to_compact - .iter() - .filter(|f| f.compaction_level == CompactionLevel::Initial) - .count(); - - if l0_count > *max_num_files_to_group { - // Too many L0s, do manySmallFiles within this range. 
- let (files_to_compact, mut files_to_keep) = files_to_compact - .into_iter() - .partition(|f| f.compaction_level == CompactionLevel::Initial); - - let l0_classification = file_classification_for_many_files( - partition_id.clone(), - *max_total_file_size_to_group, - *max_num_files_to_group, - files_to_compact, - CompactionLevel::Initial, - ); - - files_to_keep.extend(l0_classification.files_to_keep); - - assert!( - !l0_classification.files_to_make_progress_on.is_empty(), - "L0 files_to_make_progress_on should not be empty, for partition {}", - partition_id - ); - FileClassification { - target_level: l0_classification.target_level, - files_to_make_progress_on: l0_classification.files_to_make_progress_on, - files_to_keep, - } - } else { - // There's not too many L0s, so upgrade/split/compact as required to get L0s->L1. - let target_level = CompactionLevel::FileNonOverlapped; - let (files_to_compact, mut files_to_keep) = self.target_level_split.apply( - files_to_compact, - target_level, - partition_id.clone(), - ); - - // To have efficient compaction performance, we do not need to compact eligible non-overlapped files - // Find eligible non-overlapped files and keep for next round of compaction - let (files_to_compact, non_overlapping_files) = self.non_overlap_split.apply( - files_to_compact, - target_level, - partition_id.clone(), - ); - files_to_keep.extend(non_overlapping_files); - - // To have efficient compaction performance, we only need to upgrade (catalog update only) eligible files - let (files_to_compact, files_to_upgrade) = - self.upgrade_split - .apply(files_to_compact, target_level, partition_id); - - // See if we need to split start-level files due to over compaction size limit - let (files_to_split_or_compact, other_files) = - self.split_or_compact - .apply(partition_info, files_to_compact, target_level); - files_to_keep.extend(other_files); - - let files_to_make_progress_on = FilesForProgress { - upgrade: files_to_upgrade, - split_or_compact: files_to_split_or_compact, - }; - - assert!( - !files_to_make_progress_on.is_empty(), - "files_to_make_progress_on should not be empty, for partition {}", - partition_info.partition_id() - ); - FileClassification { - target_level, - files_to_make_progress_on, - files_to_keep, - } - } - } + CompactType::Deferred {} => FileClassification { + target_level: CompactionLevel::Initial, + files_to_make_progress_on: FilesForProgress { + upgrade: vec![], + split_or_compact: FilesToSplitOrCompact::None(NoneReason::Deferred), + }, + files_to_keep: files_to_compact, + }, } } } @@ -332,61 +256,60 @@ fn file_classification_for_many_files( let mut files_to_compact = vec![]; let mut files_to_keep: Vec<ParquetFile> = vec![]; - // The goal is to compact the small files without repeately rewriting the non-small files (that hurts write amp). - // Assume tiny files separated by non-tiny files, we need to get down to max_num_files_to_group. - // So compute the biggest files we can skip, and still be guaranteed to get down to max_num_files_to_group. + // If we're under the max_total_file_size_to_group, we could unconditionally put all files in files_to_compact. + // But ManySmallFiles always compacts to L0, which means we'll be rewriting them again anyway to get to L1, which + // means if there's a larger file or two we can skip on this L0->L0 compaction (while still getting file count + // small enough), that will help write amplification. So assume tiny files separated by non-tiny files, and we + // need to get down to max_num_files_to_group. 
+ // skip_size is the biggest files we can skip, and still be guaranteed to get down to max_num_files_to_group. let skip_size = max_total_file_size_to_group * 2 / max_num_files_to_group; // Enforce max_num_files_to_group - if files.len() > max_num_files_to_group { - let ordered_files = order_files(files, target_level.prev()); - - let mut chunk_bytes: usize = 0; - let mut chunk: Vec<ParquetFile> = Vec::with_capacity(max_num_files_to_group); - for f in ordered_files { - if !files_to_compact.is_empty() { - // We've already got a batch of files to compact, this can wait. - files_to_keep.push(f); - } else if chunk_bytes + f.file_size_bytes as usize > max_total_file_size_to_group - || chunk.len() + 1 > max_num_files_to_group - || f.file_size_bytes >= skip_size as i64 - { - // This file will not be included in this compaction. - files_to_keep.push(f); - if chunk.len() > 1 { - // Several files; we'll do an L0->L0 comapction on them. - files_to_compact = chunk.to_vec(); - chunk = Vec::with_capacity(max_num_files_to_group); - } else if !chunk.is_empty() { - // Just one file, and we don't want to compact it with 'f', so skip it. - files_to_keep.append(chunk.to_vec().as_mut()); - chunk = Vec::with_capacity(max_num_files_to_group); - } - } else { - // This files goes in our draft chunk to compact - chunk_bytes += f.file_size_bytes as usize; - chunk.push(f); - } - } - if !chunk.is_empty() { - assert!(files_to_compact.is_empty(), "we shouldn't accumulate multiple non-contiguous chunks to compact, but we found non-contiguous chunks in compaction job for partition_id={}", partition); + let ordered_files = order_files(files, target_level.prev()); + + let mut chunk_bytes: usize = 0; + let mut chunk: Vec<ParquetFile> = Vec::with_capacity(max_num_files_to_group); + for f in ordered_files { + if !files_to_compact.is_empty() { + // We've already got a batch of files to compact, this can wait. + files_to_keep.push(f); + } else if chunk_bytes + f.file_size_bytes as usize > max_total_file_size_to_group + || chunk.len() + 1 > max_num_files_to_group + || f.file_size_bytes >= skip_size as i64 + { + // This file will not be included in this compaction. + files_to_keep.push(f); if chunk.len() > 1 { - // We need to compact what comes before f + // Several files; we'll do an L0->L0 comapction on them. files_to_compact = chunk.to_vec(); + chunk = Vec::with_capacity(max_num_files_to_group); } else if !chunk.is_empty() { + // Just one file, and we don't want to compact it with 'f', so skip it. 
files_to_keep.append(chunk.to_vec().as_mut()); + chunk = Vec::with_capacity(max_num_files_to_group); } + } else { + // This files goes in our draft chunk to compact + chunk_bytes += f.file_size_bytes as usize; + chunk.push(f); + } + } + if !chunk.is_empty() { + assert!(files_to_compact.is_empty(), "we shouldn't accumulate multiple non-contiguous chunks to compact, but we found non-contiguous chunks in compaction job for partition_id={}", partition); + if chunk.len() > 1 { + // We need to compact what comes before f + files_to_compact = chunk.to_vec(); + } else if !chunk.is_empty() { + files_to_keep.append(chunk.to_vec().as_mut()); } - - assert!( - chunk.is_empty() || chunk.len() > 1, - "should not have only 1 chunk, for partition {}", - partition - ); - } else { - files_to_compact = files; } + assert!( + chunk.is_empty() || chunk.len() > 1, + "should not have only 1 chunk, for partition {}", + partition + ); + let files_to_make_progress_on = FilesForProgress { upgrade: vec![], split_or_compact: FilesToSplitOrCompact::Compact( diff --git a/compactor/src/components/round_info_source/mod.rs b/compactor/src/components/round_info_source/mod.rs index 7b1e83a871..ad275489da 100644 --- a/compactor/src/components/round_info_source/mod.rs +++ b/compactor/src/components/round_info_source/mod.rs @@ -1,21 +1,24 @@ use std::{ cmp::max, fmt::{Debug, Display}, - sync::Arc, + sync::{Arc, Mutex}, }; use crate::components::{ split_or_compact::start_level_files_to_split::{ - linear_dist_ranges, merge_small_l0_chains, select_split_times, split_into_chains, + linear_dist_ranges, merge_l1_spanned_chains, merge_small_l0_chains, select_split_times, + split_into_chains, }, Components, }; use async_trait::async_trait; -use data_types::{CompactionLevel, FileRange, ParquetFile, Timestamp, TransitionPartitionId}; +use data_types::{CompactionLevel, ParquetFile, Timestamp, TransitionPartitionId}; use itertools::Itertools; use observability_deps::tracing::{debug, info}; -use crate::{error::DynError, PartitionInfo, RoundInfo}; +use crate::{ + error::DynError, round_info::CompactRange, round_info::CompactType, PartitionInfo, RoundInfo, +}; /// Calculates information about what this compaction round does. 
/// When we get deeper into the compaction decision making, there @@ -28,10 +31,10 @@ pub trait RoundInfoSource: Debug + Display + Send + Sync { async fn calculate( &self, components: Arc<Components>, - last_round_info: Option<RoundInfo>, + last_round_info: Option<Arc<RoundInfo>>, partition_info: &PartitionInfo, files: Vec<ParquetFile>, - ) -> Result<(RoundInfo, Vec<Vec<ParquetFile>>, Vec<ParquetFile>), DynError>; + ) -> Result<(Arc<RoundInfo>, bool), DynError>; } #[derive(Debug)] @@ -56,16 +59,16 @@ impl RoundInfoSource for LoggingRoundInfoWrapper { async fn calculate( &self, components: Arc<Components>, - last_round_info: Option<RoundInfo>, + last_round_info: Option<Arc<RoundInfo>>, partition_info: &PartitionInfo, files: Vec<ParquetFile>, - ) -> Result<(RoundInfo, Vec<Vec<ParquetFile>>, Vec<ParquetFile>), DynError> { + ) -> Result<(Arc<RoundInfo>, bool), DynError> { let res = self .inner .calculate(components, last_round_info, partition_info, files) .await; - if let Ok((round_info, branches, files_later)) = &res { - debug!(round_info_source=%self.inner, %round_info, branches=branches.len(), files_later=files_later.len(), "running round"); + if let Ok((round_info, done)) = &res { + debug!(round_info_source=%self.inner, %round_info, %done, "running round"); } res } @@ -208,197 +211,427 @@ impl LevelBasedRoundInfo { false } - /// vertical_split_handling determines if vertical splitting is necessary, or has already been done. - /// If splitting is necessary, a vec of split times is returned. If a previous split is detected, a - /// vec of CompactionRange is returned to preserve the prior split. - /// The need for more splitting takes precedence over acting on prior splitting. So if a vec of split times - /// is returned, the caller will use those split times in a VerticalSplit RoundInfo for vertical splitting. - /// If only a vec of CompactRanges are returned, the caller will use those to preserve the prior split until - /// all the L0s are compacted to L1. - /// If neither is returned, the caller will identify another type of RoundInfo for this round of compaction. - pub fn vertical_split_handling( + /// consider_vertical_splitting determines if vertical splitting is necessary, and if so, a vec of split times is + /// returned. + pub fn consider_vertical_splitting( &self, partition_id: TransitionPartitionId, files: Vec<ParquetFile>, max_compact_size: usize, - ) -> (Vec<i64>, Vec<FileRange>) { - let (start_level_files, mut target_level_files): (Vec<ParquetFile>, Vec<ParquetFile>) = - files - .into_iter() - .filter(|f| f.compaction_level != CompactionLevel::Final) - .partition(|f| f.compaction_level == CompactionLevel::Initial); + ) -> Vec<i64> { + let file_cnt = files.len(); + + let (start_level_files, target_level_files): (Vec<ParquetFile>, Vec<ParquetFile>) = files + .into_iter() + .filter(|f| f.compaction_level != CompactionLevel::Final) + .partition(|f| f.compaction_level == CompactionLevel::Initial); let len = start_level_files.len(); let mut split_times = Vec::with_capacity(len); - // Break up the start level files into chains of files that overlap each other. - // Then we'll determine if vertical splitting is needed within each chain. 
- let chains = split_into_chains(start_level_files); - let chains = merge_small_l0_chains(chains, max_compact_size); - let mut ranges = Vec::with_capacity(chains.len()); + let cap: usize = start_level_files + .iter() + .map(|f| f.file_size_bytes as usize) + .sum(); - for chain in &chains { - let chain_cap: usize = chain.iter().map(|f| f.file_size_bytes as usize).sum(); + // TODO: remove this: + if start_level_files.len() > 300 && cap / file_cnt < max_compact_size / 10 { + info!("skipping vertical splitting on partition_id {} for now, due to excessive file count. file count: {}, cap: {} MB", + partition_id, start_level_files.len(), cap/1024/1024); + return vec![]; + } - if chain.len() > 300 && chain_cap / chain.len() < max_compact_size / 10 { - info!("skipping vertical splitting on partition_id {} for now, due to excessive file count. chain length: {}, cap: {} MB", - partition_id, chain.len(), chain_cap/1024/1024); - continue; - } + // A single file over max size can just get upgraded to L1, then L2, unless it overlaps other L0s. + // So multi file filess over the max compact size may need split + if start_level_files.len() > 1 && cap > max_compact_size { + // files in this range are too big to compact in one job, so files will be split it into smaller, more manageable ranges. + // We can't know the data distribution within each file without reading the file (too expensive), but we can + // still learn a lot about the data distribution accross the set of files by assuming even distribtuion within each + // file and considering the distribution of files within the files's time range. + let linear_ranges = linear_dist_ranges( + &start_level_files, + cap, + max_compact_size, + partition_id.clone(), + ); - // A single file over max size can just get upgraded to L1, then L2, unless it overlaps other L0s. - // So multi file chains over the max compact size may need split - if chain.len() > 1 && chain_cap > max_compact_size { - // This chain is too big to compact on its own, so files will be split it into smaller, more manageable chains. - // We can't know the data distribution within each file without reading the file (too expensive), but we can - // still learn a lot about the data distribution accross the set of files by assuming even distribtuion within each - // file and considering the distribution of files within the chain's time range. - let linear_ranges = - linear_dist_ranges(chain, chain_cap, max_compact_size, partition_id.clone()); - - for range in linear_ranges { - // split at every time range of linear distribution. - if !split_times.is_empty() { - split_times.push(range.min - 1); - } + let mut first_range = true; + for range in linear_ranges { + // split at every time range of linear distribution. + if !first_range { + split_times.push(range.min - 1); + } + first_range = false; - // how many start level files are in this range? - let overlaps = chain - .iter() - .filter(|f| { - f.overlaps_time_range( - Timestamp::new(range.min), - Timestamp::new(range.max), - ) - }) - .count(); - - if overlaps > 1 && range.cap > max_compact_size { - // Since we'll be splitting the start level files within this range, it would be nice to align the split times to - // the min/max times of target level files. select_split_times will use the min/max time of target level files - // as hints, and see what lines up to where the range needs split. 
- let mut split_hints: Vec<i64> = - Vec::with_capacity(range.cap * 2 / max_compact_size + 1); - - // split time is the last time included in the 'left' side of the split. Our goal with these hints is to avoid - // overlaps with L1 files, we'd like the 'left file' to end before this L1 file starts (split=min-1), or it can - // include up to the last ns of the L1 file (split=max). - for f in &target_level_files { - if f.min_time.get() - 1 > range.min && f.min_time.get() < range.max { - split_hints.push(f.min_time.get() - 1); - } - if f.max_time.get() > range.min && f.max_time.get() < range.max { - split_hints.push(f.max_time.get()); - } - } + // how many start level files are in this range? + let overlaps = start_level_files + .iter() + .filter(|f| { + f.overlaps_time_range(Timestamp::new(range.min), Timestamp::new(range.max)) + }) + .count(); - // We may have started splitting files, and now there's a new L0 added that spans our previous splitting. - // We'll detect multiple L0 files ending at the same time, and add that to the split hints. - let end_times = chain - .iter() - .map(|f| f.max_time.get()) - .sorted() - .dedup_with_count(); - for (count, time) in end_times { - if count > 1 { - // wether we previously split here or not, with at least 2 L0s ending here, its a good place to split. - split_hints.push(time); - } + if overlaps > 1 && range.cap > max_compact_size { + // Since we'll be splitting the start level files within this range, it would be nice to align the split times to + // the min/max times of target level files. select_split_times will use the min/max time of target level files + // as hints, and see what lines up to where the range needs split. + let mut split_hints: Vec<i64> = + Vec::with_capacity(range.cap * 2 / max_compact_size + 1); + + // split time is the last time included in the 'left' side of the split. Our goal with these hints is to avoid + // overlaps with L1 files, we'd like the 'left file' to end before this L1 file starts (split=min-1), or it can + // include up to the last ns of the L1 file (split=max). + for f in &target_level_files { + if f.min_time.get() - 1 > range.min && f.min_time.get() < range.max { + split_hints.push(f.min_time.get() - 1); } + if f.max_time.get() > range.min && f.max_time.get() < range.max { + split_hints.push(f.max_time.get()); + } + } - let splits = select_split_times( - range.cap, - max_compact_size, - range.min, - range.max, - split_hints.clone(), - ); - split_times.extend(splits); + // We may have started splitting files, and now there's a new L0 added that spans our previous splitting. + // We'll detect multiple L0 files ending at the same time, and add that to the split hints. + let end_times = start_level_files + .iter() + .map(|f| f.max_time.get()) + .sorted() + .dedup_with_count(); + for (count, time) in end_times { + if count > 1 { + // wether we previously split here or not, with at least 2 L0s ending here, its a good place to split. + split_hints.push(time); + } } + + let splits = select_split_times( + range.cap, + max_compact_size, + range.min, + range.max, + split_hints.clone(), + ); + split_times.extend(splits); } } } - // If we're not doing vertical splitting, while we've got the chains, lets check for a previous vertical split. - // We'll preserve prior splitting activity by creating CompactionRange for each of the previous splits. 
- let mut prior_max = -1; - if chains.len() > 1 { - let mut prior_overlapping_max = Timestamp::new(0); - let mut prior_chain_max: i64 = 0; - let mut overlaps: Vec<ParquetFile>; - let mut adding_ranges = true; - - for chain in &chains { - let mut min = chain.iter().map(|f| f.min_time).min().unwrap(); - - let max = chain.iter().map(|f| f.max_time).max().unwrap(); - assert!(min.get() > prior_max); - prior_max = max.get(); - - if min <= prior_overlapping_max && prior_overlapping_max != Timestamp::new(0) { - // Target level files overlap more than one start level file, and there is a target level file overlapping - // the prior chain of L0s and this one. We'll split the target level file at the pror range/chain max before - // proceeding with compactions. - split_times.push(prior_chain_max); - adding_ranges = false; + split_times.sort(); + split_times.dedup(); + split_times + } + + // derive_draft_ranges takes a last round info option and a vec of files - one of them must be populated. + // From this, we'll get a draft of CompactRanges for the current round of compaction. Its a draft because + // it partially set up, having only the files, min, max, and cap set. The op, branches, and files_for_later + // will be determined shortly. + // We split up into several ranges to keep the L0->L1 compaction simple (the overlaps allowed in L0 make it messy). + // But we don't want to create artificial divisions in L2, so L2's get set aside until we've consolidated to + // a single CompactRange. + fn derive_draft_ranges( + &self, + partition_info: &PartitionInfo, + last_round_info: Option<Arc<RoundInfo>>, + files: Vec<ParquetFile>, + ) -> (Vec<CompactRange>, Option<Vec<ParquetFile>>) { + // We require exactly 1 source of information: either 'files' because this is the first round, or 'last_round_info' from the prior round. + if let Some(last_round_info) = last_round_info { + assert!( + files.is_empty(), + "last_round_info and files must not both be populated" + ); + self.evaluate_prior_ranges(partition_info, last_round_info) + } else { + assert!( + !files.is_empty(), + "last_round_info and files must not both be empty" + ); + // This is the first round, so no prior round info. + // We'll take a look at 'files' and see what we can do. + self.split_files_into_ranges(files) + } + } + + // evaluate_prior_ranges is a helper function for derive_draft_ranges, used when there is prior round info. + // It takes the prior round's ranges, and splits them if they did vertical splitting, or combines them if + // they finished compacting their L0s. + fn evaluate_prior_ranges( + &self, + partition_info: &PartitionInfo, + last_round_info: Arc<RoundInfo>, + ) -> (Vec<CompactRange>, Option<Vec<ParquetFile>>) { + // We'll start with the ranges from the prior round. + let mut ranges = Vec::with_capacity(last_round_info.ranges.len()); + + // As we iterate through the last_round_info's ranges, we'll try to consolidate ranges for any that don't have L0s. + let mut prior_range: Option<CompactRange> = None; + + for range in &last_round_info.ranges { + // The prior round should have handled its `files_for_now`, so that should `None`. + // What the prior round considered `files_for_later` will now become `files_for_now`. 
+ assert!( + range.files_for_now.lock().unwrap().is_none(), + "files_for_now should be empty for range {}->{} on partition {}", + range.min, + range.max, + partition_info.partition_id() + ); + assert!( + range.branches.lock().unwrap().is_none(), + "branches should be empty for range {}->{} on partition {}", + range.min, + range.max, + partition_info.partition_id() + ); + + let files_for_now = range.files_for_later.lock().unwrap().take(); + assert!( + files_for_now.is_some(), + "files_for_later should not be None for range {}->{} on partition {}", + range.min, + range.max, + partition_info.partition_id() + ); + let mut files_for_now = files_for_now.unwrap(); + assert!( + !files_for_now.is_empty(), + "files_for_later should not be empty for range {}->{} on partition {}", + range.min, + range.max, + partition_info.partition_id() + ); + + if let Some(split_times) = range.op.split_times() { + // In the prior round, this range did vertical splitting. Those split times now divide this range into several ranges. + + if prior_range.is_some() { + ranges.push(prior_range.unwrap()); + prior_range = None; } - // As we identify overlaps, we'll include some don't quite overlap, but are between the prior chain and this one. - // By including them here, we preserve the opportunity to grow small L1s in a "gappy" leading edge pattern. - // If they're large, they'll be excluded from the L0->L1 compaction, so there's no harm including them. - let search_min = - (prior_overlapping_max + 1).max(Timestamp::new(prior_chain_max + 1)); - (overlaps, target_level_files) = target_level_files.into_iter().partition(|f2| { - f2.overlaps_time_range(search_min, max) - && f2.compaction_level != CompactionLevel::Final - }); - let l0cap: usize = chain - .iter() - .map(|f| f.file_size_bytes as usize) - .sum::<usize>(); - let cap = l0cap - + overlaps + let mut split_ranges = Vec::with_capacity(split_times.len()); + let mut max = range.max; + + for split_time in split_times.into_iter().rev() { + // By iterating in reverse, everything above the split time is in this split + let this_split_files_for_now: Vec<ParquetFile>; + (this_split_files_for_now, files_for_now) = files_for_now + .into_iter() + .partition(|f| f.max_time.get() > split_time); + let cap = this_split_files_for_now .iter() .map(|f| f.file_size_bytes as usize) .sum::<usize>(); - if !overlaps.is_empty() { - prior_overlapping_max = overlaps.iter().map(|f| f.max_time).max().unwrap(); - let prior_smallest_max = overlaps.iter().map(|f| f.max_time).min().unwrap(); - if prior_smallest_max < min { - // Expand the range to include this file, so it can be included (if its small and we'd like to grow it). - min = prior_smallest_max; - } + let this_split_files_for_now = if this_split_files_for_now.is_empty() { + None + } else { + Some(this_split_files_for_now.clone()) + }; + + split_ranges.insert( + 0, + CompactRange { + op: CompactType::Deferred {}, + min: split_time + 1, + max, + cap, + has_l0s: true, + files_for_now: Mutex::new(this_split_files_for_now), + branches: Mutex::new(None), + files_for_later: Mutex::new(None), + }, + ); + + // split_time is the highest time in the 'left' file, so that will be max time for the next range. + max = split_time; } - // To avoid illegal max_l0_created_at ordering issues, we can only compact ranges from the left. - // The first ineligible (too big) range will make us quit. 
-                if adding_ranges && l0cap <= max_compact_size {
-                    ranges.push(FileRange {
-                        min: min.get(),
-                        max: max.get(),
-                        cap,
-                    });
+                if !files_for_now.is_empty() {
+                    let cap = files_for_now
+                        .iter()
+                        .map(|f| f.file_size_bytes as usize)
+                        .sum::<usize>();
+                    let files_for_now = Some(files_for_now.clone());
+
+                    split_ranges.insert(
+                        0,
+                        CompactRange {
+                            op: CompactType::Deferred {},
+                            min: range.min,
+                            max,
+                            cap,
+                            has_l0s: true,
+                            files_for_now: Mutex::new(files_for_now),
+                            branches: Mutex::new(None),
+                            files_for_later: Mutex::new(None),
+                        },
+                    );
+                }
+
+                ranges.append(&mut split_ranges);
+            } else {
+                // Carry forward the prior range
+                let has_l0s = files_for_now
+                    .iter()
+                    .any(|f| f.compaction_level == CompactionLevel::Initial);
+
+                if prior_range.is_some() && (!prior_range.as_mut().unwrap().has_l0s || !has_l0s) {
+                    // This and the prior range don't both have L0s; we can consolidate.
+                    let prior = prior_range.as_mut().unwrap();
+                    prior.max = range.max;
+                    prior.cap += range.cap;
+                    prior.has_l0s = prior.has_l0s || has_l0s;
+                    prior.add_files_for_now(files_for_now);
                 } else {
-                    adding_ranges = false;
+                    if let Some(prior_range) = prior_range {
+                        // we'll not be consolidating with the prior range, so push it
+                        ranges.push(prior_range);
+                    }
+
+                    let files_for_now = if files_for_now.is_empty() {
+                        None
+                    } else {
+                        Some(files_for_now.clone())
+                    };
+                    let this_range = CompactRange {
+                        op: range.op.clone(),
+                        min: range.min,
+                        max: range.max,
+                        cap: range.cap,
+                        has_l0s,
+                        files_for_now: Mutex::new(files_for_now),
+                        branches: Mutex::new(None),
+                        files_for_later: Mutex::new(None),
+                    };
+                    prior_range = Some(this_range);
+                };
+            }
+        }
+        if let Some(prior_range) = prior_range {
+            ranges.push(prior_range);
+        }
+
+        // If we still have several ranges, L2s (if any) need to stay in round_info.files_for_later. If we have 1 range
+        // without L0s, the L2s can go in that range.
+        let mut deferred_l2s = last_round_info.take_l2_files_for_later();
+        if ranges.len() == 1 && !ranges[0].has_l0s && deferred_l2s.is_some() {
+            ranges[0].add_files_for_now(deferred_l2s.unwrap());
+            deferred_l2s = None;
+        }
+        (ranges, deferred_l2s)
+    }
+
+    // split_files_into_ranges is a helper function for derive_draft_ranges, used when there is no prior round info.
+    // It's given the files found in the catalog, and puts them into range(s).
+    fn split_files_into_ranges(
+        &self,
+        files: Vec<ParquetFile>,
+    ) -> (Vec<CompactRange>, Option<Vec<ParquetFile>>) {
+        let (l0_files, other_files): (Vec<ParquetFile>, Vec<ParquetFile>) = files
+            .into_iter()
+            .partition(|f| f.compaction_level == CompactionLevel::Initial);
+        if !l0_files.is_empty() {
+            // We'll get all the L0 files compacted to L1 before dealing with L2 files - so separate them.
+            let (l2_files_for_later, mut l1_files): (Vec<ParquetFile>, Vec<ParquetFile>) =
+                other_files
+                    .into_iter()
+                    .partition(|f| f.compaction_level == CompactionLevel::Final);
+
+            // Break up the start level files into chains of files that overlap each other.
+            // Then we'll determine if vertical splitting is needed within each chain.
+            let chains = split_into_chains(l0_files);
+
+            // This function is detecting what ranges we already have, not identifying splitting to make ranges we want.
+            // So we may have to combine some chains based on L1s overlapping.
+            let chains = merge_l1_spanned_chains(chains, &l1_files);
+
+            // the goal is nice bite sized chains. If some are very small, merge them with their neighbor(s).
+ let chains = merge_small_l0_chains(chains, self.max_total_file_size_per_plan); + + let mut ranges = Vec::with_capacity(chains.len()); + let mut this_split: Vec<ParquetFile>; + + for mut chain in chains { + let mut max = chain.iter().map(|f| f.max_time).max().unwrap().get(); + + // 'chain' is the L0s that will become a region. We also need the L1s and L2s that belong in this region. + + (this_split, l1_files) = + l1_files.into_iter().partition(|f| f.min_time.get() <= max); + + if !this_split.is_empty() { + max = max.max(this_split.iter().map(|f| f.max_time).max().unwrap().get()); } - prior_chain_max = max.get(); + this_split.append(&mut chain); + let min = this_split.iter().map(|f| f.min_time).min().unwrap().get(); + let cap = this_split + .iter() + .map(|f| f.file_size_bytes as usize) + .sum::<usize>(); + + ranges.push(CompactRange { + op: CompactType::Deferred {}, + min, + max, + cap, + has_l0s: true, + files_for_now: Mutex::new(Some(this_split)), + branches: Mutex::new(None), + files_for_later: Mutex::new(None), + }); } - // If this function returns both split times and ranges, the split times take precedence. But if we're highly backlogged, - // it is preferable to compact some ranges down to L1 as they become available, rather that splitting a potentially huge backlog - // before we compact anything. So if we've got a few ranges eligible for compaction, we'll start them with them, and may - // do more vertical splitting later. - if ranges.len() >= 10 { - // There's enough ranges to work on, discard the split times so we compact the ranges. - split_times = vec![]; + this_split = l1_files; + if !this_split.is_empty() { + let min = this_split.iter().map(|f| f.min_time).min().unwrap().get(); + let max = this_split.iter().map(|f| f.max_time).max().unwrap().get(); + let cap = this_split + .iter() + .map(|f| f.file_size_bytes as usize) + .sum::<usize>(); + + ranges.push(CompactRange { + op: CompactType::Deferred {}, + min, + max, + cap, + has_l0s: true, + files_for_now: Mutex::new(Some(this_split)), + branches: Mutex::new(None), + files_for_later: Mutex::new(None), + }); } - } - split_times.sort(); - split_times.dedup(); - (split_times, ranges) + let l2_files_for_later = if l2_files_for_later.is_empty() { + None + } else { + Some(l2_files_for_later) + }; + (ranges, l2_files_for_later) + } else { + // No start level files, we can put everything in one range. + let min = other_files.iter().map(|f| f.min_time).min().unwrap().get(); + let max = other_files.iter().map(|f| f.max_time).max().unwrap().get(); + let cap = other_files + .iter() + .map(|f| f.file_size_bytes as usize) + .sum::<usize>(); + ( + vec![CompactRange { + op: CompactType::Deferred {}, + min, + max, + cap, + has_l0s: false, + files_for_now: Mutex::new(Some(other_files)), + branches: Mutex::new(None), + files_for_later: Mutex::new(None), + }], + None, + ) + } } } @@ -409,152 +642,127 @@ impl RoundInfoSource for LevelBasedRoundInfo { async fn calculate( &self, components: Arc<Components>, - last_round_info: Option<RoundInfo>, + last_round_info: Option<Arc<RoundInfo>>, partition_info: &PartitionInfo, files: Vec<ParquetFile>, - ) -> Result<(RoundInfo, Vec<Vec<ParquetFile>>, Vec<ParquetFile>), DynError> { - let mut ranges: Vec<FileRange> = vec![]; - - if let Some(last_round_info) = last_round_info { - if let Some(last_ranges) = last_round_info.ranges() { - // Last round had L0 CompactRange. If we have unfinished business from that, - // we need to continue with those ranges. 
- for range in last_ranges { - // If this range still has overapping L0 files, we need to keep it. - for f in &files { - if f.compaction_level == CompactionLevel::Initial - && f.overlaps_ranges(&vec![range]) - { - ranges.push(range); - break; - } - } - } - } - } - - // start_level is usually the lowest level we have files in, but occasionally we decide to - // compact L1->L2 when L0s still exist. If this comes back as L1, we'll ignore L0s for this - // round and force an early L1-L2 compaction. - let start_level = get_start_level( - &files, - self.max_num_files_per_plan, - self.max_total_file_size_per_plan, - partition_info.partition_id(), - ); - - let round_info = if !ranges.is_empty() { - RoundInfo::CompactRanges { - ranges, - max_num_files_to_group: self.max_num_files_per_plan, - max_total_file_size_to_group: self.max_total_file_size_per_plan, - } - } else if start_level == CompactionLevel::Initial { - let (split_times, ranges) = self.vertical_split_handling( - partition_info.partition_id(), - files.clone().to_vec(), - self.max_total_file_size_per_plan, + ) -> Result<(Arc<RoundInfo>, bool), DynError> { + // Step 1: Establish range boundaries, with files in each range. + let (prior_ranges, mut l2_files_for_later) = + self.derive_draft_ranges(partition_info, last_round_info, files); + + let range_cnt = prior_ranges.len(); + + // Step 2: Determine the op for each range. + let mut ranges: Vec<CompactRange> = Vec::with_capacity(range_cnt); + for mut range in prior_ranges { + let files_for_now = range.files_for_now.lock().unwrap().take(); + assert!( + files_for_now.is_some(), + "files_for_now should not be None for range {}->{} on partition {}", + range.min, + range.max, + partition_info.partition_id() ); + let files_for_now = files_for_now.unwrap(); + + // If we're down to a single range, we should check if we're done. + if range_cnt == 1 + && !components + .partition_filter + .apply(partition_info, &files_for_now) + .await? 
+ { + return Ok(( + Arc::new(RoundInfo { + ranges, + l2_files_for_later: Mutex::new(l2_files_for_later), + }), + true, + )); + } - if !split_times.is_empty() { - RoundInfo::VerticalSplit { split_times } - } else if !ranges.is_empty() { - RoundInfo::CompactRanges { - ranges, - max_num_files_to_group: self.max_num_files_per_plan, - max_total_file_size_to_group: self.max_total_file_size_per_plan, + range.has_l0s = files_for_now + .iter() + .any(|f| f.compaction_level == CompactionLevel::Initial); + + if range.has_l0s { + let split_times = self.consider_vertical_splitting( + partition_info.partition_id(), + files_for_now.clone().to_vec(), + self.max_total_file_size_per_plan, + ); + + if !split_times.is_empty() { + range.op = CompactType::VerticalSplit { split_times }; + } else if self + .too_many_small_files_to_compact(&files_for_now, CompactionLevel::Initial) + { + range.op = CompactType::ManySmallFiles { + start_level: CompactionLevel::Initial, + max_num_files_to_group: self.max_num_files_per_plan, + max_total_file_size_to_group: self.max_total_file_size_per_plan, + }; + } else { + range.op = CompactType::TargetLevel { + target_level: CompactionLevel::FileNonOverlapped, + max_total_file_size_to_group: self.max_total_file_size_per_plan, + }; } - } else if self.too_many_small_files_to_compact(&files, start_level) { - RoundInfo::ManySmallFiles { - start_level, - max_num_files_to_group: self.max_num_files_per_plan, + } else if range_cnt == 1 { + range.op = CompactType::TargetLevel { + target_level: CompactionLevel::Final, max_total_file_size_to_group: self.max_total_file_size_per_plan, - } + }; } else { - RoundInfo::TargetLevel { - target_level: CompactionLevel::FileNonOverlapped, - max_total_file_size_to_group: self.max_total_file_size_per_plan, - } - } - } else { - let target_level = start_level.next(); - RoundInfo::TargetLevel { - target_level, - max_total_file_size_to_group: self.max_total_file_size_per_plan, - } - }; - - let (files_now, mut files_later) = - components - .round_split - .split(files, round_info.clone(), partition_info.partition_id()); + // The L0s of this range are compacted, but this range needs to hang out a while until its neighbors catch up. + range.op = CompactType::Deferred {}; + }; - let (branches, more_for_later) = components.divide_initial.divide( - files_now, - round_info.clone(), - partition_info.partition_id(), - ); - files_later.extend(more_for_later); - - Ok((round_info, branches, files_later)) - } -} - -// get_start_level decides what level to start compaction from. Often this is the lowest level -// we have ParquetFiles in, but occasionally we decide to compact L1->L2 when L0s still exist. -// -// If we ignore the invariants (where intra-level overlaps are allowed), this would be a math problem -// to optimize write amplification. -// -// However, allowing intra-level overlaps in L0 but not L1/L2 adds extra challenge to compacting L0s to L1. -// This is especially true when there are large quantitites of overlapping L0s and L1s, potentially resulting -// in many split/compact cycles to resolve the overlaps. -// -// Since L1 & L2 only have inter-level overlaps, they can be compacted with just a few splits to align the L1s -// with the L2s. The relative ease of moving data from L1 to L2 provides additional motivation to compact the -// L1s to L2s when a backlog of L0s exist. The easily solvable L1->L2 compaction can give us a clean slate in -// L1, greatly simplifying the remaining L0->L1 compactions. 
-fn get_start_level( - files: &[ParquetFile], - max_files: usize, - max_bytes: usize, - partition: TransitionPartitionId, -) -> CompactionLevel { - // panic if the files are empty - assert!( - !files.is_empty(), - "files should not be empty, partition_id={}", - partition - ); - - let mut l0_cnt: usize = 0; - let mut l0_bytes: usize = 0; - let mut l1_bytes: usize = 0; - - for f in files { - match f.compaction_level { - CompactionLevel::Initial => { - l0_cnt += 1; - l0_bytes += f.file_size_bytes as usize; - } - CompactionLevel::FileNonOverlapped => { - l1_bytes += f.file_size_bytes as usize; + if range.op.is_deferred() { + range.add_files_for_later(files_for_now); + ranges.push(range); + } else { + // start_level is usually the lowest level we have files in, but occasionally we decide to + // compact L1->L2 when L0s still exist. If this comes back as L1, we'll ignore L0s for this + // round and force an early L1-L2 compaction. + let (files_for_now, mut files_later) = components.round_split.split( + files_for_now, + range.op.clone(), + partition_info.partition_id(), + ); + + let (branches, more_for_later) = components.divide_initial.divide( + files_for_now, + range.op.clone(), + partition_info.partition_id(), + ); + + files_later.extend(more_for_later); + + if !branches.is_empty() { + range.branches = Mutex::new(Some(branches)); + } // else, leave it None, since Some is assumed to be non-empty. + + if !files_later.is_empty() { + range.files_for_later = Mutex::new(Some(files_later)); + } // else, leave it None, since Some is assumed to be non-empty. + ranges.push(range); } - _ => {} } - } - if l1_bytes > 3 * max_bytes && (l0_cnt > max_files || l0_bytes > max_bytes) { - // L1 is big enough to pose an overlap challenge compacting from L0, and there is quite a bit more coming from L0. - // The criteria for this early L1->L2 compaction significanly impacts write amplification. The above values optimize - // existing test cases, but may be changed as additional test cases are added. - CompactionLevel::FileNonOverlapped - } else if l0_bytes > 0 { - CompactionLevel::Initial - } else if l1_bytes > 0 { - CompactionLevel::FileNonOverlapped - } else { - CompactionLevel::Final + if ranges.len() == 1 && !ranges[0].has_l0s && l2_files_for_later.is_some() { + // Single range without L0s, its time to work on the L2s. + ranges[0].add_files_for_now(l2_files_for_later.unwrap()); + l2_files_for_later = None; + } + + Ok(( + Arc::new(RoundInfo { + ranges, + l2_files_for_later: Mutex::new(l2_files_for_later), + }), + false, + )) } } diff --git a/compactor/src/components/round_split/many_files.rs b/compactor/src/components/round_split/many_files.rs index cd72ad27f3..921f3474cc 100644 --- a/compactor/src/components/round_split/many_files.rs +++ b/compactor/src/components/round_split/many_files.rs @@ -2,7 +2,7 @@ use std::fmt::Display; use data_types::{CompactionLevel, ParquetFile, TransitionPartitionId}; -use crate::RoundInfo; +use crate::round_info::CompactType; use super::RoundSplit; @@ -28,12 +28,12 @@ impl RoundSplit for ManyFilesRoundSplit { fn split( &self, files: Vec<ParquetFile>, - round_info: RoundInfo, + op: CompactType, partition: TransitionPartitionId, ) -> (Vec<ParquetFile>, Vec<ParquetFile>) { // Scpecify specific arms to avoid missing any new variants - match round_info { - RoundInfo::ManySmallFiles { start_level, .. } => { + match op { + CompactType::ManySmallFiles { start_level, .. 
} => { // Split start_level from the rest let (start_level_files, rest) = files .into_iter() @@ -43,7 +43,7 @@ impl RoundSplit for ManyFilesRoundSplit { // A TargetLevel round only needs its start (source) and target (destination) levels. // All other files are a distraction that should wait for another round. - RoundInfo::TargetLevel { target_level, .. } => { + CompactType::TargetLevel { target_level, .. } => { // Split start_level & target level from the rest let start_level = target_level.prev(); let (start_files, rest) = files.into_iter().partition(|f| { @@ -54,7 +54,7 @@ impl RoundSplit for ManyFilesRoundSplit { (start_files, rest) } - RoundInfo::SimulatedLeadingEdge { .. } => { + CompactType::SimulatedLeadingEdge { .. } => { // Split first two levels from the rest let (start_files, rest) = files.into_iter().partition(|f| { f.compaction_level == CompactionLevel::Initial @@ -64,7 +64,7 @@ impl RoundSplit for ManyFilesRoundSplit { (start_files, rest) } - RoundInfo::VerticalSplit { split_times } => { + CompactType::VerticalSplit { split_times } => { // We're splitting L0 files at split_times. So any L0 that overlaps a split_time needs processed, and all other files are ignored until later. let (split_files, rest): (Vec<ParquetFile>, Vec<ParquetFile>) = files.into_iter().partition(|f| { @@ -79,15 +79,9 @@ impl RoundSplit for ManyFilesRoundSplit { (split_files, rest) } - RoundInfo::CompactRanges { ranges, .. } => { - // We're compacting L0 & L1s in the specified ranges. Files outside these ranges are - // ignored until a later round. - let (compact_files, rest): (Vec<ParquetFile>, Vec<ParquetFile>) = - files.into_iter().partition(|f| { - f.compaction_level != CompactionLevel::Final && f.overlaps_ranges(&ranges) - }); - - (compact_files, rest) + CompactType::Deferred { .. 
} => { + // Nothing now, its all for later + (vec![], files) } } } @@ -98,8 +92,6 @@ mod tests { use data_types::{CompactionLevel, PartitionId}; use iox_tests::ParquetFileBuilder; - use crate::RoundInfo; - use super::*; #[test] @@ -109,7 +101,7 @@ mod tests { #[test] fn test_split_many_files() { - let round_info = RoundInfo::ManySmallFiles { + let op = CompactType::ManySmallFiles { start_level: CompactionLevel::Initial, max_num_files_to_group: 2, max_total_file_size_to_group: 100, @@ -119,7 +111,7 @@ mod tests { // empty input assert_eq!( - split.split(vec![], round_info.clone(), default_partition.clone()), + split.split(vec![], op.clone(), default_partition.clone()), (vec![], vec![]) ); @@ -133,7 +125,7 @@ mod tests { assert_eq!( split.split( vec![f1.clone(), f2.clone()], - round_info.clone(), + op.clone(), default_partition.clone() ), (vec![f1.clone(), f2.clone()], vec![]) @@ -149,7 +141,7 @@ mod tests { assert_eq!( split.split( vec![f1.clone(), f2.clone(), f3.clone(), f4.clone()], - round_info.clone(), + op.clone(), default_partition, ), (vec![f1, f2], vec![f3, f4]) @@ -158,7 +150,7 @@ mod tests { #[test] fn test_split_target_level() { - let round_info = RoundInfo::TargetLevel { + let op = CompactType::TargetLevel { target_level: CompactionLevel::Final, max_total_file_size_to_group: 100 * 1024 * 1024, }; @@ -167,7 +159,7 @@ mod tests { // empty input assert_eq!( - split.split(vec![], round_info.clone(), default_partition.clone()), + split.split(vec![], op.clone(), default_partition.clone()), (vec![], vec![]) ); @@ -175,11 +167,7 @@ mod tests { let f1 = ParquetFileBuilder::new(1).build(); let f2 = ParquetFileBuilder::new(2).build(); assert_eq!( - split.split( - vec![f1.clone(), f2.clone()], - round_info.clone(), - default_partition - ), + split.split(vec![f1.clone(), f2.clone()], op.clone(), default_partition), (vec![f1, f2], vec![]) ); } diff --git a/compactor/src/components/round_split/mod.rs b/compactor/src/components/round_split/mod.rs index e4bdafc43e..fafa50bcd8 100644 --- a/compactor/src/components/round_split/mod.rs +++ b/compactor/src/components/round_split/mod.rs @@ -2,8 +2,7 @@ use std::fmt::{Debug, Display}; use data_types::{ParquetFile, TransitionPartitionId}; -use crate::RoundInfo; - +use crate::round_info::CompactType; pub mod many_files; pub trait RoundSplit: Debug + Display + Send + Sync { @@ -16,7 +15,7 @@ pub trait RoundSplit: Debug + Display + Send + Sync { fn split( &self, files: Vec<ParquetFile>, - round_info: RoundInfo, + op: CompactType, partition: TransitionPartitionId, ) -> (Vec<ParquetFile>, Vec<ParquetFile>); } diff --git a/compactor/src/components/split_or_compact/start_level_files_to_split.rs b/compactor/src/components/split_or_compact/start_level_files_to_split.rs index 7357cf509c..820f3a04cf 100644 --- a/compactor/src/components/split_or_compact/start_level_files_to_split.rs +++ b/compactor/src/components/split_or_compact/start_level_files_to_split.rs @@ -233,6 +233,19 @@ pub fn linear_dist_ranges( // Our hypothetical regions are either linearly distributed, or small enough that the capacity spikes are still under max compact size. // Now we can attempt to consoliate regions of similar data density (or consolidate dissimilar regions up to max compact size). let mut ranges = Vec::with_capacity(split_count); + + if split_count as i64 >= max_time - min_time { + // odd boundary case - we have very dense data. Make a region per ns. 
+        for t in min_time..max_time + 1 {
+            ranges.push(FileRange {
+                min: t,
+                max: t,
+                cap: cap / (max_time - min_time + 1) as usize,
+            });
+        }
+        return ranges;
+    }
+
     let mut consolidated_min_time: i64 = 0;
     let mut consolidated_max_time: i64 = 0;
     let mut consolidated_total_cap: usize = 0;
@@ -431,6 +444,47 @@ pub fn merge_small_l0_chains(
     merged_chains
 }
 
+// merge_l1_spanned_chains takes a vec of L0 chains, and a vec of L1 files. Any pair of L0 chains that is
+// spanned by a single L1 is merged.
+// This is used when detecting previous vertical splitting, when we're deriving regions for the first time.
+// While compacting L0s to L1s, we must consider the existing L1s, so we don't create new L1s that overlap the
+// old L1s (an invariant violation). So if two chains of L0s don't overlap each other, but both overlap the
+// same L1, we must consider them a single region. Likely the first step will be to split the L1, allowing them
+// to be separate regions, but for the initial region detection, we must consider them a single region.
+// That's what this function does. Any L0 chains that overlap the same L1 are merged - regardless of their size.
+pub fn merge_l1_spanned_chains(
+    mut chains: Vec<Vec<ParquetFile>>,
+    l1s: &[ParquetFile],
+) -> Vec<Vec<ParquetFile>> {
+    chains.sort_by_key(|chain| chain[0].min_time);
+
+    let mut merged_chains: Vec<Vec<ParquetFile>> = Vec::with_capacity(chains.len());
+    let mut prior_chain_idx: i32 = -1;
+    let mut prior_l1_max_time: Option<Timestamp> = None;
+    for chain in &chains {
+        let this_chain_min = chain.iter().map(|f| f.min_time).min().unwrap();
+        let this_chain_max = chain.iter().map(|f| f.max_time).max().unwrap();
+
+        if prior_l1_max_time.is_some() && prior_l1_max_time.unwrap() >= this_chain_min {
+            // this chain is overlapped by an L1 that spans the prior chain and this one. Merge them.
+            merged_chains[prior_chain_idx as usize].append(&mut chain.clone());
+        } else {
+            merged_chains.push(chain.to_vec());
+            prior_chain_idx += 1;
+        }
+
+        // If an L1 overlaps a chain and extends beyond it, we need to note it. Since L1s cannot overlap, there will be
+        // at most one L1 that overlaps the end of this chain.
+        prior_l1_max_time = l1s
+            .iter()
+            .filter(|l1| l1.min_time <= this_chain_max && l1.max_time > this_chain_max)
+            .map(|l1| l1.max_time)
+            .max();
+    }
+
+    merged_chains
+}
+
 // get_max_l0_created_at gets the highest max_l0_created_at from all files within a vec.
fn get_max_l0_created_at(files: Vec<ParquetFile>) -> Timestamp { files diff --git a/compactor/src/driver.rs b/compactor/src/driver.rs index 961fc7a0bd..de9f302929 100644 --- a/compactor/src/driver.rs +++ b/compactor/src/driver.rs @@ -23,6 +23,7 @@ use crate::{ error::{DynError, ErrorKind, ErrorKindExt, SimpleError}, file_classification::{FileClassification, FilesForProgress}, partition_info::PartitionInfo, + round_info::CompactType, PlanIR, RoundInfo, }; @@ -221,35 +222,34 @@ async fn try_compact_partition( let mut files = components.partition_files_source.fetch(partition_id).await; let partition_info = components.partition_info_source.fetch(partition_id).await?; let transmit_progress_signal = Arc::new(transmit_progress_signal); - let mut last_round_info: Option<RoundInfo> = None; + let mut last_round_info: Option<Arc<RoundInfo>> = None; - // loop for each "Round", consider each file in the partition - // for partitions with a lot of compaction work to do, keeping the work divided into multiple rounds, - // with mutliple calls to execute_branch is important to frequently clean the scratchpad and prevent - // high memory use. - loop { - let round_span = span.child("round"); + if files.is_empty() { + // This should be unreachable, but can happen when someone is manually activiting partitions for compaction. + info!( + partition_id = partition_info.partition_id.get(), + "that's odd - no files to compact in partition" + ); + return Ok(()); + } - if files.is_empty() { - // This should be unreachable, but can happen when someone is manually activiting partitions for compaction. - info!( - partition_id = partition_info.partition_id.get(), - "that's odd - no files to compact in partition" - ); - return Ok(()); - } + // This is the stop condition which will be different for different version of compaction + // and describe where the filter is created at version_specific_partition_filters function + if !components + .partition_filter + .apply(&partition_info, &files) + .await? + { + return Ok(()); + } - // This is the stop condition which will be different for different version of compaction - // and describe where the filter is created at version_specific_partition_filters function - if !components - .partition_filter - .apply(&partition_info, &files) - .await? - { - return Ok(()); - } + // loop for each "Round". A round is comprised of the next thing we can do, on one or more branches within + // one or more CompactRegions. A round does not feed back into itself. So when split|compaction output feeds + // into another split|compaction, that's a new round. + loop { + let round_span = span.child("round"); - let (round_info, branches, files_later) = components + let (round_info, done) = components .round_info_source .calculate( Arc::<Components>::clone(&components), @@ -259,49 +259,90 @@ async fn try_compact_partition( ) .await?; - files = files_later; + files = vec![]; + + if done || round_info.ranges.is_empty() { + return Ok(()); + } info!( partition_id = partition_info.partition_id.get(), - branch_count = branches.len(), - concurrency_limit = df_semaphore.total_permits(), - "compacting branches concurrently", + range_count = round_info.ranges.len(), + "compacting ranges", ); - // concurrently run the branches. 
- let branches_output: Vec<Vec<ParquetFile>> = stream::iter(branches.into_iter()) - .map(|branch| { - let partition_info = Arc::clone(&partition_info); - let components = Arc::clone(&components); - let df_semaphore = Arc::clone(&df_semaphore); - let transmit_progress_signal = Arc::clone(&transmit_progress_signal); - let scratchpad = Arc::clone(&scratchpad_ctx); - let job = job.clone(); - let branch_span = round_span.child("branch"); - let round_info = round_info.clone(); - let gossip_handle = gossip_handle.clone(); - - async move { - execute_branch( - branch_span, - job, - branch, - df_semaphore, - components, - scratchpad, - partition_info, - round_info, - transmit_progress_signal, - gossip_handle, - ) - .await - } - }) - .buffer_unordered(df_semaphore.total_permits()) - .try_collect() - .await?; + // TODO: consider adding concurrency on the ranges + for range in &round_info.ranges { + // For each range, we'll consume branches from the range_info and put the output into files_for_later in the range_info. + let branches = range.branches.lock().unwrap().take(); + let branches_cnt = branches.as_ref().map(|v| v.len()).unwrap_or(0); - files.extend(branches_output.into_iter().flatten()); + info!( + partition_id = partition_info.partition_id.get(), + op = range.op.to_string(), + min = range.min, + max = range.max, + cap = range.cap, + branch_count = branches_cnt, + concurrency_limit = df_semaphore.total_permits(), + "compacting branches concurrently", + ); + + if branches.is_none() { + continue; + } + + // concurrently run the branches. + let branches_output: Vec<Vec<ParquetFile>> = + stream::iter(branches.unwrap().into_iter()) + .map(|branch| { + let partition_info = Arc::clone(&partition_info); + let components = Arc::clone(&components); + let df_semaphore = Arc::clone(&df_semaphore); + let transmit_progress_signal = Arc::clone(&transmit_progress_signal); + let scratchpad = Arc::clone(&scratchpad_ctx); + let job = job.clone(); + let branch_span = round_span.child("branch"); + let gossip_handle = gossip_handle.clone(); + let op = range.op.clone(); + + async move { + execute_branch( + branch_span, + job, + branch, + df_semaphore, + components, + scratchpad, + partition_info, + op, + transmit_progress_signal, + gossip_handle, + ) + .await + } + }) + .buffer_unordered(df_semaphore.total_permits()) + .try_collect() + .await?; + + // The branches for this range are done, their output needs added to this range's files_for_later. + let branches_output: Vec<ParquetFile> = branches_output.into_iter().flatten().collect(); + + let mut files_for_later = range + .files_for_later + .lock() + .unwrap() + .take() + .unwrap_or(Vec::new()); + + files_for_later.extend(branches_output); + range + .files_for_later + .lock() + .unwrap() + .replace(files_for_later); + } last_round_info = Some(round_info); } } @@ -316,7 +357,7 @@ async fn execute_branch( components: Arc<Components>, scratchpad_ctx: Arc<dyn Scratchpad>, partition_info: Arc<PartitionInfo>, - round_info: RoundInfo, + op: CompactType, transmit_progress_signal: Arc<Sender<bool>>, gossip_handle: Option<Arc<CompactionEventTx>>, ) -> Result<Vec<ParquetFile>, DynError> { @@ -336,7 +377,7 @@ async fn execute_branch( files_to_keep, } = components .file_classifier - .classify(&partition_info, &round_info, branch); + .classify(&partition_info, &op, branch); // Evaluate whether there's work to do or not based on the files classified for // making progress on. If there's no work to do, return early. 
diff --git a/compactor/src/file_classification.rs b/compactor/src/file_classification.rs index 38e5c4a67a..dce0e806dc 100644 --- a/compactor/src/file_classification.rs +++ b/compactor/src/file_classification.rs @@ -88,6 +88,7 @@ pub enum FilesToSplitOrCompact { pub enum NoneReason { NoInputFiles, NoFilesToSplitFound, + Deferred, } /// Reasons why there are files to split diff --git a/compactor/src/round_info.rs b/compactor/src/round_info.rs index 51b69da86a..cc62251a81 100644 --- a/compactor/src/round_info.rs +++ b/compactor/src/round_info.rs @@ -1,13 +1,15 @@ //! Information about the current compaction round -use std::fmt::Display; +use std::{fmt::Display, sync::Mutex}; -use data_types::{CompactionLevel, FileRange}; +use data_types::{CompactionLevel, ParquetFile}; /// Information about the current compaction round (see driver.rs for /// more details about a round) + +/// FileRange describes a range of files by the min/max time and the sum of their capacities. #[derive(Debug, PartialEq, Eq, Clone)] -pub enum RoundInfo { +pub enum CompactType { /// compacting to target level TargetLevel { /// compaction level of target fles @@ -53,19 +55,11 @@ pub enum RoundInfo { split_times: Vec<i64>, }, - /// CompactRanges are overlapping chains of L0s are less than max_compact_size, with no L0 or L1 overlaps - /// between ranges. - CompactRanges { - /// Ranges describing distinct chains of L0s to be compacted. - ranges: Vec<FileRange>, - /// max number of files to group in each plan - max_num_files_to_group: usize, - /// max total size limit of files to group in each plan - max_total_file_size_to_group: usize, - }, + /// Deferred is holding place for regions we're not ready to work on yet. + Deferred {}, } -impl Display for RoundInfo { +impl Display for CompactType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::TargetLevel { target_level, max_total_file_size_to_group } => write!(f, "TargetLevel: {target_level} {max_total_file_size_to_group}"), @@ -79,12 +73,12 @@ impl Display for RoundInfo { max_total_file_size_to_group, } => write!(f, "SimulatedLeadingEdge: {max_num_files_to_group}, {max_total_file_size_to_group}",), Self::VerticalSplit { split_times } => write!(f, "VerticalSplit: {split_times:?}"), - Self::CompactRanges { ranges, max_num_files_to_group, max_total_file_size_to_group } => write!(f, "{:?}, {max_num_files_to_group}, {max_total_file_size_to_group}", ranges) + Self::Deferred {} => write!(f, "Deferred"), } } } -impl RoundInfo { +impl CompactType { /// what levels should the files in this round be? pub fn target_level(&self) -> CompactionLevel { match self { @@ -93,7 +87,7 @@ impl RoundInfo { Self::ManySmallFiles { start_level, .. } => *start_level, Self::SimulatedLeadingEdge { .. } => CompactionLevel::FileNonOverlapped, Self::VerticalSplit { .. } => CompactionLevel::Initial, - Self::CompactRanges { .. } => CompactionLevel::Initial, + Self::Deferred {} => CompactionLevel::Initial, // n/a } } @@ -120,10 +114,7 @@ impl RoundInfo { .. } => Some(*max_num_files_to_group), Self::VerticalSplit { .. } => None, - Self::CompactRanges { - max_num_files_to_group, - .. - } => Some(*max_num_files_to_group), + Self::Deferred {} => None, } } @@ -140,25 +131,148 @@ impl RoundInfo { .. } => Some(*max_total_file_size_to_group), Self::VerticalSplit { .. } => None, - Self::CompactRanges { - max_total_file_size_to_group, - .. - } => Some(*max_total_file_size_to_group), + Self::Deferred {} => None, } } - /// return compaction ranges, when available. 
- /// We could generate ranges from VerticalSplit split times, but that asssumes the splits resulted in - /// no ranges > max_compact_size, which is not guaranteed. Instead, we'll detect the ranges the first - /// time after VerticalSplit, and may decide to resplit subset of the files again if data was more - /// non-linear than expected. - pub fn ranges(&self) -> Option<Vec<FileRange>> { + /// return split_times, when available. + pub fn split_times(&self) -> Option<Vec<i64>> { match self { Self::TargetLevel { .. } => None, Self::ManySmallFiles { .. } => None, Self::SimulatedLeadingEdge { .. } => None, - Self::VerticalSplit { .. } => None, - Self::CompactRanges { ranges, .. } => Some(ranges.clone()), + Self::VerticalSplit { split_times } => Some(split_times.clone()), + Self::Deferred {} => None, } } + + pub fn is_deferred(&self) -> bool { + matches!(self, Self::Deferred {}) + } +} + +// CompactRange describes a range of files by the min/max time and allows these files to be compacted without +// consideration of other ranges. Each range may go through many rounds of compaction before being +// consolidated with adjacent ranges. +#[derive(Debug)] +pub struct CompactRange { + // The type of operation required for this range. + pub op: CompactType, + /// The minimum time of any file in the range + pub min: i64, + /// The maximum time of any file in the range + pub max: i64, + /// The sum of the sizes of all files in the range + pub cap: usize, + // Inidcates if L0s are present in this range (used when consolidating ranges) + pub has_l0s: bool, + /// Files to be potentially operated on now. Files start here, then go to branches or files_for_later. + pub files_for_now: Mutex<Option<Vec<ParquetFile>>>, + /// Compaction branches within this range + pub branches: Mutex<Option<Vec<Vec<ParquetFile>>>>, + /// Files ignored for now, but will be considered later + pub files_for_later: Mutex<Option<Vec<ParquetFile>>>, +} + +impl Display for CompactRange { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "CompactRange: {}, {}->{}, {}, has_l0s:{}, {}, {}, {}", + self.op, + self.min, + self.max, + self.cap, + self.has_l0s, + if let Some(files_for_now) = &*self.files_for_now.lock().unwrap() { + format!("files_for_now: {}", files_for_now.len()) + } else { + "".to_string() + }, + if let Some(branches) = &*self.branches.lock().unwrap() { + format!("branches: {}", branches.len()) + } else { + "".to_string() + }, + if let Some(files_for_later) = &*self.files_for_later.lock().unwrap() { + format!("files_for_later: {}", files_for_later.len()) + } else { + "".to_string() + }, + ) + } +} + +impl Clone for CompactRange { + fn clone(&self) -> Self { + Self { + op: self.op.clone(), + min: self.min, + max: self.max, + cap: self.cap, + has_l0s: self.has_l0s, + branches: Mutex::new(self.branches.lock().unwrap().clone()), + files_for_later: Mutex::new(self.files_for_later.lock().unwrap().clone()), + files_for_now: Mutex::new(self.files_for_now.lock().unwrap().clone()), + } + } +} + +impl CompactRange { + pub fn add_files_for_now(&self, files: Vec<ParquetFile>) { + if !files.is_empty() { + let mut files_for_now = self.files_for_now.lock().unwrap(); + if let Some(files_for_now) = &mut *files_for_now { + files_for_now.extend(files); + } else { + *files_for_now = Some(files); + } + } + } + + pub fn add_files_for_later(&self, files: Vec<ParquetFile>) { + if !files.is_empty() { + let mut files_for_later = self.files_for_later.lock().unwrap(); + if let Some(files_for_later) = &mut 
*files_for_later { + files_for_later.extend(files); + } else { + *files_for_later = Some(files); + } + } + } +} + +/// RoundInfo is comprised of CompactRanges of files to be compacted. +#[derive(Debug)] +pub struct RoundInfo { + /// ranges are the CompactRanges of files which do not overlap at the L0/L1 levels. + /// When we have large numbers of overlapped L0s, dividing them into ranges keeps them in bite sized chunks that + /// are easier to deal with. As L0s are comparted to L1s, the ranges are combined. Its preferable to do the L1->L2 + /// compaction with a single range to avoid unnecessary divisions within the L2 files. + pub ranges: Vec<CompactRange>, + + /// l2_files_for_later holds L2 files while the ranges have L0s. Splitting into many ranges is necessary for the L0->L1 + /// compactions, to keep the problem size manageable. It would be unfortunate (inefficient) to split L2 files to prevent + /// them from spanning multiple CompactRanges. So instead, the L2 files are held in files_for_later until the L0s are gone, + /// at which point we'll have a single range, which gets the L2 files. + /// Note that each CompactRange also has its own files_for_later, which are L0/L1 files within that range that aren't being + /// compacted in the current round. + pub l2_files_for_later: Mutex<Option<Vec<ParquetFile>>>, +} + +impl Display for RoundInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + for range in &self.ranges { + writeln!(f, "{:?}", range)?; + } + Ok(()) + } +} + +impl RoundInfo { + /// take_files_for_later takes the files_for_later from the RoundInfo. + pub fn take_l2_files_for_later(&self) -> Option<Vec<ParquetFile>> { + let l2_files_for_later = self.l2_files_for_later.lock().unwrap().take(); + l2_files_for_later + } } diff --git a/compactor/tests/integration.rs b/compactor/tests/integration.rs index 91b45f21d3..63d1ea3448 100644 --- a/compactor/tests/integration.rs +++ b/compactor/tests/integration.rs @@ -56,9 +56,9 @@ async fn test_num_files_over_limit() { setup.run_compact().await; // - // read files and verify 3 files + // read files and verify 2 files let files = setup.list_by_table_not_to_delete().await; - assert_eq!(files.len(), 3); + assert_eq!(files.len(), 2); // // verify ID and compaction level of the files @@ -68,9 +68,8 @@ async fn test_num_files_over_limit() { assert_levels( &files, vec![ - (7, CompactionLevel::FileNonOverlapped), - (8, CompactionLevel::FileNonOverlapped), (9, CompactionLevel::FileNonOverlapped), + (10, CompactionLevel::FileNonOverlapped), ], ); } diff --git a/compactor/tests/layouts/backfill.rs b/compactor/tests/layouts/backfill.rs index f74def39f1..be544e6d42 100644 --- a/compactor/tests/layouts/backfill.rs +++ b/compactor/tests/layouts/backfill.rs @@ -137,14 +137,15 @@ async fn random_backfill_empty_partition() { - "L0.49[76,932] 1.05us |-----------------------------------L0.49------------------------------------| " - "L0.50[42,986] 1.05us |---------------------------------------L0.50----------------------------------------| " - "L0.51[0,1] 999ns |L0.51| " - - "**** Simulation run 0, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "**** Simulation run 0, type=compact(TotalSizeLessThanMaxCompactSize). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.1[76,932] 1us |------------------------------------------L0.1------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - - "L0 " - - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " + - "L0.51[0,1] 999ns |-----------------------------------------L0.51------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L1, all files 10mb " + - "L1.?[0,1] 999ns |------------------------------------------L1.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 1 files: L0.51" + - " Creating 1 files" - "**** Simulation run 1, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - "L0.2[42,986] 1us |------------------------------------------L0.2------------------------------------------|" @@ -155,270 +156,267 @@ async fn random_backfill_empty_partition() { - "L0.?[630,986] 1us 4mb |-------------L0.?--------------| " - "**** Simulation run 2, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.3[173,950] 1us |------------------------------------------L0.3------------------------------------------|" + - "L0.6[42,986] 1us |------------------------------------------L0.6------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1us 4mb |---------------L0.?----------------| " - - "**** Simulation run 3, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1us 4mb |-------------L0.?--------------| " + - "**** Simulation run 3, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.4[50,629] 1us |------------------------------------------L0.4------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.10[42,986] 1.01us |-----------------------------------------L0.10------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1us 5mb |------------------L0.?------------------| " + - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " - "**** Simulation run 4, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.5[76,932] 1us |------------------------------------------L0.5------------------------------------------|" + - "L0.14[42,986] 1.01us |-----------------------------------------L0.14------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " + - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " - "**** Simulation run 5, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.6[42,986] 1us |------------------------------------------L0.6------------------------------------------|" + - "L0.18[42,986] 1.02us |-----------------------------------------L0.18------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1us 4mb |-------------L0.?--------------| " + - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - "**** Simulation run 6, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.7[173,950] 1.01us |------------------------------------------L0.7------------------------------------------|" + - "L0.22[42,986] 1.02us |-----------------------------------------L0.22------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - - "**** Simulation run 7, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " + - "**** Simulation run 7, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.8[50,629] 1.01us |------------------------------------------L0.8------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.26[42,986] 1.02us |-----------------------------------------L0.26------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - "**** Simulation run 8, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.9[76,932] 1.01us |------------------------------------------L0.9------------------------------------------|" + - "L0.30[42,986] 1.03us |-----------------------------------------L0.30------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " - "**** Simulation run 9, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.10[42,986] 1.01us |-----------------------------------------L0.10------------------------------------------|" + - "L0.34[42,986] 1.03us |-----------------------------------------L0.34------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " + - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " - "**** Simulation run 10, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.11[173,950] 1.01us |-----------------------------------------L0.11------------------------------------------|" + - "L0.38[42,986] 1.04us |-----------------------------------------L0.38------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - - "**** Simulation run 11, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " + - "**** Simulation run 11, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.12[50,629] 1.01us |-----------------------------------------L0.12------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.42[42,986] 1.04us |-----------------------------------------L0.42------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " - "**** Simulation run 12, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.13[76,932] 1.01us |-----------------------------------------L0.13------------------------------------------|" + - "L0.46[42,986] 1.05us |-----------------------------------------L0.46------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " - "**** Simulation run 13, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.14[42,986] 1.01us |-----------------------------------------L0.14------------------------------------------|" + - "L0.50[42,986] 1.05us |-----------------------------------------L0.50------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " - - "**** Simulation run 14, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " + - "**** Simulation run 14, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.15[173,950] 1.01us |-----------------------------------------L0.15------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.4[50,629] 1us |------------------------------------------L0.4------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " + - "L0.?[50,355] 1us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1us 5mb |------------------L0.?------------------| " - "**** Simulation run 15, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.16[50,629] 1.01us |-----------------------------------------L0.16------------------------------------------|" + - "L0.8[50,629] 1.01us |------------------------------------------L0.8------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " - - "**** Simulation run 16, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "**** Simulation run 16, type=split(VerticalSplit)(split_times=[355]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.17[76,932] 1.02us |-----------------------------------------L0.17------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.12[50,629] 1.01us |-----------------------------------------L0.12------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - - "**** Simulation run 17, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "**** Simulation run 17, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.18[42,986] 1.02us |-----------------------------------------L0.18------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.16[50,629] 1.01us |-----------------------------------------L0.16------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - - "**** Simulation run 18, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "**** Simulation run 18, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.19[173,950] 1.02us |-----------------------------------------L0.19------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.20[50,629] 1.02us |-----------------------------------------L0.20------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " + - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " - "**** Simulation run 19, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.20[50,629] 1.02us |-----------------------------------------L0.20------------------------------------------|" + - "L0.24[50,629] 1.02us |-----------------------------------------L0.24------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " - - "**** Simulation run 20, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "**** Simulation run 20, type=split(VerticalSplit)(split_times=[355]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.21[76,932] 1.02us |-----------------------------------------L0.21------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.28[50,629] 1.03us |-----------------------------------------L0.28------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - - "**** Simulation run 21, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "**** Simulation run 21, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.22[42,986] 1.02us |-----------------------------------------L0.22------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.32[50,629] 1.03us |-----------------------------------------L0.32------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - - "**** Simulation run 22, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "**** Simulation run 22, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.23[173,950] 1.02us |-----------------------------------------L0.23------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.36[50,629] 1.03us |-----------------------------------------L0.36------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " + - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " - "**** Simulation run 23, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.24[50,629] 1.02us |-----------------------------------------L0.24------------------------------------------|" + - "L0.40[50,629] 1.04us |-----------------------------------------L0.40------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " - - "**** Simulation run 24, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" + - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " + - "**** Simulation run 24, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.25[76,932] 1.02us |-----------------------------------------L0.25------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.44[50,629] 1.04us |-----------------------------------------L0.44------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - - "**** Simulation run 25, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " + - "**** Simulation run 25, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.26[42,986] 1.02us |-----------------------------------------L0.26------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.48[50,629] 1.05us |-----------------------------------------L0.48------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " + - "L0.?[50,355] 1.05us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.05us 5mb |------------------L0.?------------------| " - "**** Simulation run 26, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.27[173,950] 1.03us |-----------------------------------------L0.27------------------------------------------|" + - "L0.1[76,932] 1us |------------------------------------------L0.1------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - - "**** Simulation run 27, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " + - "**** Simulation run 27, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.28[50,629] 1.03us |-----------------------------------------L0.28------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.5[76,932] 1us |------------------------------------------L0.5------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " - "**** Simulation run 28, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.29[76,932] 1.03us |-----------------------------------------L0.29------------------------------------------|" + - "L0.9[76,932] 1.01us |------------------------------------------L0.9------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " - "**** Simulation run 29, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.30[42,986] 1.03us |-----------------------------------------L0.30------------------------------------------|" + - "L0.13[76,932] 1.01us |-----------------------------------------L0.13------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " - "**** Simulation run 30, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.31[173,950] 1.03us |-----------------------------------------L0.31------------------------------------------|" + - "L0.17[76,932] 1.02us |-----------------------------------------L0.17------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - - "**** Simulation run 31, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " + - "**** Simulation run 31, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.32[50,629] 1.03us |-----------------------------------------L0.32------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.21[76,932] 1.02us |-----------------------------------------L0.21------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - "**** Simulation run 32, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.33[76,932] 1.03us |-----------------------------------------L0.33------------------------------------------|" + - "L0.25[76,932] 1.02us |-----------------------------------------L0.25------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - "**** Simulation run 33, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.34[42,986] 1.03us |-----------------------------------------L0.34------------------------------------------|" + - "L0.29[76,932] 1.03us |-----------------------------------------L0.29------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " - "**** Simulation run 34, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.35[173,950] 1.03us |-----------------------------------------L0.35------------------------------------------|" + - "L0.33[76,932] 1.03us |-----------------------------------------L0.33------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - - "**** Simulation run 35, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " + - "**** Simulation run 35, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.36[50,629] 1.03us |-----------------------------------------L0.36------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.37[76,932] 1.04us |-----------------------------------------L0.37------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " - "**** Simulation run 36, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.37[76,932] 1.04us |-----------------------------------------L0.37------------------------------------------|" + - "L0.41[76,932] 1.04us |-----------------------------------------L0.41------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " @@ -426,342 +424,393 @@ async fn random_backfill_empty_partition() { - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " - "**** Simulation run 37, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.38[42,986] 1.04us |-----------------------------------------L0.38------------------------------------------|" + - "L0.45[76,932] 1.04us |-----------------------------------------L0.45------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " - "**** Simulation run 38, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.39[173,950] 1.04us |-----------------------------------------L0.39------------------------------------------|" + - "L0.49[76,932] 1.05us |-----------------------------------------L0.49------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - - "**** Simulation run 39, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1.05us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.05us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.05us 4mb |------------L0.?-------------| " + - "**** Simulation run 39, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.40[50,629] 1.04us |-----------------------------------------L0.40------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.3[173,950] 1us |------------------------------------------L0.3------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " - - "Committing partition 1:" - - " Soft Deleting 40 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40" - - " Creating 110 files" + - "L0.?[173,355] 1us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1us 4mb |---------------L0.?----------------| " - "**** Simulation run 40, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.41[76,932] 1.04us |-----------------------------------------L0.41------------------------------------------|" + - "L0.7[173,950] 1.01us |------------------------------------------L0.7------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " + - "Committing partition 1:" + - " Soft Deleting 40 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.12, L0.13, L0.14, L0.16, L0.17, L0.18, L0.20, L0.21, L0.22, L0.24, L0.25, L0.26, L0.28, L0.29, L0.30, L0.32, L0.33, L0.34, L0.36, L0.37, L0.38, L0.40, L0.41, L0.42, L0.44, L0.45, L0.46, L0.48, L0.49, L0.50" + - " Creating 108 files" - "**** Simulation run 41, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.42[42,986] 1.04us |-----------------------------------------L0.42------------------------------------------|" + - "L0.11[173,950] 1.01us |-----------------------------------------L0.11------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " + - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - "**** Simulation run 42, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.43[173,950] 1.04us |-----------------------------------------L0.43------------------------------------------|" + - "L0.15[173,950] 1.01us |-----------------------------------------L0.15------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - - "**** Simulation run 43, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " + - "**** Simulation run 43, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.44[50,629] 1.04us |-----------------------------------------L0.44------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.19[173,950] 1.02us |-----------------------------------------L0.19------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " + - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " - "**** Simulation run 44, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.45[76,932] 1.04us |-----------------------------------------L0.45------------------------------------------|" + - "L0.23[173,950] 1.02us |-----------------------------------------L0.23------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " - "**** Simulation run 45, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.46[42,986] 1.05us |-----------------------------------------L0.46------------------------------------------|" + - "L0.27[173,950] 1.03us |-----------------------------------------L0.27------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " + - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - "**** Simulation run 46, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.47[173,950] 1.05us |-----------------------------------------L0.47------------------------------------------|" + - "L0.31[173,950] 1.03us |-----------------------------------------L0.31------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.05us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.05us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.05us 4mb |---------------L0.?----------------| " - - "**** Simulation run 47, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " + - "**** Simulation run 47, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.48[50,629] 1.05us |-----------------------------------------L0.48------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.35[173,950] 1.03us |-----------------------------------------L0.35------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.05us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.05us 5mb |------------------L0.?------------------| " + - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - "**** Simulation run 48, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.49[76,932] 1.05us |-----------------------------------------L0.49------------------------------------------|" + - "L0.39[173,950] 1.04us |-----------------------------------------L0.39------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.05us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.05us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.05us 4mb |------------L0.?-------------| " + - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - "**** Simulation run 49, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.50[42,986] 1.05us |-----------------------------------------L0.50------------------------------------------|" + - "L0.43[173,950] 1.04us |-----------------------------------------L0.43------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " - - "Committing partition 1:" - - " Soft Deleting 10 files: L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50" - - " Creating 28 files" - - "**** Simulation run 50, type=compact(ManySmallFiles). 
20 Input Files, 76mb total:" - - "L0 " - - "L0.51[0,1] 999ns 10mb |L0.51| " - - "L0.52[76,355] 1us 3mb |-------------------------------L0.52--------------------------------| " - - "L0.55[42,355] 1us 3mb |------------------------------------L0.55------------------------------------| " - - "L0.58[173,355] 1us 2mb |-------------------L0.58--------------------| " - - "L0.61[50,355] 1us 5mb |-----------------------------------L0.61-----------------------------------| " - - "L0.63[76,355] 1us 3mb |-------------------------------L0.63--------------------------------| " - - "L0.66[42,355] 1us 3mb |------------------------------------L0.66------------------------------------| " - - "L0.69[173,355] 1.01us 2mb |-------------------L0.69--------------------| " - - "L0.72[50,355] 1.01us 5mb |-----------------------------------L0.72-----------------------------------| " - - "L0.74[76,355] 1.01us 3mb |-------------------------------L0.74--------------------------------| " - - "L0.77[42,355] 1.01us 3mb |------------------------------------L0.77------------------------------------| " - - "L0.80[173,355] 1.01us 2mb |-------------------L0.80--------------------| " - - "L0.83[50,355] 1.01us 5mb |-----------------------------------L0.83-----------------------------------| " - - "L0.85[76,355] 1.01us 3mb |-------------------------------L0.85--------------------------------| " - - "L0.88[42,355] 1.01us 3mb |------------------------------------L0.88------------------------------------| " - - "L0.91[173,355] 1.01us 2mb |-------------------L0.91--------------------| " - - "L0.94[50,355] 1.01us 5mb |-----------------------------------L0.94-----------------------------------| " - - "L0.96[76,355] 1.02us 3mb |-------------------------------L0.96--------------------------------| " - - "L0.99[42,355] 1.02us 3mb |------------------------------------L0.99------------------------------------| " - - "L0.102[173,355] 1.02us 2mb |-------------------L0.102-------------------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 76mb total:" - - "L0, all files 76mb " - - "L0.?[0,355] 1.02us |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.51, L0.52, L0.55, L0.58, L0.61, L0.63, L0.66, L0.69, L0.72, L0.74, L0.77, L0.80, L0.83, L0.85, L0.88, L0.91, L0.94, L0.96, L0.99, L0.102" - - " Creating 1 files" - - "**** Simulation run 51, type=compact(ManySmallFiles). 
20 Input Files, 72mb total:" - - "L0 " - - "L0.53[356,629] 1us 3mb |-----------------------------------------L0.53------------------------------------------|" - - "L0.56[356,629] 1us 3mb |-----------------------------------------L0.56------------------------------------------|" - - "L0.59[356,629] 1us 4mb |-----------------------------------------L0.59------------------------------------------|" - - "L0.62[356,629] 1us 5mb |-----------------------------------------L0.62------------------------------------------|" - - "L0.64[356,629] 1us 3mb |-----------------------------------------L0.64------------------------------------------|" - - "L0.67[356,629] 1us 3mb |-----------------------------------------L0.67------------------------------------------|" - - "L0.70[356,629] 1.01us 4mb|-----------------------------------------L0.70------------------------------------------|" - - "L0.73[356,629] 1.01us 5mb|-----------------------------------------L0.73------------------------------------------|" - - "L0.75[356,629] 1.01us 3mb|-----------------------------------------L0.75------------------------------------------|" - - "L0.78[356,629] 1.01us 3mb|-----------------------------------------L0.78------------------------------------------|" - - "L0.81[356,629] 1.01us 4mb|-----------------------------------------L0.81------------------------------------------|" - - "L0.84[356,629] 1.01us 5mb|-----------------------------------------L0.84------------------------------------------|" - - "L0.86[356,629] 1.01us 3mb|-----------------------------------------L0.86------------------------------------------|" - - "L0.89[356,629] 1.01us 3mb|-----------------------------------------L0.89------------------------------------------|" - - "L0.92[356,629] 1.01us 4mb|-----------------------------------------L0.92------------------------------------------|" - - "L0.95[356,629] 1.01us 5mb|-----------------------------------------L0.95------------------------------------------|" - - "L0.97[356,629] 1.02us 3mb|-----------------------------------------L0.97------------------------------------------|" - - "L0.100[356,629] 1.02us 3mb|-----------------------------------------L0.100-----------------------------------------|" - - "L0.103[356,629] 1.02us 4mb|-----------------------------------------L0.103-----------------------------------------|" - - "L0.106[356,629] 1.02us 5mb|-----------------------------------------L0.106-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 72mb total:" - - "L0, all files 72mb " - - "L0.?[356,629] 1.02us |------------------------------------------L0.?------------------------------------------|" + - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " + - "**** Simulation run 50, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" + - "L0, all files 10mb " + - "L0.47[173,950] 1.05us |-----------------------------------------L0.47------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0 " + - "L0.?[173,355] 1.05us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.05us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.05us 4mb |---------------L0.?----------------| " - "Committing partition 1:" - - " Soft Deleting 20 files: L0.53, L0.56, L0.59, L0.62, L0.64, L0.67, L0.70, L0.73, L0.75, L0.78, L0.81, L0.84, L0.86, L0.89, L0.92, L0.95, L0.97, L0.100, L0.103, L0.106" - - " Creating 1 files" - - "**** Simulation run 52, type=compact(ManySmallFiles). 20 Input Files, 76mb total:" - - "L0 " - - "L0.54[630,932] 1us 4mb |----------------------------------L0.54-----------------------------------| " - - "L0.57[630,986] 1us 4mb |-----------------------------------------L0.57------------------------------------------|" - - "L0.60[630,950] 1us 4mb |------------------------------------L0.60-------------------------------------| " - - "L0.65[630,932] 1us 4mb |----------------------------------L0.65-----------------------------------| " - - "L0.68[630,986] 1us 4mb |-----------------------------------------L0.68------------------------------------------|" - - "L0.71[630,950] 1.01us 4mb|------------------------------------L0.71-------------------------------------| " - - "L0.76[630,932] 1.01us 4mb|----------------------------------L0.76-----------------------------------| " - - "L0.79[630,986] 1.01us 4mb|-----------------------------------------L0.79------------------------------------------|" - - "L0.82[630,950] 1.01us 4mb|------------------------------------L0.82-------------------------------------| " - - "L0.87[630,932] 1.01us 4mb|----------------------------------L0.87-----------------------------------| " - - "L0.90[630,986] 1.01us 4mb|-----------------------------------------L0.90------------------------------------------|" - - "L0.93[630,950] 1.01us 4mb|------------------------------------L0.93-------------------------------------| " - - "L0.98[630,932] 1.02us 4mb|----------------------------------L0.98-----------------------------------| " - - "L0.101[630,986] 1.02us 4mb|-----------------------------------------L0.101-----------------------------------------|" - - "L0.104[630,950] 1.02us 4mb|------------------------------------L0.104------------------------------------| " - - "L0.109[630,932] 1.02us 4mb|----------------------------------L0.109----------------------------------| " - - "L0.112[630,986] 1.02us 4mb|-----------------------------------------L0.112-----------------------------------------|" - - "L0.115[630,950] 1.02us 4mb|------------------------------------L0.115------------------------------------| " - - "L0.120[630,932] 1.02us 4mb|----------------------------------L0.120----------------------------------| " - - "L0.123[630,986] 1.02us 4mb|-----------------------------------------L0.123-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 76mb total:" - - "L0, all files 76mb " - - "L0.?[630,986] 1.02us |------------------------------------------L0.?------------------------------------------|" + - " Soft Deleting 10 files: L0.11, L0.15, L0.19, L0.23, L0.27, L0.31, L0.35, L0.39, L0.43, L0.47" + - " Creating 30 files" + - "**** Simulation run 51, type=compact(ManySmallFiles). 
20 Input Files, 71mb total:" + - "L0 " + - "L0.116[76,355] 1us 3mb |------------------------------------L0.116------------------------------------| " + - "L0.53[42,355] 1us 3mb |-----------------------------------------L0.53------------------------------------------|" + - "L0.155[173,355] 1us 2mb |----------------------L0.155----------------------| " + - "L0.92[50,355] 1us 5mb |----------------------------------------L0.92----------------------------------------| " + - "L0.119[76,355] 1us 3mb |------------------------------------L0.119------------------------------------| " + - "L0.56[42,355] 1us 3mb |-----------------------------------------L0.56------------------------------------------|" + - "L0.158[173,355] 1.01us 2mb |----------------------L0.158----------------------| " + - "L0.94[50,355] 1.01us 5mb |----------------------------------------L0.94----------------------------------------| " + - "L0.122[76,355] 1.01us 3mb |------------------------------------L0.122------------------------------------| " + - "L0.59[42,355] 1.01us 3mb |-----------------------------------------L0.59------------------------------------------|" + - "L0.161[173,355] 1.01us 2mb |----------------------L0.161----------------------| " + - "L0.96[50,355] 1.01us 5mb |----------------------------------------L0.96----------------------------------------| " + - "L0.125[76,355] 1.01us 3mb |------------------------------------L0.125------------------------------------| " + - "L0.62[42,355] 1.01us 3mb |-----------------------------------------L0.62------------------------------------------|" + - "L0.164[173,355] 1.01us 2mb |----------------------L0.164----------------------| " + - "L0.98[50,355] 1.01us 5mb |----------------------------------------L0.98----------------------------------------| " + - "L0.128[76,355] 1.02us 3mb |------------------------------------L0.128------------------------------------| " + - "L0.65[42,355] 1.02us 3mb |-----------------------------------------L0.65------------------------------------------|" + - "L0.167[173,355] 1.02us 2mb |----------------------L0.167----------------------| " + - "L0.100[50,355] 1.02us 5mb |---------------------------------------L0.100----------------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 71mb total:" + - "L0, all files 71mb " + - "L0.?[42,355] 1.02us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.54, L0.57, L0.60, L0.65, L0.68, L0.71, L0.76, L0.79, L0.82, L0.87, L0.90, L0.93, L0.98, L0.101, L0.104, L0.109, L0.112, L0.115, L0.120, L0.123" + - " Soft Deleting 20 files: L0.53, L0.56, L0.59, L0.62, L0.65, L0.92, L0.94, L0.96, L0.98, L0.100, L0.116, L0.119, L0.122, L0.125, L0.128, L0.155, L0.158, L0.161, L0.164, L0.167" - " Creating 1 files" - - "**** Simulation run 53, type=compact(ManySmallFiles). 
20 Input Files, 71mb total:" - - "L0 " - - "L0.105[50,355] 1.02us 5mb |---------------------------------------L0.105----------------------------------------| " - - "L0.107[76,355] 1.02us 3mb |------------------------------------L0.107------------------------------------| " - - "L0.110[42,355] 1.02us 3mb|-----------------------------------------L0.110-----------------------------------------|" - - "L0.113[173,355] 1.02us 2mb |----------------------L0.113----------------------| " - - "L0.116[50,355] 1.02us 5mb |---------------------------------------L0.116----------------------------------------| " - - "L0.118[76,355] 1.02us 3mb |------------------------------------L0.118------------------------------------| " - - "L0.121[42,355] 1.02us 3mb|-----------------------------------------L0.121-----------------------------------------|" - - "L0.124[173,355] 1.03us 2mb |----------------------L0.124----------------------| " - - "L0.127[50,355] 1.03us 5mb |---------------------------------------L0.127----------------------------------------| " - - "L0.129[76,355] 1.03us 3mb |------------------------------------L0.129------------------------------------| " - - "L0.132[42,355] 1.03us 3mb|-----------------------------------------L0.132-----------------------------------------|" - - "L0.135[173,355] 1.03us 2mb |----------------------L0.135----------------------| " - - "L0.138[50,355] 1.03us 5mb |---------------------------------------L0.138----------------------------------------| " + - "**** Simulation run 52, type=compact(ManySmallFiles). 20 Input Files, 71mb total:" + - "L0 " + - "L0.131[76,355] 1.02us 3mb |------------------------------------L0.131------------------------------------| " + - "L0.68[42,355] 1.02us 3mb |-----------------------------------------L0.68------------------------------------------|" + - "L0.170[173,355] 1.02us 2mb |----------------------L0.170----------------------| " + - "L0.102[50,355] 1.02us 5mb |---------------------------------------L0.102----------------------------------------| " + - "L0.134[76,355] 1.02us 3mb |------------------------------------L0.134------------------------------------| " + - "L0.71[42,355] 1.02us 3mb |-----------------------------------------L0.71------------------------------------------|" + - "L0.173[173,355] 1.03us 2mb |----------------------L0.173----------------------| " + - "L0.104[50,355] 1.03us 5mb |---------------------------------------L0.104----------------------------------------| " + - "L0.137[76,355] 1.03us 3mb |------------------------------------L0.137------------------------------------| " + - "L0.74[42,355] 1.03us 3mb |-----------------------------------------L0.74------------------------------------------|" + - "L0.176[173,355] 1.03us 2mb |----------------------L0.176----------------------| " + - "L0.106[50,355] 1.03us 5mb |---------------------------------------L0.106----------------------------------------| " - "L0.140[76,355] 1.03us 3mb |------------------------------------L0.140------------------------------------| " - - "L0.143[42,355] 1.03us 3mb|-----------------------------------------L0.143-----------------------------------------|" - - "L0.146[173,355] 1.03us 2mb |----------------------L0.146----------------------| " - - "L0.149[50,355] 1.03us 5mb |---------------------------------------L0.149----------------------------------------| " - - "L0.151[76,355] 1.04us 3mb |------------------------------------L0.151------------------------------------| " - - "L0.154[42,355] 1.04us 
3mb|-----------------------------------------L0.154-----------------------------------------|" - - "L0.157[173,355] 1.04us 2mb |----------------------L0.157----------------------| " + - "L0.77[42,355] 1.03us 3mb |-----------------------------------------L0.77------------------------------------------|" + - "L0.179[173,355] 1.03us 2mb |----------------------L0.179----------------------| " + - "L0.108[50,355] 1.03us 5mb |---------------------------------------L0.108----------------------------------------| " + - "L0.143[76,355] 1.04us 3mb |------------------------------------L0.143------------------------------------| " + - "L0.80[42,355] 1.04us 3mb |-----------------------------------------L0.80------------------------------------------|" + - "L0.182[173,355] 1.04us 2mb |----------------------L0.182----------------------| " + - "L0.110[50,355] 1.04us 5mb |---------------------------------------L0.110----------------------------------------| " - "**** 1 Output Files (parquet_file_id not yet assigned), 71mb total:" - "L0, all files 71mb " - "L0.?[42,355] 1.04us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.105, L0.107, L0.110, L0.113, L0.116, L0.118, L0.121, L0.124, L0.127, L0.129, L0.132, L0.135, L0.138, L0.140, L0.143, L0.146, L0.149, L0.151, L0.154, L0.157" + - " Soft Deleting 20 files: L0.68, L0.71, L0.74, L0.77, L0.80, L0.102, L0.104, L0.106, L0.108, L0.110, L0.131, L0.134, L0.137, L0.140, L0.143, L0.170, L0.173, L0.176, L0.179, L0.182" + - " Creating 1 files" + - "**** Simulation run 53, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.146[76,355] 1.04us 3mb |------------------------------------L0.146------------------------------------| " + - "L0.83[42,355] 1.04us 3mb |-----------------------------------------L0.83------------------------------------------|" + - "L0.185[173,355] 1.04us 2mb |----------------------L0.185----------------------| " + - "L0.112[50,355] 1.04us 5mb |---------------------------------------L0.112----------------------------------------| " + - "L0.149[76,355] 1.04us 3mb |------------------------------------L0.149------------------------------------| " + - "L0.86[42,355] 1.05us 3mb |-----------------------------------------L0.86------------------------------------------|" + - "L0.188[173,355] 1.05us 2mb |----------------------L0.188----------------------| " + - "L0.114[50,355] 1.05us 5mb |---------------------------------------L0.114----------------------------------------| " + - "L0.152[76,355] 1.05us 3mb |------------------------------------L0.152------------------------------------| " + - "L0.89[42,355] 1.05us 3mb |-----------------------------------------L0.89------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " + - "L0.?[42,355] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 10 files: L0.83, L0.86, L0.89, L0.112, L0.114, L0.146, L0.149, L0.152, L0.185, L0.188" - " Creating 1 files" - "**** Simulation run 54, type=compact(ManySmallFiles). 
20 Input Files, 72mb total:" - "L0 " - - "L0.108[356,629] 1.02us 3mb|-----------------------------------------L0.108-----------------------------------------|" - - "L0.111[356,629] 1.02us 3mb|-----------------------------------------L0.111-----------------------------------------|" - - "L0.114[356,629] 1.02us 4mb|-----------------------------------------L0.114-----------------------------------------|" - - "L0.117[356,629] 1.02us 5mb|-----------------------------------------L0.117-----------------------------------------|" - - "L0.119[356,629] 1.02us 3mb|-----------------------------------------L0.119-----------------------------------------|" - - "L0.122[356,629] 1.02us 3mb|-----------------------------------------L0.122-----------------------------------------|" - - "L0.125[356,629] 1.03us 4mb|-----------------------------------------L0.125-----------------------------------------|" - - "L0.128[356,629] 1.03us 5mb|-----------------------------------------L0.128-----------------------------------------|" - - "L0.130[356,629] 1.03us 3mb|-----------------------------------------L0.130-----------------------------------------|" - - "L0.133[356,629] 1.03us 3mb|-----------------------------------------L0.133-----------------------------------------|" - - "L0.136[356,629] 1.03us 4mb|-----------------------------------------L0.136-----------------------------------------|" - - "L0.139[356,629] 1.03us 5mb|-----------------------------------------L0.139-----------------------------------------|" + - "L0.117[356,629] 1us 3mb |-----------------------------------------L0.117-----------------------------------------|" + - "L0.54[356,629] 1us 3mb |-----------------------------------------L0.54------------------------------------------|" + - "L0.156[356,629] 1us 4mb |-----------------------------------------L0.156-----------------------------------------|" + - "L0.93[356,629] 1us 5mb |-----------------------------------------L0.93------------------------------------------|" + - "L0.120[356,629] 1us 3mb |-----------------------------------------L0.120-----------------------------------------|" + - "L0.57[356,629] 1us 3mb |-----------------------------------------L0.57------------------------------------------|" + - "L0.159[356,629] 1.01us 4mb|-----------------------------------------L0.159-----------------------------------------|" + - "L0.95[356,629] 1.01us 5mb|-----------------------------------------L0.95------------------------------------------|" + - "L0.123[356,629] 1.01us 3mb|-----------------------------------------L0.123-----------------------------------------|" + - "L0.60[356,629] 1.01us 3mb|-----------------------------------------L0.60------------------------------------------|" + - "L0.162[356,629] 1.01us 4mb|-----------------------------------------L0.162-----------------------------------------|" + - "L0.97[356,629] 1.01us 5mb|-----------------------------------------L0.97------------------------------------------|" + - "L0.126[356,629] 1.01us 3mb|-----------------------------------------L0.126-----------------------------------------|" + - "L0.63[356,629] 1.01us 3mb|-----------------------------------------L0.63------------------------------------------|" + - "L0.165[356,629] 1.01us 4mb|-----------------------------------------L0.165-----------------------------------------|" + - "L0.99[356,629] 1.01us 5mb|-----------------------------------------L0.99------------------------------------------|" + - "L0.129[356,629] 1.02us 
3mb|-----------------------------------------L0.129-----------------------------------------|" + - "L0.66[356,629] 1.02us 3mb|-----------------------------------------L0.66------------------------------------------|" + - "L0.168[356,629] 1.02us 4mb|-----------------------------------------L0.168-----------------------------------------|" + - "L0.101[356,629] 1.02us 5mb|-----------------------------------------L0.101-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 72mb total:" + - "L0, all files 72mb " + - "L0.?[356,629] 1.02us |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.54, L0.57, L0.60, L0.63, L0.66, L0.93, L0.95, L0.97, L0.99, L0.101, L0.117, L0.120, L0.123, L0.126, L0.129, L0.156, L0.159, L0.162, L0.165, L0.168" + - " Creating 1 files" + - "**** Simulation run 55, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.147[356,629] 1.04us 3mb|-----------------------------------------L0.147-----------------------------------------|" + - "L0.84[356,629] 1.04us 3mb|-----------------------------------------L0.84------------------------------------------|" + - "L0.186[356,629] 1.04us 4mb|-----------------------------------------L0.186-----------------------------------------|" + - "L0.113[356,629] 1.04us 5mb|-----------------------------------------L0.113-----------------------------------------|" + - "L0.150[356,629] 1.04us 3mb|-----------------------------------------L0.150-----------------------------------------|" + - "L0.87[356,629] 1.05us 3mb|-----------------------------------------L0.87------------------------------------------|" + - "L0.189[356,629] 1.05us 4mb|-----------------------------------------L0.189-----------------------------------------|" + - "L0.115[356,629] 1.05us 5mb|-----------------------------------------L0.115-----------------------------------------|" + - "L0.153[356,629] 1.05us 3mb|-----------------------------------------L0.153-----------------------------------------|" + - "L0.90[356,629] 1.05us 3mb|-----------------------------------------L0.90------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " + - "L0.?[356,629] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 10 files: L0.84, L0.87, L0.90, L0.113, L0.115, L0.147, L0.150, L0.153, L0.186, L0.189" + - " Creating 1 files" + - "**** Simulation run 56, type=compact(ManySmallFiles). 
20 Input Files, 72mb total:" + - "L0 " + - "L0.132[356,629] 1.02us 3mb|-----------------------------------------L0.132-----------------------------------------|" + - "L0.69[356,629] 1.02us 3mb|-----------------------------------------L0.69------------------------------------------|" + - "L0.171[356,629] 1.02us 4mb|-----------------------------------------L0.171-----------------------------------------|" + - "L0.103[356,629] 1.02us 5mb|-----------------------------------------L0.103-----------------------------------------|" + - "L0.135[356,629] 1.02us 3mb|-----------------------------------------L0.135-----------------------------------------|" + - "L0.72[356,629] 1.02us 3mb|-----------------------------------------L0.72------------------------------------------|" + - "L0.174[356,629] 1.03us 4mb|-----------------------------------------L0.174-----------------------------------------|" + - "L0.105[356,629] 1.03us 5mb|-----------------------------------------L0.105-----------------------------------------|" + - "L0.138[356,629] 1.03us 3mb|-----------------------------------------L0.138-----------------------------------------|" + - "L0.75[356,629] 1.03us 3mb|-----------------------------------------L0.75------------------------------------------|" + - "L0.177[356,629] 1.03us 4mb|-----------------------------------------L0.177-----------------------------------------|" + - "L0.107[356,629] 1.03us 5mb|-----------------------------------------L0.107-----------------------------------------|" - "L0.141[356,629] 1.03us 3mb|-----------------------------------------L0.141-----------------------------------------|" - - "L0.144[356,629] 1.03us 3mb|-----------------------------------------L0.144-----------------------------------------|" - - "L0.147[356,629] 1.03us 4mb|-----------------------------------------L0.147-----------------------------------------|" - - "L0.150[356,629] 1.03us 5mb|-----------------------------------------L0.150-----------------------------------------|" - - "L0.152[356,629] 1.04us 3mb|-----------------------------------------L0.152-----------------------------------------|" - - "L0.155[356,629] 1.04us 3mb|-----------------------------------------L0.155-----------------------------------------|" - - "L0.158[356,629] 1.04us 4mb|-----------------------------------------L0.158-----------------------------------------|" - - "L0.161[356,629] 1.04us 5mb|-----------------------------------------L0.161-----------------------------------------|" + - "L0.78[356,629] 1.03us 3mb|-----------------------------------------L0.78------------------------------------------|" + - "L0.180[356,629] 1.03us 4mb|-----------------------------------------L0.180-----------------------------------------|" + - "L0.109[356,629] 1.03us 5mb|-----------------------------------------L0.109-----------------------------------------|" + - "L0.144[356,629] 1.04us 3mb|-----------------------------------------L0.144-----------------------------------------|" + - "L0.81[356,629] 1.04us 3mb|-----------------------------------------L0.81------------------------------------------|" + - "L0.183[356,629] 1.04us 4mb|-----------------------------------------L0.183-----------------------------------------|" + - "L0.111[356,629] 1.04us 5mb|-----------------------------------------L0.111-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 72mb total:" - "L0, all files 72mb " - "L0.?[356,629] 1.04us 
|------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.108, L0.111, L0.114, L0.117, L0.119, L0.122, L0.125, L0.128, L0.130, L0.133, L0.136, L0.139, L0.141, L0.144, L0.147, L0.150, L0.152, L0.155, L0.158, L0.161" + - " Soft Deleting 20 files: L0.69, L0.72, L0.75, L0.78, L0.81, L0.103, L0.105, L0.107, L0.109, L0.111, L0.132, L0.135, L0.138, L0.141, L0.144, L0.171, L0.174, L0.177, L0.180, L0.183" - " Creating 1 files" - - "**** Simulation run 55, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[877]). 19 Input Files, 145mb total:" - - "L0 " - - "L0.189[630,986] 1.05us 4mb|-----------------------------------------L0.189-----------------------------------------|" - - "L0.186[630,932] 1.05us 4mb|----------------------------------L0.186----------------------------------| " - - "L0.181[630,950] 1.05us 4mb|------------------------------------L0.181------------------------------------| " - - "L0.178[630,986] 1.05us 4mb|-----------------------------------------L0.178-----------------------------------------|" - - "L0.175[630,932] 1.04us 4mb|----------------------------------L0.175----------------------------------| " - - "L0.170[630,950] 1.04us 4mb|------------------------------------L0.170------------------------------------| " - - "L0.167[630,986] 1.04us 4mb|-----------------------------------------L0.167-----------------------------------------|" - - "L0.164[630,932] 1.04us 4mb|----------------------------------L0.164----------------------------------| " - - "L0.159[630,950] 1.04us 4mb|------------------------------------L0.159------------------------------------| " - - "L0.156[630,986] 1.04us 4mb|-----------------------------------------L0.156-----------------------------------------|" - - "L0.153[630,932] 1.04us 4mb|----------------------------------L0.153----------------------------------| " - - "L0.148[630,950] 1.03us 4mb|------------------------------------L0.148------------------------------------| " - - "L0.145[630,986] 1.03us 4mb|-----------------------------------------L0.145-----------------------------------------|" + - "**** Simulation run 57, type=compact(ManySmallFiles). 
20 Input Files, 76mb total:" + - "L0 " + - "L0.118[630,932] 1us 4mb |----------------------------------L0.118----------------------------------| " + - "L0.55[630,986] 1us 4mb |-----------------------------------------L0.55------------------------------------------|" + - "L0.157[630,950] 1us 4mb |------------------------------------L0.157------------------------------------| " + - "L0.121[630,932] 1us 4mb |----------------------------------L0.121----------------------------------| " + - "L0.58[630,986] 1us 4mb |-----------------------------------------L0.58------------------------------------------|" + - "L0.160[630,950] 1.01us 4mb|------------------------------------L0.160------------------------------------| " + - "L0.124[630,932] 1.01us 4mb|----------------------------------L0.124----------------------------------| " + - "L0.61[630,986] 1.01us 4mb|-----------------------------------------L0.61------------------------------------------|" + - "L0.163[630,950] 1.01us 4mb|------------------------------------L0.163------------------------------------| " + - "L0.127[630,932] 1.01us 4mb|----------------------------------L0.127----------------------------------| " + - "L0.64[630,986] 1.01us 4mb|-----------------------------------------L0.64------------------------------------------|" + - "L0.166[630,950] 1.01us 4mb|------------------------------------L0.166------------------------------------| " + - "L0.130[630,932] 1.02us 4mb|----------------------------------L0.130----------------------------------| " + - "L0.67[630,986] 1.02us 4mb|-----------------------------------------L0.67------------------------------------------|" + - "L0.169[630,950] 1.02us 4mb|------------------------------------L0.169------------------------------------| " + - "L0.133[630,932] 1.02us 4mb|----------------------------------L0.133----------------------------------| " + - "L0.70[630,986] 1.02us 4mb|-----------------------------------------L0.70------------------------------------------|" + - "L0.172[630,950] 1.02us 4mb|------------------------------------L0.172------------------------------------| " + - "L0.136[630,932] 1.02us 4mb|----------------------------------L0.136----------------------------------| " + - "L0.73[630,986] 1.02us 4mb|-----------------------------------------L0.73------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 76mb total:" + - "L0, all files 76mb " + - "L0.?[630,986] 1.02us |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.55, L0.58, L0.61, L0.64, L0.67, L0.70, L0.73, L0.118, L0.121, L0.124, L0.127, L0.130, L0.133, L0.136, L0.157, L0.160, L0.163, L0.166, L0.169, L0.172" + - " Creating 1 files" + - "**** Simulation run 58, type=compact(ManySmallFiles). 
18 Input Files, 69mb total:" + - "L0 " + - "L0.175[630,950] 1.03us 4mb|------------------------------------L0.175------------------------------------| " + - "L0.139[630,932] 1.03us 4mb|----------------------------------L0.139----------------------------------| " + - "L0.76[630,986] 1.03us 4mb|-----------------------------------------L0.76------------------------------------------|" + - "L0.178[630,950] 1.03us 4mb|------------------------------------L0.178------------------------------------| " - "L0.142[630,932] 1.03us 4mb|----------------------------------L0.142----------------------------------| " - - "L0.137[630,950] 1.03us 4mb|------------------------------------L0.137------------------------------------| " - - "L0.134[630,986] 1.03us 4mb|-----------------------------------------L0.134-----------------------------------------|" - - "L0.131[630,932] 1.03us 4mb|----------------------------------L0.131----------------------------------| " - - "L0.126[630,950] 1.03us 4mb|------------------------------------L0.126------------------------------------| " - - "L0.192[630,986] 1.02us 76mb|-----------------------------------------L0.192-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 145mb total:" - - "L1 " - - "L1.?[630,877] 1.05us 100mb|----------------------------L1.?----------------------------| " - - "L1.?[878,986] 1.05us 44mb |----------L1.?-----------| " + - "L0.79[630,986] 1.03us 4mb|-----------------------------------------L0.79------------------------------------------|" + - "L0.181[630,950] 1.03us 4mb|------------------------------------L0.181------------------------------------| " + - "L0.145[630,932] 1.04us 4mb|----------------------------------L0.145----------------------------------| " + - "L0.82[630,986] 1.04us 4mb|-----------------------------------------L0.82------------------------------------------|" + - "L0.184[630,950] 1.04us 4mb|------------------------------------L0.184------------------------------------| " + - "L0.148[630,932] 1.04us 4mb|----------------------------------L0.148----------------------------------| " + - "L0.85[630,986] 1.04us 4mb|-----------------------------------------L0.85------------------------------------------|" + - "L0.187[630,950] 1.04us 4mb|------------------------------------L0.187------------------------------------| " + - "L0.151[630,932] 1.04us 4mb|----------------------------------L0.151----------------------------------| " + - "L0.88[630,986] 1.05us 4mb|-----------------------------------------L0.88------------------------------------------|" + - "L0.190[630,950] 1.05us 4mb|------------------------------------L0.190------------------------------------| " + - "L0.154[630,932] 1.05us 4mb|----------------------------------L0.154----------------------------------| " + - "L0.91[630,986] 1.05us 4mb|-----------------------------------------L0.91------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 69mb total:" + - "L0, all files 69mb " + - "L0.?[630,986] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 19 files: L0.126, L0.131, L0.134, L0.137, L0.142, L0.145, L0.148, L0.153, L0.156, L0.159, L0.164, L0.167, L0.170, L0.175, L0.178, L0.181, L0.186, L0.189, L0.192" - - " Creating 2 files" - - "**** Simulation run 56, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[190]). 
13 Input Files, 187mb total:" - - "L0 " - - "L0.187[42,355] 1.05us 3mb |-----------------------------------L0.187------------------------------------| " - - "L0.184[76,355] 1.05us 3mb |-------------------------------L0.184-------------------------------| " - - "L0.182[50,355] 1.05us 5mb |----------------------------------L0.182-----------------------------------| " - - "L0.179[173,355] 1.05us 2mb |-------------------L0.179-------------------| " - - "L0.176[42,355] 1.05us 3mb |-----------------------------------L0.176------------------------------------| " - - "L0.173[76,355] 1.04us 3mb |-------------------------------L0.173-------------------------------| " - - "L0.171[50,355] 1.04us 5mb |----------------------------------L0.171-----------------------------------| " - - "L0.168[173,355] 1.04us 2mb |-------------------L0.168-------------------| " - - "L0.165[42,355] 1.04us 3mb |-----------------------------------L0.165------------------------------------| " - - "L0.162[76,355] 1.04us 3mb |-------------------------------L0.162-------------------------------| " - - "L0.160[50,355] 1.04us 5mb |----------------------------------L0.160-----------------------------------| " - - "L0.190[0,355] 1.02us 76mb|-----------------------------------------L0.190-----------------------------------------|" - - "L0.193[42,355] 1.04us 71mb |-----------------------------------L0.193------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 187mb total:" + - " Soft Deleting 18 files: L0.76, L0.79, L0.82, L0.85, L0.88, L0.91, L0.139, L0.142, L0.145, L0.148, L0.151, L0.154, L0.175, L0.178, L0.181, L0.184, L0.187, L0.190" + - " Creating 1 files" + - "**** Simulation run 59, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[219]). 3 Input Files, 177mb total:" + - "L0 " + - "L0.193[42,355] 1.05us 35mb|-----------------------------------------L0.193-----------------------------------------|" + - "L0.192[42,355] 1.04us 71mb|-----------------------------------------L0.192-----------------------------------------|" + - "L0.191[42,355] 1.02us 71mb|-----------------------------------------L0.191-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 177mb total:" - "L1 " - - "L1.?[0,190] 1.05us 100mb |---------------------L1.?---------------------| " - - "L1.?[191,355] 1.05us 87mb |-----------------L1.?------------------| " + - "L1.?[42,219] 1.05us 100mb|----------------------L1.?----------------------| " + - "L1.?[220,355] 1.05us 77mb |----------------L1.?----------------| " - "Committing partition 1:" - - " Soft Deleting 13 files: L0.160, L0.162, L0.165, L0.168, L0.171, L0.173, L0.176, L0.179, L0.182, L0.184, L0.187, L0.190, L0.193" + - " Soft Deleting 3 files: L0.191, L0.192, L0.193" - " Creating 2 files" - - "**** Simulation run 57, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[510]). 
12 Input Files, 178mb total:" - - "L0 " - - "L0.188[356,629] 1.05us 3mb|-----------------------------------------L0.188-----------------------------------------|" - - "L0.185[356,629] 1.05us 3mb|-----------------------------------------L0.185-----------------------------------------|" - - "L0.183[356,629] 1.05us 5mb|-----------------------------------------L0.183-----------------------------------------|" - - "L0.180[356,629] 1.05us 4mb|-----------------------------------------L0.180-----------------------------------------|" - - "L0.177[356,629] 1.05us 3mb|-----------------------------------------L0.177-----------------------------------------|" - - "L0.174[356,629] 1.04us 3mb|-----------------------------------------L0.174-----------------------------------------|" - - "L0.172[356,629] 1.04us 5mb|-----------------------------------------L0.172-----------------------------------------|" - - "L0.169[356,629] 1.04us 4mb|-----------------------------------------L0.169-----------------------------------------|" - - "L0.166[356,629] 1.04us 3mb|-----------------------------------------L0.166-----------------------------------------|" - - "L0.163[356,629] 1.04us 3mb|-----------------------------------------L0.163-----------------------------------------|" - - "L0.191[356,629] 1.02us 72mb|-----------------------------------------L0.191-----------------------------------------|" - - "L0.194[356,629] 1.04us 72mb|-----------------------------------------L0.194-----------------------------------------|" + - "**** Simulation run 60, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[510]). 3 Input Files, 178mb total:" + - "L0 " + - "L0.196[356,629] 1.04us 72mb|-----------------------------------------L0.196-----------------------------------------|" + - "L0.195[356,629] 1.02us 72mb|-----------------------------------------L0.195-----------------------------------------|" + - "L0.194[356,629] 1.05us 35mb|-----------------------------------------L0.194-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 178mb total:" - "L1 " - "L1.?[356,510] 1.05us 101mb|----------------------L1.?----------------------| " - "L1.?[511,629] 1.05us 77mb |----------------L1.?----------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.163, L0.166, L0.169, L0.172, L0.174, L0.177, L0.180, L0.183, L0.185, L0.188, L0.191, L0.194" + - " Soft Deleting 3 files: L0.194, L0.195, L0.196" + - " Creating 2 files" + - "**** Simulation run 61, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[877]). 2 Input Files, 145mb total:" + - "L0 " + - "L0.198[630,986] 1.05us 69mb|-----------------------------------------L0.198-----------------------------------------|" + - "L0.197[630,986] 1.02us 76mb|-----------------------------------------L0.197-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 145mb total:" + - "L1 " + - "L1.?[630,877] 1.05us 100mb|----------------------------L1.?----------------------------| " + - "L1.?[878,986] 1.05us 44mb |----------L1.?-----------| " + - "Committing partition 1:" + - " Soft Deleting 2 files: L0.197, L0.198" - " Creating 2 files" - - "**** Simulation run 58, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[964]). 1 Input Files, 44mb total:" + - "**** Simulation run 62, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[178, 356]). 
4 Input Files, 288mb total:" + - "L1 " + - "L1.52[0,1] 999ns 10mb |L1.52| " + - "L1.199[42,219] 1.05us 100mb |-----------L1.199------------| " + - "L1.200[220,355] 1.05us 77mb |-------L1.200--------| " + - "L1.201[356,510] 1.05us 101mb |---------L1.201----------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 288mb total:" + - "L2 " + - "L2.?[0,178] 1.05us 101mb |------------L2.?-------------| " + - "L2.?[179,356] 1.05us 100mb |------------L2.?-------------| " + - "L2.?[357,510] 1.05us 87mb |----------L2.?-----------|" + - "Committing partition 1:" + - " Soft Deleting 4 files: L1.52, L1.199, L1.200, L1.201" + - " Creating 3 files" + - "**** Simulation run 63, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[964]). 1 Input Files, 44mb total:" - "L1, all files 44mb " - - "L1.196[878,986] 1.05us |-----------------------------------------L1.196-----------------------------------------|" + - "L1.204[878,986] 1.05us |-----------------------------------------L1.204-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 44mb total:" - "L2 " - "L2.?[878,964] 1.05us 35mb|--------------------------------L2.?---------------------------------| " - "L2.?[965,986] 1.05us 9mb |-----L2.?------| " - "Committing partition 1:" - - " Soft Deleting 1 files: L1.196" - - " Upgrading 5 files level to CompactionLevel::L2: L1.195, L1.197, L1.198, L1.199, L1.200" + - " Soft Deleting 1 files: L1.204" + - " Upgrading 2 files level to CompactionLevel::L2: L1.202, L1.203" - " Creating 2 files" - - "**** Final Output Files (1.39gb written)" + - "**** Final Output Files (1.8gb written)" - "L2 " - - "L2.195[630,877] 1.05us 100mb |-------L2.195-------| " - - "L2.197[0,190] 1.05us 100mb|----L2.197-----| " - - "L2.198[191,355] 1.05us 87mb |---L2.198---| " - - "L2.199[356,510] 1.05us 101mb |---L2.199---| " - - "L2.200[511,629] 1.05us 77mb |-L2.200-| " - - "L2.201[878,964] 1.05us 35mb |L2.201| " - - "L2.202[965,986] 1.05us 9mb |L2.202|" + - "L2.202[511,629] 1.05us 77mb |-L2.202-| " + - "L2.203[630,877] 1.05us 100mb |-------L2.203-------| " + - "L2.205[0,178] 1.05us 101mb|----L2.205----| " + - "L2.206[179,356] 1.05us 100mb |----L2.206----| " + - "L2.207[357,510] 1.05us 87mb |--L2.207---| " + - "L2.208[878,964] 1.05us 35mb |L2.208| " + - "L2.209[965,986] 1.05us 9mb |L2.209|" "### ); } @@ -912,15 +961,15 @@ async fn random_backfill_over_l2s() { - "L2.10[900,999] 999ns 100mb |L2.10-| " - "**** Simulation run 0, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.11[76,932] 1us |-----------------------------------------L0.11------------------------------------------|" + - "L0.12[42,986] 1us |-----------------------------------------L0.12------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " + - "L0.?[42,355] 1us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1us 4mb |-------------L0.?--------------| " - "**** Simulation run 1, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.12[42,986] 1us |-----------------------------------------L0.12------------------------------------------|" + - "L0.16[42,986] 1us |-----------------------------------------L0.16------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[42,355] 1us 3mb |-----------L0.?------------| " @@ -928,239 +977,235 @@ async fn random_backfill_over_l2s() { - "L0.?[630,986] 1us 4mb |-------------L0.?--------------| " - "**** Simulation run 2, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.13[173,950] 1us |-----------------------------------------L0.13------------------------------------------|" + - "L0.20[42,986] 1.01us |-----------------------------------------L0.20------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1us 4mb |---------------L0.?----------------| " - - "**** Simulation run 3, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " + - "**** Simulation run 3, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.14[50,629] 1us |-----------------------------------------L0.14------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.24[42,986] 1.01us |-----------------------------------------L0.24------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1us 5mb |------------------L0.?------------------| " + - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " - "**** Simulation run 4, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.15[76,932] 1us |-----------------------------------------L0.15------------------------------------------|" + - "L0.28[42,986] 1.02us |-----------------------------------------L0.28------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " + - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - "**** Simulation run 5, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.16[42,986] 1us |-----------------------------------------L0.16------------------------------------------|" + - "L0.32[42,986] 1.02us |-----------------------------------------L0.32------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1us 4mb |-------------L0.?--------------| " + - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - "**** Simulation run 6, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.17[173,950] 1.01us |-----------------------------------------L0.17------------------------------------------|" + - "L0.36[42,986] 1.02us |-----------------------------------------L0.36------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - - "**** Simulation run 7, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " + - "**** Simulation run 7, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.18[50,629] 1.01us |-----------------------------------------L0.18------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.40[42,986] 1.03us |-----------------------------------------L0.40------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " - "**** Simulation run 8, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.19[76,932] 1.01us |-----------------------------------------L0.19------------------------------------------|" + - "L0.44[42,986] 1.03us |-----------------------------------------L0.44------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " - "**** Simulation run 9, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.20[42,986] 1.01us |-----------------------------------------L0.20------------------------------------------|" + - "L0.48[42,986] 1.04us |-----------------------------------------L0.48------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " + - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " - "**** Simulation run 10, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.21[173,950] 1.01us |-----------------------------------------L0.21------------------------------------------|" + - "L0.52[42,986] 1.04us |-----------------------------------------L0.52------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - - "**** Simulation run 11, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " + - "**** Simulation run 11, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.22[50,629] 1.01us |-----------------------------------------L0.22------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.56[42,986] 1.05us |-----------------------------------------L0.56------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " - "**** Simulation run 12, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.23[76,932] 1.01us |-----------------------------------------L0.23------------------------------------------|" + - "L0.60[42,986] 1.05us |-----------------------------------------L0.60------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " - - "**** Simulation run 13, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " + - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " + - "**** Simulation run 13, type=split(VerticalSplit)(split_times=[355]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.24[42,986] 1.01us |-----------------------------------------L0.24------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.14[50,629] 1us |-----------------------------------------L0.14------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.01us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.01us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.01us 4mb |-------------L0.?--------------| " - - "**** Simulation run 14, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1us 5mb |------------------L0.?------------------| " + - "**** Simulation run 14, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.25[173,950] 1.01us |-----------------------------------------L0.25------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.18[50,629] 1.01us |-----------------------------------------L0.18------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " + - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " - "**** Simulation run 15, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.26[50,629] 1.01us |-----------------------------------------L0.26------------------------------------------|" + - "L0.22[50,629] 1.01us |-----------------------------------------L0.22------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " - - "**** Simulation run 16, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "**** Simulation run 16, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.27[76,932] 1.02us |-----------------------------------------L0.27------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.26[50,629] 1.01us |-----------------------------------------L0.26------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - - "**** Simulation run 17, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.01us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.01us 5mb |------------------L0.?------------------| " + - "**** Simulation run 17, type=split(VerticalSplit)(split_times=[355]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.28[42,986] 1.02us |-----------------------------------------L0.28------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.30[50,629] 1.02us |-----------------------------------------L0.30------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - - "**** Simulation run 18, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " + - "**** Simulation run 18, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.29[173,950] 1.02us |-----------------------------------------L0.29------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.34[50,629] 1.02us |-----------------------------------------L0.34------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " + - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " - "**** Simulation run 19, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.30[50,629] 1.02us |-----------------------------------------L0.30------------------------------------------|" + - "L0.38[50,629] 1.03us |-----------------------------------------L0.38------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " - - "**** Simulation run 20, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "**** Simulation run 20, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.31[76,932] 1.02us |-----------------------------------------L0.31------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.42[50,629] 1.03us |-----------------------------------------L0.42------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - - "**** Simulation run 21, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" + - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "**** Simulation run 21, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.32[42,986] 1.02us |-----------------------------------------L0.32------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.46[50,629] 1.03us |-----------------------------------------L0.46------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " - - "**** Simulation run 22, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "**** Simulation run 22, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.33[173,950] 1.02us |-----------------------------------------L0.33------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.50[50,629] 1.04us |-----------------------------------------L0.50------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " + - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " - "**** Simulation run 23, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.34[50,629] 1.02us |-----------------------------------------L0.34------------------------------------------|" + - "L0.54[50,629] 1.04us |-----------------------------------------L0.54------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.02us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.02us 5mb |------------------L0.?------------------| " - - "**** Simulation run 24, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" + - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " + - "**** Simulation run 24, type=split(VerticalSplit)(split_times=[355]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.35[76,932] 1.02us |-----------------------------------------L0.35------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.58[50,629] 1.05us |-----------------------------------------L0.58------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " + - "L0.?[50,355] 1.05us 5mb |--------------------L0.?---------------------| " + - "L0.?[356,629] 1.05us 5mb |------------------L0.?------------------| " - "**** Simulation run 25, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.36[42,986] 1.02us |-----------------------------------------L0.36------------------------------------------|" + - "L0.11[76,932] 1us |-----------------------------------------L0.11------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.02us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.02us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.02us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " - "**** Simulation run 26, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.37[173,950] 1.03us |-----------------------------------------L0.37------------------------------------------|" + - "L0.15[76,932] 1us |-----------------------------------------L0.15------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - - "**** Simulation run 27, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1us 4mb |------------L0.?-------------| " + - "**** Simulation run 27, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.38[50,629] 1.03us |-----------------------------------------L0.38------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.19[76,932] 1.01us |-----------------------------------------L0.19------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " - "**** Simulation run 28, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.39[76,932] 1.03us |-----------------------------------------L0.39------------------------------------------|" + - "L0.23[76,932] 1.01us |-----------------------------------------L0.23------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[76,355] 1.01us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.01us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.01us 4mb |------------L0.?-------------| " - "**** Simulation run 29, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.40[42,986] 1.03us |-----------------------------------------L0.40------------------------------------------|" + - "L0.27[76,932] 1.02us |-----------------------------------------L0.27------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - "**** Simulation run 30, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.41[173,950] 1.03us |-----------------------------------------L0.41------------------------------------------|" + - "L0.31[76,932] 1.02us |-----------------------------------------L0.31------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - - "**** Simulation run 31, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " + - "**** Simulation run 31, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.42[50,629] 1.03us |-----------------------------------------L0.42------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.35[76,932] 1.02us |-----------------------------------------L0.35------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "L0.?[76,355] 1.02us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.02us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.02us 4mb |------------L0.?-------------| " - "**** Simulation run 32, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.43[76,932] 1.03us |-----------------------------------------L0.43------------------------------------------|" + - "L0.39[76,932] 1.03us |-----------------------------------------L0.39------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " @@ -1168,30 +1213,31 @@ async fn random_backfill_over_l2s() { - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " - "**** Simulation run 33, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.44[42,986] 1.03us |-----------------------------------------L0.44------------------------------------------|" + - "L0.43[76,932] 1.03us |-----------------------------------------L0.43------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.03us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.03us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.03us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1.03us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.03us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.03us 4mb |------------L0.?-------------| " - "**** Simulation run 34, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.45[173,950] 1.03us |-----------------------------------------L0.45------------------------------------------|" + - "L0.47[76,932] 1.04us |-----------------------------------------L0.47------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - - "**** Simulation run 35, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " + - "**** Simulation run 35, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.46[50,629] 1.03us |-----------------------------------------L0.46------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.51[76,932] 1.04us |-----------------------------------------L0.51------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.03us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.03us 5mb |------------------L0.?------------------| " + - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " - "**** Simulation run 36, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.47[76,932] 1.04us |-----------------------------------------L0.47------------------------------------------|" + - "L0.55[76,932] 1.04us |-----------------------------------------L0.55------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " @@ -1199,454 +1245,457 @@ async fn random_backfill_over_l2s() { - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " - "**** Simulation run 37, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.48[42,986] 1.04us |-----------------------------------------L0.48------------------------------------------|" + - "L0.59[76,932] 1.05us |-----------------------------------------L0.59------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " + - "L0.?[76,355] 1.05us 3mb |-----------L0.?------------| " + - "L0.?[356,629] 1.05us 3mb |-----------L0.?-----------| " + - "L0.?[630,932] 1.05us 4mb |------------L0.?-------------| " - "**** Simulation run 38, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.49[173,950] 1.04us |-----------------------------------------L0.49------------------------------------------|" + - "L0.13[173,950] 1us |-----------------------------------------L0.13------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - - "**** Simulation run 39, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[173,355] 1us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1us 4mb |---------------L0.?----------------| " + - "**** Simulation run 39, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.50[50,629] 1.04us |-----------------------------------------L0.50------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.17[173,950] 1.01us |-----------------------------------------L0.17------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " + - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - "Committing partition 1:" - - " Soft Deleting 40 files: L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50" - - " Creating 110 files" + - " Soft Deleting 40 files: L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.22, L0.23, L0.24, L0.26, L0.27, L0.28, L0.30, L0.31, L0.32, L0.34, L0.35, L0.36, L0.38, L0.39, L0.40, L0.42, L0.43, L0.44, L0.46, L0.47, L0.48, L0.50, L0.51, L0.52, L0.54, L0.55, L0.56, L0.58, L0.59, L0.60" + - " Creating 108 files" - "**** Simulation run 40, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.51[76,932] 1.04us |-----------------------------------------L0.51------------------------------------------|" + - "L0.21[173,950] 1.01us |-----------------------------------------L0.21------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - "**** Simulation run 41, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.52[42,986] 1.04us |-----------------------------------------L0.52------------------------------------------|" + - "L0.25[173,950] 1.01us |-----------------------------------------L0.25------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.04us 4mb |-------------L0.?--------------| " + - "L0.?[173,355] 1.01us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.01us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.01us 4mb |---------------L0.?----------------| " - "**** Simulation run 42, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.53[173,950] 1.04us |-----------------------------------------L0.53------------------------------------------|" + - "L0.29[173,950] 1.02us |-----------------------------------------L0.29------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - - "**** Simulation run 43, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " + - "**** Simulation run 43, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.54[50,629] 1.04us |-----------------------------------------L0.54------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.33[173,950] 1.02us |-----------------------------------------L0.33------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.04us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.04us 5mb |------------------L0.?------------------| " + - "L0.?[173,355] 1.02us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.02us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.02us 4mb |---------------L0.?----------------| " - "**** Simulation run 44, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.55[76,932] 1.04us |-----------------------------------------L0.55------------------------------------------|" + - "L0.37[173,950] 1.03us |-----------------------------------------L0.37------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.04us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.04us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - "**** Simulation run 45, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.56[42,986] 1.05us |-----------------------------------------L0.56------------------------------------------|" + - "L0.41[173,950] 1.03us |-----------------------------------------L0.41------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " + - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " - "**** Simulation run 46, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.57[173,950] 1.05us |-----------------------------------------L0.57------------------------------------------|" + - "L0.45[173,950] 1.03us |-----------------------------------------L0.45------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[173,355] 1.05us 2mb |-------L0.?--------| " - - "L0.?[356,629] 1.05us 4mb |------------L0.?-------------| " - - "L0.?[630,950] 1.05us 4mb |---------------L0.?----------------| " - - "**** Simulation run 47, type=split(VerticalSplit)(split_times=[355]). 1 Input Files, 10mb total:" + - "L0.?[173,355] 1.03us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.03us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.03us 4mb |---------------L0.?----------------| " + - "**** Simulation run 47, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.58[50,629] 1.05us |-----------------------------------------L0.58------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L0.49[173,950] 1.04us |-----------------------------------------L0.49------------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[50,355] 1.05us 5mb |--------------------L0.?---------------------| " - - "L0.?[356,629] 1.05us 5mb |------------------L0.?------------------| " + - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - "**** Simulation run 48, type=split(VerticalSplit)(split_times=[355, 629]). 1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.59[76,932] 1.05us |-----------------------------------------L0.59------------------------------------------|" + - "L0.53[173,950] 1.04us |-----------------------------------------L0.53------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[76,355] 1.05us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.05us 3mb |-----------L0.?-----------| " - - "L0.?[630,932] 1.05us 4mb |------------L0.?-------------| " + - "L0.?[173,355] 1.04us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.04us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.04us 4mb |---------------L0.?----------------| " - "**** Simulation run 49, type=split(VerticalSplit)(split_times=[355, 629]). 
1 Input Files, 10mb total:" - "L0, all files 10mb " - - "L0.60[42,986] 1.05us |-----------------------------------------L0.60------------------------------------------|" + - "L0.57[173,950] 1.05us |-----------------------------------------L0.57------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L0 " - - "L0.?[42,355] 1.05us 3mb |-----------L0.?------------| " - - "L0.?[356,629] 1.05us 3mb |----------L0.?----------| " - - "L0.?[630,986] 1.05us 4mb |-------------L0.?--------------| " + - "L0.?[173,355] 1.05us 2mb |-------L0.?--------| " + - "L0.?[356,629] 1.05us 4mb |------------L0.?-------------| " + - "L0.?[630,950] 1.05us 4mb |---------------L0.?----------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.51, L0.52, L0.53, L0.54, L0.55, L0.56, L0.57, L0.58, L0.59, L0.60" - - " Creating 28 files" + - " Soft Deleting 10 files: L0.21, L0.25, L0.29, L0.33, L0.37, L0.41, L0.45, L0.49, L0.53, L0.57" + - " Creating 30 files" - "**** Simulation run 50, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" - "L0 " - - "L0.61[76,355] 1us 3mb |------------------------------------L0.61-------------------------------------| " + - "L0.124[76,355] 1us 3mb |------------------------------------L0.124------------------------------------| " + - "L0.61[42,355] 1us 3mb |-----------------------------------------L0.61------------------------------------------|" + - "L0.163[173,355] 1us 2mb |----------------------L0.163----------------------| " + - "L0.100[50,355] 1us 5mb |---------------------------------------L0.100----------------------------------------| " + - "L0.127[76,355] 1us 3mb |------------------------------------L0.127------------------------------------| " - "L0.64[42,355] 1us 3mb |-----------------------------------------L0.64------------------------------------------|" - - "L0.67[173,355] 1us 2mb |----------------------L0.67-----------------------| " - - "L0.70[50,355] 1us 5mb |----------------------------------------L0.70----------------------------------------| " - - "L0.72[76,355] 1us 3mb |------------------------------------L0.72-------------------------------------| " - - "L0.75[42,355] 1us 3mb |-----------------------------------------L0.75------------------------------------------|" - - "L0.78[173,355] 1.01us 2mb |----------------------L0.78-----------------------| " - - "L0.81[50,355] 1.01us 5mb |----------------------------------------L0.81----------------------------------------| " - - "L0.83[76,355] 1.01us 3mb |------------------------------------L0.83-------------------------------------| " - - "L0.86[42,355] 1.01us 3mb |-----------------------------------------L0.86------------------------------------------|" + - "L0.166[173,355] 1.01us 2mb |----------------------L0.166----------------------| " + - "L0.102[50,355] 1.01us 5mb |---------------------------------------L0.102----------------------------------------| " + - "L0.130[76,355] 1.01us 3mb |------------------------------------L0.130------------------------------------| " + - "L0.67[42,355] 1.01us 3mb |-----------------------------------------L0.67------------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" - "L0, all files 35mb " - "L0.?[42,355] 1.01us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.61, L0.64, L0.67, L0.70, L0.72, L0.75, L0.78, L0.81, L0.83, L0.86" + - " Soft 
Deleting 10 files: L0.61, L0.64, L0.67, L0.100, L0.102, L0.124, L0.127, L0.130, L0.163, L0.166" - " Creating 1 files" - - "**** Simulation run 51, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" - - "L0 " - - "L0.62[356,629] 1us 3mb |-----------------------------------------L0.62------------------------------------------|" - - "L0.65[356,629] 1us 3mb |-----------------------------------------L0.65------------------------------------------|" - - "L0.68[356,629] 1us 4mb |-----------------------------------------L0.68------------------------------------------|" - - "L0.71[356,629] 1us 5mb |-----------------------------------------L0.71------------------------------------------|" - - "L0.73[356,629] 1us 3mb |-----------------------------------------L0.73------------------------------------------|" - - "L0.76[356,629] 1us 3mb |-----------------------------------------L0.76------------------------------------------|" - - "L0.79[356,629] 1.01us 4mb|-----------------------------------------L0.79------------------------------------------|" - - "L0.82[356,629] 1.01us 5mb|-----------------------------------------L0.82------------------------------------------|" - - "L0.84[356,629] 1.01us 3mb|-----------------------------------------L0.84------------------------------------------|" - - "L0.87[356,629] 1.01us 3mb|-----------------------------------------L0.87------------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" - - "L0, all files 35mb " - - "L0.?[356,629] 1.01us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 51, type=compact(ManySmallFiles). 10 Input Files, 36mb total:" + - "L0 " + - "L0.169[173,355] 1.01us 2mb |----------------------L0.169----------------------| " + - "L0.104[50,355] 1.01us 5mb |---------------------------------------L0.104----------------------------------------| " + - "L0.133[76,355] 1.01us 3mb |------------------------------------L0.133------------------------------------| " + - "L0.70[42,355] 1.01us 3mb |-----------------------------------------L0.70------------------------------------------|" + - "L0.172[173,355] 1.01us 2mb |----------------------L0.172----------------------| " + - "L0.106[50,355] 1.01us 5mb |---------------------------------------L0.106----------------------------------------| " + - "L0.136[76,355] 1.02us 3mb |------------------------------------L0.136------------------------------------| " + - "L0.73[42,355] 1.02us 3mb |-----------------------------------------L0.73------------------------------------------|" + - "L0.175[173,355] 1.02us 2mb |----------------------L0.175----------------------| " + - "L0.108[50,355] 1.02us 5mb |---------------------------------------L0.108----------------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 36mb total:" + - "L0, all files 36mb " + - "L0.?[42,355] 1.02us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.62, L0.65, L0.68, L0.71, L0.73, L0.76, L0.79, L0.82, L0.84, L0.87" + - " Soft Deleting 10 files: L0.70, L0.73, L0.104, L0.106, L0.108, L0.133, L0.136, L0.169, L0.172, L0.175" - " Creating 1 files" - - "**** Simulation run 52, type=compact(ManySmallFiles). 
10 Input Files, 38mb total:" - - "L0 " - - "L0.63[630,932] 1us 4mb |----------------------------------L0.63-----------------------------------| " - - "L0.66[630,986] 1us 4mb |-----------------------------------------L0.66------------------------------------------|" - - "L0.69[630,950] 1us 4mb |------------------------------------L0.69-------------------------------------| " - - "L0.74[630,932] 1us 4mb |----------------------------------L0.74-----------------------------------| " - - "L0.77[630,986] 1us 4mb |-----------------------------------------L0.77------------------------------------------|" - - "L0.80[630,950] 1.01us 4mb|------------------------------------L0.80-------------------------------------| " - - "L0.85[630,932] 1.01us 4mb|----------------------------------L0.85-----------------------------------| " - - "L0.88[630,986] 1.01us 4mb|-----------------------------------------L0.88------------------------------------------|" - - "L0.91[630,950] 1.01us 4mb|------------------------------------L0.91-------------------------------------| " - - "L0.96[630,932] 1.01us 4mb|----------------------------------L0.96-----------------------------------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 38mb total:" - - "L0, all files 38mb " - - "L0.?[630,986] 1.01us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 52, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.139[76,355] 1.02us 3mb |------------------------------------L0.139------------------------------------| " + - "L0.76[42,355] 1.02us 3mb |-----------------------------------------L0.76------------------------------------------|" + - "L0.178[173,355] 1.02us 2mb |----------------------L0.178----------------------| " + - "L0.110[50,355] 1.02us 5mb |---------------------------------------L0.110----------------------------------------| " + - "L0.142[76,355] 1.02us 3mb |------------------------------------L0.142------------------------------------| " + - "L0.79[42,355] 1.02us 3mb |-----------------------------------------L0.79------------------------------------------|" + - "L0.181[173,355] 1.03us 2mb |----------------------L0.181----------------------| " + - "L0.112[50,355] 1.03us 5mb |---------------------------------------L0.112----------------------------------------| " + - "L0.145[76,355] 1.03us 3mb |------------------------------------L0.145------------------------------------| " + - "L0.82[42,355] 1.03us 3mb |-----------------------------------------L0.82------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " + - "L0.?[42,355] 1.03us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.63, L0.66, L0.69, L0.74, L0.77, L0.80, L0.85, L0.88, L0.91, L0.96" + - " Soft Deleting 10 files: L0.76, L0.79, L0.82, L0.110, L0.112, L0.139, L0.142, L0.145, L0.178, L0.181" - " Creating 1 files" - - "**** Simulation run 53, type=compact(ManySmallFiles). 
10 Input Files, 66mb total:" - - "L0 " - - "L0.199[42,355] 1.01us 35mb|-----------------------------------------L0.199-----------------------------------------|" - - "L0.89[173,355] 1.01us 2mb |----------------------L0.89-----------------------| " - - "L0.92[50,355] 1.01us 5mb |----------------------------------------L0.92----------------------------------------| " - - "L0.94[76,355] 1.01us 3mb |------------------------------------L0.94-------------------------------------| " - - "L0.97[42,355] 1.01us 3mb |-----------------------------------------L0.97------------------------------------------|" - - "L0.100[173,355] 1.01us 2mb |----------------------L0.100----------------------| " - - "L0.103[50,355] 1.01us 5mb |---------------------------------------L0.103----------------------------------------| " - - "L0.105[76,355] 1.02us 3mb |------------------------------------L0.105------------------------------------| " - - "L0.108[42,355] 1.02us 3mb|-----------------------------------------L0.108-----------------------------------------|" - - "L0.111[173,355] 1.02us 2mb |----------------------L0.111----------------------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 66mb total:" - - "L0, all files 66mb " - - "L0.?[42,355] 1.02us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 53, type=compact(ManySmallFiles). 10 Input Files, 36mb total:" + - "L0 " + - "L0.184[173,355] 1.03us 2mb |----------------------L0.184----------------------| " + - "L0.114[50,355] 1.03us 5mb |---------------------------------------L0.114----------------------------------------| " + - "L0.148[76,355] 1.03us 3mb |------------------------------------L0.148------------------------------------| " + - "L0.85[42,355] 1.03us 3mb |-----------------------------------------L0.85------------------------------------------|" + - "L0.187[173,355] 1.03us 2mb |----------------------L0.187----------------------| " + - "L0.116[50,355] 1.03us 5mb |---------------------------------------L0.116----------------------------------------| " + - "L0.151[76,355] 1.04us 3mb |------------------------------------L0.151------------------------------------| " + - "L0.88[42,355] 1.04us 3mb |-----------------------------------------L0.88------------------------------------------|" + - "L0.190[173,355] 1.04us 2mb |----------------------L0.190----------------------| " + - "L0.118[50,355] 1.04us 5mb |---------------------------------------L0.118----------------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 36mb total:" + - "L0, all files 36mb " + - "L0.?[42,355] 1.04us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.89, L0.92, L0.94, L0.97, L0.100, L0.103, L0.105, L0.108, L0.111, L0.199" + - " Soft Deleting 10 files: L0.85, L0.88, L0.114, L0.116, L0.118, L0.148, L0.151, L0.184, L0.187, L0.190" - " Creating 1 files" - - "**** Simulation run 54, type=compact(ManySmallFiles). 
10 Input Files, 67mb total:" - - "L0 " - - "L0.200[356,629] 1.01us 35mb|-----------------------------------------L0.200-----------------------------------------|" - - "L0.90[356,629] 1.01us 4mb|-----------------------------------------L0.90------------------------------------------|" - - "L0.93[356,629] 1.01us 5mb|-----------------------------------------L0.93------------------------------------------|" - - "L0.95[356,629] 1.01us 3mb|-----------------------------------------L0.95------------------------------------------|" - - "L0.98[356,629] 1.01us 3mb|-----------------------------------------L0.98------------------------------------------|" - - "L0.101[356,629] 1.01us 4mb|-----------------------------------------L0.101-----------------------------------------|" - - "L0.104[356,629] 1.01us 5mb|-----------------------------------------L0.104-----------------------------------------|" - - "L0.106[356,629] 1.02us 3mb|-----------------------------------------L0.106-----------------------------------------|" - - "L0.109[356,629] 1.02us 3mb|-----------------------------------------L0.109-----------------------------------------|" - - "L0.112[356,629] 1.02us 4mb|-----------------------------------------L0.112-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 67mb total:" - - "L0, all files 67mb " - - "L0.?[356,629] 1.02us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 54, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.154[76,355] 1.04us 3mb |------------------------------------L0.154------------------------------------| " + - "L0.91[42,355] 1.04us 3mb |-----------------------------------------L0.91------------------------------------------|" + - "L0.193[173,355] 1.04us 2mb |----------------------L0.193----------------------| " + - "L0.120[50,355] 1.04us 5mb |---------------------------------------L0.120----------------------------------------| " + - "L0.157[76,355] 1.04us 3mb |------------------------------------L0.157------------------------------------| " + - "L0.94[42,355] 1.05us 3mb |-----------------------------------------L0.94------------------------------------------|" + - "L0.196[173,355] 1.05us 2mb |----------------------L0.196----------------------| " + - "L0.122[50,355] 1.05us 5mb |---------------------------------------L0.122----------------------------------------| " + - "L0.160[76,355] 1.05us 3mb |------------------------------------L0.160------------------------------------| " + - "L0.97[42,355] 1.05us 3mb |-----------------------------------------L0.97------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " + - "L0.?[42,355] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.90, L0.93, L0.95, L0.98, L0.101, L0.104, L0.106, L0.109, L0.112, L0.200" + - " Soft Deleting 10 files: L0.91, L0.94, L0.97, L0.120, L0.122, L0.154, L0.157, L0.160, L0.193, L0.196" - " Creating 1 files" - - "**** Simulation run 55, type=compact(ManySmallFiles). 
10 Input Files, 72mb total:" - - "L0 " - - "L0.201[630,986] 1.01us 38mb|-----------------------------------------L0.201-----------------------------------------|" - - "L0.99[630,986] 1.01us 4mb|-----------------------------------------L0.99------------------------------------------|" - - "L0.102[630,950] 1.01us 4mb|------------------------------------L0.102------------------------------------| " - - "L0.107[630,932] 1.02us 4mb|----------------------------------L0.107----------------------------------| " - - "L0.110[630,986] 1.02us 4mb|-----------------------------------------L0.110-----------------------------------------|" - - "L0.113[630,950] 1.02us 4mb|------------------------------------L0.113------------------------------------| " - - "L0.118[630,932] 1.02us 4mb|----------------------------------L0.118----------------------------------| " - - "L0.121[630,986] 1.02us 4mb|-----------------------------------------L0.121-----------------------------------------|" - - "L0.124[630,950] 1.02us 4mb|------------------------------------L0.124------------------------------------| " - - "L0.129[630,932] 1.02us 4mb|----------------------------------L0.129----------------------------------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 72mb total:" - - "L0, all files 72mb " - - "L0.?[630,986] 1.02us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 55, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.125[356,629] 1us 3mb |-----------------------------------------L0.125-----------------------------------------|" + - "L0.62[356,629] 1us 3mb |-----------------------------------------L0.62------------------------------------------|" + - "L0.164[356,629] 1us 4mb |-----------------------------------------L0.164-----------------------------------------|" + - "L0.101[356,629] 1us 5mb |-----------------------------------------L0.101-----------------------------------------|" + - "L0.128[356,629] 1us 3mb |-----------------------------------------L0.128-----------------------------------------|" + - "L0.65[356,629] 1us 3mb |-----------------------------------------L0.65------------------------------------------|" + - "L0.167[356,629] 1.01us 4mb|-----------------------------------------L0.167-----------------------------------------|" + - "L0.103[356,629] 1.01us 5mb|-----------------------------------------L0.103-----------------------------------------|" + - "L0.131[356,629] 1.01us 3mb|-----------------------------------------L0.131-----------------------------------------|" + - "L0.68[356,629] 1.01us 3mb|-----------------------------------------L0.68------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " + - "L0.?[356,629] 1.01us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.99, L0.102, L0.107, L0.110, L0.113, L0.118, L0.121, L0.124, L0.129, L0.201" + - " Soft Deleting 10 files: L0.62, L0.65, L0.68, L0.101, L0.103, L0.125, L0.128, L0.131, L0.164, L0.167" - " Creating 1 files" - "**** Simulation run 56, type=compact(ManySmallFiles). 
10 Input Files, 37mb total:" - "L0 " - - "L0.114[50,355] 1.02us 5mb |---------------------------------------L0.114----------------------------------------| " - - "L0.116[76,355] 1.02us 3mb |------------------------------------L0.116------------------------------------| " - - "L0.119[42,355] 1.02us 3mb|-----------------------------------------L0.119-----------------------------------------|" - - "L0.122[173,355] 1.02us 2mb |----------------------L0.122----------------------| " - - "L0.125[50,355] 1.02us 5mb |---------------------------------------L0.125----------------------------------------| " - - "L0.127[76,355] 1.02us 3mb |------------------------------------L0.127------------------------------------| " - - "L0.130[42,355] 1.02us 3mb|-----------------------------------------L0.130-----------------------------------------|" - - "L0.133[173,355] 1.03us 2mb |----------------------L0.133----------------------| " - - "L0.136[50,355] 1.03us 5mb |---------------------------------------L0.136----------------------------------------| " - - "L0.138[76,355] 1.03us 3mb |------------------------------------L0.138------------------------------------| " + - "L0.170[356,629] 1.01us 4mb|-----------------------------------------L0.170-----------------------------------------|" + - "L0.105[356,629] 1.01us 5mb|-----------------------------------------L0.105-----------------------------------------|" + - "L0.134[356,629] 1.01us 3mb|-----------------------------------------L0.134-----------------------------------------|" + - "L0.71[356,629] 1.01us 3mb|-----------------------------------------L0.71------------------------------------------|" + - "L0.173[356,629] 1.01us 4mb|-----------------------------------------L0.173-----------------------------------------|" + - "L0.107[356,629] 1.01us 5mb|-----------------------------------------L0.107-----------------------------------------|" + - "L0.137[356,629] 1.02us 3mb|-----------------------------------------L0.137-----------------------------------------|" + - "L0.74[356,629] 1.02us 3mb|-----------------------------------------L0.74------------------------------------------|" + - "L0.176[356,629] 1.02us 4mb|-----------------------------------------L0.176-----------------------------------------|" + - "L0.109[356,629] 1.02us 5mb|-----------------------------------------L0.109-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 37mb total:" - "L0, all files 37mb " - - "L0.?[42,355] 1.03us |------------------------------------------L0.?------------------------------------------|" + - "L0.?[356,629] 1.02us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.114, L0.116, L0.119, L0.122, L0.125, L0.127, L0.130, L0.133, L0.136, L0.138" + - " Soft Deleting 10 files: L0.71, L0.74, L0.105, L0.107, L0.109, L0.134, L0.137, L0.170, L0.173, L0.176" - " Creating 1 files" - - "**** Simulation run 57, type=compact(ManySmallFiles). 
10 Input Files, 37mb total:" - - "L0 " - - "L0.115[356,629] 1.02us 5mb|-----------------------------------------L0.115-----------------------------------------|" - - "L0.117[356,629] 1.02us 3mb|-----------------------------------------L0.117-----------------------------------------|" - - "L0.120[356,629] 1.02us 3mb|-----------------------------------------L0.120-----------------------------------------|" - - "L0.123[356,629] 1.02us 4mb|-----------------------------------------L0.123-----------------------------------------|" - - "L0.126[356,629] 1.02us 5mb|-----------------------------------------L0.126-----------------------------------------|" - - "L0.128[356,629] 1.02us 3mb|-----------------------------------------L0.128-----------------------------------------|" - - "L0.131[356,629] 1.02us 3mb|-----------------------------------------L0.131-----------------------------------------|" - - "L0.134[356,629] 1.03us 4mb|-----------------------------------------L0.134-----------------------------------------|" - - "L0.137[356,629] 1.03us 5mb|-----------------------------------------L0.137-----------------------------------------|" - - "L0.139[356,629] 1.03us 3mb|-----------------------------------------L0.139-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 37mb total:" - - "L0, all files 37mb " + - "**** Simulation run 57, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.140[356,629] 1.02us 3mb|-----------------------------------------L0.140-----------------------------------------|" + - "L0.77[356,629] 1.02us 3mb|-----------------------------------------L0.77------------------------------------------|" + - "L0.179[356,629] 1.02us 4mb|-----------------------------------------L0.179-----------------------------------------|" + - "L0.111[356,629] 1.02us 5mb|-----------------------------------------L0.111-----------------------------------------|" + - "L0.143[356,629] 1.02us 3mb|-----------------------------------------L0.143-----------------------------------------|" + - "L0.80[356,629] 1.02us 3mb|-----------------------------------------L0.80------------------------------------------|" + - "L0.182[356,629] 1.03us 4mb|-----------------------------------------L0.182-----------------------------------------|" + - "L0.113[356,629] 1.03us 5mb|-----------------------------------------L0.113-----------------------------------------|" + - "L0.146[356,629] 1.03us 3mb|-----------------------------------------L0.146-----------------------------------------|" + - "L0.83[356,629] 1.03us 3mb|-----------------------------------------L0.83------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " - "L0.?[356,629] 1.03us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.115, L0.117, L0.120, L0.123, L0.126, L0.128, L0.131, L0.134, L0.137, L0.139" + - " Soft Deleting 10 files: L0.77, L0.80, L0.83, L0.111, L0.113, L0.140, L0.143, L0.146, L0.179, L0.182" - " Creating 1 files" - - "**** Simulation run 58, type=compact(ManySmallFiles). 
10 Input Files, 38mb total:" - - "L0 " - - "L0.132[630,986] 1.02us 4mb|-----------------------------------------L0.132-----------------------------------------|" - - "L0.135[630,950] 1.03us 4mb|------------------------------------L0.135------------------------------------| " - - "L0.140[630,932] 1.03us 4mb|----------------------------------L0.140----------------------------------| " - - "L0.143[630,986] 1.03us 4mb|-----------------------------------------L0.143-----------------------------------------|" - - "L0.146[630,950] 1.03us 4mb|------------------------------------L0.146------------------------------------| " - - "L0.151[630,932] 1.03us 4mb|----------------------------------L0.151----------------------------------| " - - "L0.154[630,986] 1.03us 4mb|-----------------------------------------L0.154-----------------------------------------|" - - "L0.157[630,950] 1.03us 4mb|------------------------------------L0.157------------------------------------| " - - "L0.162[630,932] 1.04us 4mb|----------------------------------L0.162----------------------------------| " - - "L0.165[630,986] 1.04us 4mb|-----------------------------------------L0.165-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 38mb total:" - - "L0, all files 38mb " - - "L0.?[630,986] 1.04us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 58, type=compact(ManySmallFiles). 10 Input Files, 37mb total:" + - "L0 " + - "L0.185[356,629] 1.03us 4mb|-----------------------------------------L0.185-----------------------------------------|" + - "L0.115[356,629] 1.03us 5mb|-----------------------------------------L0.115-----------------------------------------|" + - "L0.149[356,629] 1.03us 3mb|-----------------------------------------L0.149-----------------------------------------|" + - "L0.86[356,629] 1.03us 3mb|-----------------------------------------L0.86------------------------------------------|" + - "L0.188[356,629] 1.03us 4mb|-----------------------------------------L0.188-----------------------------------------|" + - "L0.117[356,629] 1.03us 5mb|-----------------------------------------L0.117-----------------------------------------|" + - "L0.152[356,629] 1.04us 3mb|-----------------------------------------L0.152-----------------------------------------|" + - "L0.89[356,629] 1.04us 3mb|-----------------------------------------L0.89------------------------------------------|" + - "L0.191[356,629] 1.04us 4mb|-----------------------------------------L0.191-----------------------------------------|" + - "L0.119[356,629] 1.04us 5mb|-----------------------------------------L0.119-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 37mb total:" + - "L0, all files 37mb " + - "L0.?[356,629] 1.04us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.132, L0.135, L0.140, L0.143, L0.146, L0.151, L0.154, L0.157, L0.162, L0.165" + - " Soft Deleting 10 files: L0.86, L0.89, L0.115, L0.117, L0.119, L0.149, L0.152, L0.185, L0.188, L0.191" - " Creating 1 files" - - "**** Simulation run 59, type=compact(ManySmallFiles). 
10 Input Files, 69mb total:" - - "L0 " - - "L0.205[42,355] 1.03us 37mb|-----------------------------------------L0.205-----------------------------------------|" - - "L0.141[42,355] 1.03us 3mb|-----------------------------------------L0.141-----------------------------------------|" - - "L0.144[173,355] 1.03us 2mb |----------------------L0.144----------------------| " - - "L0.147[50,355] 1.03us 5mb |---------------------------------------L0.147----------------------------------------| " - - "L0.149[76,355] 1.03us 3mb |------------------------------------L0.149------------------------------------| " - - "L0.152[42,355] 1.03us 3mb|-----------------------------------------L0.152-----------------------------------------|" - - "L0.155[173,355] 1.03us 2mb |----------------------L0.155----------------------| " - - "L0.158[50,355] 1.03us 5mb |---------------------------------------L0.158----------------------------------------| " - - "L0.160[76,355] 1.04us 3mb |------------------------------------L0.160------------------------------------| " - - "L0.163[42,355] 1.04us 3mb|-----------------------------------------L0.163-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 69mb total:" - - "L0, all files 69mb " - - "L0.?[42,355] 1.04us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 59, type=compact(ManySmallFiles). 10 Input Files, 35mb total:" + - "L0 " + - "L0.155[356,629] 1.04us 3mb|-----------------------------------------L0.155-----------------------------------------|" + - "L0.92[356,629] 1.04us 3mb|-----------------------------------------L0.92------------------------------------------|" + - "L0.194[356,629] 1.04us 4mb|-----------------------------------------L0.194-----------------------------------------|" + - "L0.121[356,629] 1.04us 5mb|-----------------------------------------L0.121-----------------------------------------|" + - "L0.158[356,629] 1.04us 3mb|-----------------------------------------L0.158-----------------------------------------|" + - "L0.95[356,629] 1.05us 3mb|-----------------------------------------L0.95------------------------------------------|" + - "L0.197[356,629] 1.05us 4mb|-----------------------------------------L0.197-----------------------------------------|" + - "L0.123[356,629] 1.05us 5mb|-----------------------------------------L0.123-----------------------------------------|" + - "L0.161[356,629] 1.05us 3mb|-----------------------------------------L0.161-----------------------------------------|" + - "L0.98[356,629] 1.05us 3mb|-----------------------------------------L0.98------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 35mb total:" + - "L0, all files 35mb " + - "L0.?[356,629] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.141, L0.144, L0.147, L0.149, L0.152, L0.155, L0.158, L0.160, L0.163, L0.205" + - " Soft Deleting 10 files: L0.92, L0.95, L0.98, L0.121, L0.123, L0.155, L0.158, L0.161, L0.194, L0.197" - " Creating 1 files" - - "**** Simulation run 60, type=compact(ManySmallFiles). 
10 Input Files, 68mb total:" - - "L0 " - - "L0.206[356,629] 1.03us 37mb|-----------------------------------------L0.206-----------------------------------------|" - - "L0.142[356,629] 1.03us 3mb|-----------------------------------------L0.142-----------------------------------------|" - - "L0.145[356,629] 1.03us 4mb|-----------------------------------------L0.145-----------------------------------------|" - - "L0.148[356,629] 1.03us 5mb|-----------------------------------------L0.148-----------------------------------------|" - - "L0.150[356,629] 1.03us 3mb|-----------------------------------------L0.150-----------------------------------------|" - - "L0.153[356,629] 1.03us 3mb|-----------------------------------------L0.153-----------------------------------------|" - - "L0.156[356,629] 1.03us 4mb|-----------------------------------------L0.156-----------------------------------------|" - - "L0.159[356,629] 1.03us 5mb|-----------------------------------------L0.159-----------------------------------------|" - - "L0.161[356,629] 1.04us 3mb|-----------------------------------------L0.161-----------------------------------------|" - - "L0.164[356,629] 1.04us 3mb|-----------------------------------------L0.164-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 68mb total:" - - "L0, all files 68mb " - - "L0.?[356,629] 1.04us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 60, type=compact(ManySmallFiles). 10 Input Files, 38mb total:" + - "L0 " + - "L0.126[630,932] 1us 4mb |----------------------------------L0.126----------------------------------| " + - "L0.63[630,986] 1us 4mb |-----------------------------------------L0.63------------------------------------------|" + - "L0.165[630,950] 1us 4mb |------------------------------------L0.165------------------------------------| " + - "L0.129[630,932] 1us 4mb |----------------------------------L0.129----------------------------------| " + - "L0.66[630,986] 1us 4mb |-----------------------------------------L0.66------------------------------------------|" + - "L0.168[630,950] 1.01us 4mb|------------------------------------L0.168------------------------------------| " + - "L0.132[630,932] 1.01us 4mb|----------------------------------L0.132----------------------------------| " + - "L0.69[630,986] 1.01us 4mb|-----------------------------------------L0.69------------------------------------------|" + - "L0.171[630,950] 1.01us 4mb|------------------------------------L0.171------------------------------------| " + - "L0.135[630,932] 1.01us 4mb|----------------------------------L0.135----------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 38mb total:" + - "L0, all files 38mb " + - "L0.?[630,986] 1.01us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.142, L0.145, L0.148, L0.150, L0.153, L0.156, L0.159, L0.161, L0.164, L0.206" + - " Soft Deleting 10 files: L0.63, L0.66, L0.69, L0.126, L0.129, L0.132, L0.135, L0.165, L0.168, L0.171" - " Creating 1 files" - - "**** Simulation run 61, type=compact(ManySmallFiles). 
10 Input Files, 72mb total:" - - "L0 " - - "L0.207[630,986] 1.04us 38mb|-----------------------------------------L0.207-----------------------------------------|" - - "L0.168[630,950] 1.04us 4mb|------------------------------------L0.168------------------------------------| " - - "L0.173[630,932] 1.04us 4mb|----------------------------------L0.173----------------------------------| " - - "L0.176[630,986] 1.04us 4mb|-----------------------------------------L0.176-----------------------------------------|" - - "L0.179[630,950] 1.04us 4mb|------------------------------------L0.179------------------------------------| " - - "L0.184[630,932] 1.04us 4mb|----------------------------------L0.184----------------------------------| " - - "L0.187[630,986] 1.05us 4mb|-----------------------------------------L0.187-----------------------------------------|" - - "L0.190[630,950] 1.05us 4mb|------------------------------------L0.190------------------------------------| " - - "L0.195[630,932] 1.05us 4mb|----------------------------------L0.195----------------------------------| " - - "L0.198[630,986] 1.05us 4mb|-----------------------------------------L0.198-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 72mb total:" - - "L0, all files 72mb " - - "L0.?[630,986] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 61, type=compact(ManySmallFiles). 10 Input Files, 38mb total:" + - "L0 " + - "L0.72[630,986] 1.01us 4mb|-----------------------------------------L0.72------------------------------------------|" + - "L0.174[630,950] 1.01us 4mb|------------------------------------L0.174------------------------------------| " + - "L0.138[630,932] 1.02us 4mb|----------------------------------L0.138----------------------------------| " + - "L0.75[630,986] 1.02us 4mb|-----------------------------------------L0.75------------------------------------------|" + - "L0.177[630,950] 1.02us 4mb|------------------------------------L0.177------------------------------------| " + - "L0.141[630,932] 1.02us 4mb|----------------------------------L0.141----------------------------------| " + - "L0.78[630,986] 1.02us 4mb|-----------------------------------------L0.78------------------------------------------|" + - "L0.180[630,950] 1.02us 4mb|------------------------------------L0.180------------------------------------| " + - "L0.144[630,932] 1.02us 4mb|----------------------------------L0.144----------------------------------| " + - "L0.81[630,986] 1.02us 4mb|-----------------------------------------L0.81------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 38mb total:" + - "L0, all files 38mb " + - "L0.?[630,986] 1.02us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.168, L0.173, L0.176, L0.179, L0.184, L0.187, L0.190, L0.195, L0.198, L0.207" + - " Soft Deleting 10 files: L0.72, L0.75, L0.78, L0.81, L0.138, L0.141, L0.144, L0.174, L0.177, L0.180" - " Creating 1 files" - - "**** Simulation run 62, type=compact(ManySmallFiles). 
10 Input Files, 36mb total:" - - "L0 " - - "L0.166[173,355] 1.04us 2mb |----------------------L0.166----------------------| " - - "L0.169[50,355] 1.04us 5mb |---------------------------------------L0.169----------------------------------------| " - - "L0.171[76,355] 1.04us 3mb |------------------------------------L0.171------------------------------------| " - - "L0.174[42,355] 1.04us 3mb|-----------------------------------------L0.174-----------------------------------------|" - - "L0.177[173,355] 1.04us 2mb |----------------------L0.177----------------------| " - - "L0.180[50,355] 1.04us 5mb |---------------------------------------L0.180----------------------------------------| " - - "L0.182[76,355] 1.04us 3mb |------------------------------------L0.182------------------------------------| " - - "L0.185[42,355] 1.05us 3mb|-----------------------------------------L0.185-----------------------------------------|" - - "L0.188[173,355] 1.05us 2mb |----------------------L0.188----------------------| " - - "L0.191[50,355] 1.05us 5mb |---------------------------------------L0.191----------------------------------------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 36mb total:" - - "L0, all files 36mb " - - "L0.?[42,355] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 62, type=compact(ManySmallFiles). 10 Input Files, 38mb total:" + - "L0 " + - "L0.183[630,950] 1.03us 4mb|------------------------------------L0.183------------------------------------| " + - "L0.147[630,932] 1.03us 4mb|----------------------------------L0.147----------------------------------| " + - "L0.84[630,986] 1.03us 4mb|-----------------------------------------L0.84------------------------------------------|" + - "L0.186[630,950] 1.03us 4mb|------------------------------------L0.186------------------------------------| " + - "L0.150[630,932] 1.03us 4mb|----------------------------------L0.150----------------------------------| " + - "L0.87[630,986] 1.03us 4mb|-----------------------------------------L0.87------------------------------------------|" + - "L0.189[630,950] 1.03us 4mb|------------------------------------L0.189------------------------------------| " + - "L0.153[630,932] 1.04us 4mb|----------------------------------L0.153----------------------------------| " + - "L0.90[630,986] 1.04us 4mb|-----------------------------------------L0.90------------------------------------------|" + - "L0.192[630,950] 1.04us 4mb|------------------------------------L0.192------------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 38mb total:" + - "L0, all files 38mb " + - "L0.?[630,986] 1.04us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.166, L0.169, L0.171, L0.174, L0.177, L0.180, L0.182, L0.185, L0.188, L0.191" + - " Soft Deleting 10 files: L0.84, L0.87, L0.90, L0.147, L0.150, L0.153, L0.183, L0.186, L0.189, L0.192" - " Creating 1 files" - - "**** Simulation run 63, type=compact(ManySmallFiles). 
10 Input Files, 37mb total:" - - "L0 " - - "L0.167[356,629] 1.04us 4mb|-----------------------------------------L0.167-----------------------------------------|" - - "L0.170[356,629] 1.04us 5mb|-----------------------------------------L0.170-----------------------------------------|" - - "L0.172[356,629] 1.04us 3mb|-----------------------------------------L0.172-----------------------------------------|" - - "L0.175[356,629] 1.04us 3mb|-----------------------------------------L0.175-----------------------------------------|" - - "L0.178[356,629] 1.04us 4mb|-----------------------------------------L0.178-----------------------------------------|" - - "L0.181[356,629] 1.04us 5mb|-----------------------------------------L0.181-----------------------------------------|" - - "L0.183[356,629] 1.04us 3mb|-----------------------------------------L0.183-----------------------------------------|" - - "L0.186[356,629] 1.05us 3mb|-----------------------------------------L0.186-----------------------------------------|" - - "L0.189[356,629] 1.05us 4mb|-----------------------------------------L0.189-----------------------------------------|" - - "L0.192[356,629] 1.05us 5mb|-----------------------------------------L0.192-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 37mb total:" - - "L0, all files 37mb " - - "L0.?[356,629] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 63, type=compact(ManySmallFiles). 8 Input Files, 30mb total:" + - "L0 " + - "L0.156[630,932] 1.04us 4mb|----------------------------------L0.156----------------------------------| " + - "L0.93[630,986] 1.04us 4mb|-----------------------------------------L0.93------------------------------------------|" + - "L0.195[630,950] 1.04us 4mb|------------------------------------L0.195------------------------------------| " + - "L0.159[630,932] 1.04us 4mb|----------------------------------L0.159----------------------------------| " + - "L0.96[630,986] 1.05us 4mb|-----------------------------------------L0.96------------------------------------------|" + - "L0.198[630,950] 1.05us 4mb|------------------------------------L0.198------------------------------------| " + - "L0.162[630,932] 1.05us 4mb|----------------------------------L0.162----------------------------------| " + - "L0.99[630,986] 1.05us 4mb|-----------------------------------------L0.99------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 30mb " + - "L0.?[630,986] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.167, L0.170, L0.172, L0.175, L0.178, L0.181, L0.183, L0.186, L0.189, L0.192" + - " Soft Deleting 8 files: L0.93, L0.96, L0.99, L0.156, L0.159, L0.162, L0.195, L0.198" - " Creating 1 files" - - "**** Simulation run 64, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[877]). 
2 Input Files, 145mb total:" - - "L0 " - - "L0.204[630,986] 1.02us 72mb|-----------------------------------------L0.204-----------------------------------------|" - - "L0.210[630,986] 1.05us 72mb|-----------------------------------------L0.210-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 145mb total:" - - "L1 " - - "L1.?[630,877] 1.05us 100mb|----------------------------L1.?----------------------------| " - - "L1.?[878,986] 1.05us 44mb |----------L1.?-----------| " - - "Committing partition 1:" - - " Soft Deleting 2 files: L0.204, L0.210" - - " Creating 2 files" - - "**** Simulation run 65, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[219]). 5 Input Files, 177mb total:" + - "**** Simulation run 64, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[219]). 5 Input Files, 177mb total:" - "L0 " - - "L0.196[42,355] 1.05us 3mb|-----------------------------------------L0.196-----------------------------------------|" - - "L0.193[76,355] 1.05us 3mb |------------------------------------L0.193------------------------------------| " - - "L0.208[42,355] 1.04us 69mb|-----------------------------------------L0.208-----------------------------------------|" - - "L0.202[42,355] 1.02us 66mb|-----------------------------------------L0.202-----------------------------------------|" - - "L0.211[42,355] 1.05us 36mb|-----------------------------------------L0.211-----------------------------------------|" + - "L0.203[42,355] 1.05us 35mb|-----------------------------------------L0.203-----------------------------------------|" + - "L0.202[42,355] 1.04us 36mb|-----------------------------------------L0.202-----------------------------------------|" + - "L0.201[42,355] 1.03us 35mb|-----------------------------------------L0.201-----------------------------------------|" + - "L0.200[42,355] 1.02us 36mb|-----------------------------------------L0.200-----------------------------------------|" + - "L0.199[42,355] 1.01us 35mb|-----------------------------------------L0.199-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 177mb total:" - "L1 " - "L1.?[42,219] 1.05us 100mb|----------------------L1.?----------------------| " - "L1.?[220,355] 1.05us 77mb |----------------L1.?----------------| " - "Committing partition 1:" - - " Soft Deleting 5 files: L0.193, L0.196, L0.202, L0.208, L0.211" + - " Soft Deleting 5 files: L0.199, L0.200, L0.201, L0.202, L0.203" - " Creating 2 files" - - "**** Simulation run 66, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[510]). 5 Input Files, 178mb total:" + - "**** Simulation run 65, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[510]). 
5 Input Files, 178mb total:" - "L0 " - - "L0.197[356,629] 1.05us 3mb|-----------------------------------------L0.197-----------------------------------------|" - - "L0.194[356,629] 1.05us 3mb|-----------------------------------------L0.194-----------------------------------------|" - - "L0.209[356,629] 1.04us 68mb|-----------------------------------------L0.209-----------------------------------------|" - - "L0.203[356,629] 1.02us 67mb|-----------------------------------------L0.203-----------------------------------------|" - - "L0.212[356,629] 1.05us 37mb|-----------------------------------------L0.212-----------------------------------------|" + - "L0.208[356,629] 1.05us 35mb|-----------------------------------------L0.208-----------------------------------------|" + - "L0.207[356,629] 1.04us 37mb|-----------------------------------------L0.207-----------------------------------------|" + - "L0.206[356,629] 1.03us 35mb|-----------------------------------------L0.206-----------------------------------------|" + - "L0.205[356,629] 1.02us 37mb|-----------------------------------------L0.205-----------------------------------------|" + - "L0.204[356,629] 1.01us 35mb|-----------------------------------------L0.204-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 178mb total:" - "L1 " - "L1.?[356,510] 1.05us 101mb|----------------------L1.?----------------------| " - "L1.?[511,629] 1.05us 77mb |----------------L1.?----------------| " - "Committing partition 1:" - - " Soft Deleting 5 files: L0.194, L0.197, L0.203, L0.209, L0.212" + - " Soft Deleting 5 files: L0.204, L0.205, L0.206, L0.207, L0.208" + - " Creating 2 files" + - "**** Simulation run 66, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[877]). 4 Input Files, 145mb total:" + - "L0 " + - "L0.212[630,986] 1.05us 30mb|-----------------------------------------L0.212-----------------------------------------|" + - "L0.211[630,986] 1.04us 38mb|-----------------------------------------L0.211-----------------------------------------|" + - "L0.210[630,986] 1.02us 38mb|-----------------------------------------L0.210-----------------------------------------|" + - "L0.209[630,986] 1.01us 38mb|-----------------------------------------L0.209-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 145mb total:" + - "L1 " + - "L1.?[630,877] 1.05us 100mb|----------------------------L1.?----------------------------| " + - "L1.?[878,986] 1.05us 44mb |----------L1.?-----------| " + - "Committing partition 1:" + - " Soft Deleting 4 files: L0.209, L0.210, L0.211, L0.212" - " Creating 2 files" - - "**** Simulation run 67, type=split(ReduceOverlap)(split_times=[599]). 1 Input Files, 77mb total:" + - "**** Simulation run 67, type=split(ReduceOverlap)(split_times=[899]). 1 Input Files, 44mb total:" + - "L1, all files 44mb " + - "L1.218[878,986] 1.05us |-----------------------------------------L1.218-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 44mb total:" + - "L1 " + - "L1.?[878,899] 1.05us 9mb |-----L1.?------| " + - "L1.?[900,986] 1.05us 35mb |--------------------------------L1.?---------------------------------| " + - "**** Simulation run 68, type=split(ReduceOverlap)(split_times=[699, 799]). 
1 Input Files, 100mb total:" + - "L1, all files 100mb " + - "L1.217[630,877] 1.05us |-----------------------------------------L1.217-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "L1 " + - "L1.?[630,699] 1.05us 28mb|---------L1.?----------| " + - "L1.?[700,799] 1.05us 41mb |---------------L1.?---------------| " + - "L1.?[800,877] 1.05us 32mb |-----------L1.?-----------| " + - "**** Simulation run 69, type=split(ReduceOverlap)(split_times=[599]). 1 Input Files, 77mb total:" - "L1, all files 77mb " - - "L1.218[511,629] 1.05us |-----------------------------------------L1.218-----------------------------------------|" + - "L1.216[511,629] 1.05us |-----------------------------------------L1.216-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 77mb total:" - "L1 " - "L1.?[511,599] 1.05us 58mb|------------------------------L1.?-------------------------------| " - "L1.?[600,629] 1.05us 20mb |--------L1.?--------| " - - "**** Simulation run 68, type=split(ReduceOverlap)(split_times=[399, 499]). 1 Input Files, 101mb total:" + - "**** Simulation run 70, type=split(ReduceOverlap)(split_times=[399, 499]). 1 Input Files, 101mb total:" - "L1, all files 101mb " - - "L1.217[356,510] 1.05us |-----------------------------------------L1.217-----------------------------------------|" + - "L1.215[356,510] 1.05us |-----------------------------------------L1.215-----------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - "L1.?[356,399] 1.05us 29mb|---------L1.?----------| " - "L1.?[400,499] 1.05us 65mb |-------------------------L1.?--------------------------| " - "L1.?[500,510] 1.05us 7mb |L1.?|" - - "**** Simulation run 69, type=split(ReduceOverlap)(split_times=[299]). 1 Input Files, 77mb total:" + - "**** Simulation run 71, type=split(ReduceOverlap)(split_times=[299]). 1 Input Files, 77mb total:" - "L1, all files 77mb " - - "L1.216[220,355] 1.05us |-----------------------------------------L1.216-----------------------------------------|" + - "L1.214[220,355] 1.05us |-----------------------------------------L1.214-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 77mb total:" - "L1 " - "L1.?[220,299] 1.05us 45mb|-----------------------L1.?-----------------------| " - "L1.?[300,355] 1.05us 32mb |---------------L1.?---------------| " - - "**** Simulation run 70, type=split(ReduceOverlap)(split_times=[99, 199]). 1 Input Files, 100mb total:" + - "**** Simulation run 72, type=split(ReduceOverlap)(split_times=[99, 199]). 1 Input Files, 100mb total:" - "L1, all files 100mb " - - "L1.215[42,219] 1.05us |-----------------------------------------L1.215-----------------------------------------|" + - "L1.213[42,219] 1.05us |-----------------------------------------L1.213-----------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - "L1.?[42,99] 1.05us 33mb |-----------L1.?-----------| " - "L1.?[100,199] 1.05us 56mb |----------------------L1.?----------------------| " - "L1.?[200,219] 1.05us 11mb |-L1.?--| " - - "**** Simulation run 71, type=split(ReduceOverlap)(split_times=[899]). 
1 Input Files, 44mb total:" - - "L1, all files 44mb " - - "L1.214[878,986] 1.05us |-----------------------------------------L1.214-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 44mb total:" - - "L1 " - - "L1.?[878,899] 1.05us 9mb |-----L1.?------| " - - "L1.?[900,986] 1.05us 35mb |--------------------------------L1.?---------------------------------| " - - "**** Simulation run 72, type=split(ReduceOverlap)(split_times=[699, 799]). 1 Input Files, 100mb total:" - - "L1, all files 100mb " - - "L1.213[630,877] 1.05us |-----------------------------------------L1.213-----------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - - "L1 " - - "L1.?[630,699] 1.05us 28mb|---------L1.?----------| " - - "L1.?[700,799] 1.05us 41mb |---------------L1.?---------------| " - - "L1.?[800,877] 1.05us 32mb |-----------L1.?-----------| " - "Committing partition 1:" - " Soft Deleting 6 files: L1.213, L1.214, L1.215, L1.216, L1.217, L1.218" - " Creating 15 files" - "**** Simulation run 73, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[69, 138]). 4 Input Files, 289mb total:" - "L1 " - - "L1.226[42,99] 1.05us 33mb |--------L1.226---------| " - - "L1.227[100,199] 1.05us 56mb |------------------L1.227------------------| " + - "L1.231[42,99] 1.05us 33mb |--------L1.231---------| " + - "L1.232[100,199] 1.05us 56mb |------------------L1.232------------------| " - "L2 " - "L2.1[0,99] 99ns 100mb |-------------------L2.1-------------------| " - "L2.2[100,199] 199ns 100mb |-------------------L2.2-------------------| " @@ -1656,12 +1705,12 @@ async fn random_backfill_over_l2s() { - "L2.?[70,138] 1.05us 100mb |------------L2.?------------| " - "L2.?[139,199] 1.05us 88mb |----------L2.?-----------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L2.1, L2.2, L1.226, L1.227" + - " Soft Deleting 4 files: L2.1, L2.2, L1.231, L1.232" - " Creating 3 files" - "**** Simulation run 74, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[264]). 3 Input Files, 156mb total:" - "L1 " - - "L1.228[200,219] 1.05us 11mb|----L1.228-----| " - - "L1.224[220,299] 1.05us 45mb |-------------------------------L1.224--------------------------------| " + - "L1.233[200,219] 1.05us 11mb|----L1.233-----| " + - "L1.229[220,299] 1.05us 45mb |-------------------------------L1.229--------------------------------| " - "L2 " - "L2.3[200,299] 299ns 100mb|-----------------------------------------L2.3------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:" @@ -1669,12 +1718,12 @@ async fn random_backfill_over_l2s() { - "L2.?[200,264] 1.05us 102mb|--------------------------L2.?--------------------------| " - "L2.?[265,299] 1.05us 55mb |------------L2.?------------| " - "Committing partition 1:" - - " Soft Deleting 3 files: L2.3, L1.224, L1.228" + - " Soft Deleting 3 files: L2.3, L1.229, L1.233" - " Creating 2 files" - "**** Simulation run 75, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[362]). 
3 Input Files, 160mb total:" - "L1 " - - "L1.225[300,355] 1.05us 32mb|--------------------L1.225---------------------| " - - "L1.221[356,399] 1.05us 29mb |---------------L1.221----------------| " + - "L1.230[300,355] 1.05us 32mb|--------------------L1.230---------------------| " + - "L1.226[356,399] 1.05us 29mb |---------------L1.226----------------| " - "L2 " - "L2.4[300,399] 399ns 100mb|-----------------------------------------L2.4------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" @@ -1682,11 +1731,11 @@ async fn random_backfill_over_l2s() { - "L2.?[300,362] 1.05us 101mb|-------------------------L2.?-------------------------| " - "L2.?[363,399] 1.05us 59mb |-------------L2.?-------------| " - "Committing partition 1:" - - " Soft Deleting 3 files: L2.4, L1.221, L1.225" + - " Soft Deleting 3 files: L2.4, L1.226, L1.230" - " Creating 2 files" - "**** Simulation run 76, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[460]). 2 Input Files, 165mb total:" - "L1 " - - "L1.222[400,499] 1.05us 65mb|----------------------------------------L1.222-----------------------------------------| " + - "L1.227[400,499] 1.05us 65mb|----------------------------------------L1.227-----------------------------------------| " - "L2 " - "L2.5[400,499] 499ns 100mb|-----------------------------------------L2.5------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 165mb total:" @@ -1694,12 +1743,12 @@ async fn random_backfill_over_l2s() { - "L2.?[400,460] 1.05us 101mb|------------------------L2.?------------------------| " - "L2.?[461,499] 1.05us 64mb |--------------L2.?--------------| " - "Committing partition 1:" - - " Soft Deleting 2 files: L2.5, L1.222" + - " Soft Deleting 2 files: L2.5, L1.227" - " Creating 2 files" - "**** Simulation run 77, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[560]). 3 Input Files, 165mb total:" - "L1 " - - "L1.223[500,510] 1.05us 7mb|L1.223-| " - - "L1.219[511,599] 1.05us 58mb |------------------------------------L1.219------------------------------------|" + - "L1.228[500,510] 1.05us 7mb|L1.228-| " + - "L1.224[511,599] 1.05us 58mb |------------------------------------L1.224------------------------------------|" - "L2 " - "L2.6[500,599] 599ns 100mb|-----------------------------------------L2.6------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 165mb total:" @@ -1707,13 +1756,13 @@ async fn random_backfill_over_l2s() { - "L2.?[500,560] 1.05us 101mb|------------------------L2.?------------------------| " - "L2.?[561,599] 1.05us 64mb |--------------L2.?--------------| " - "Committing partition 1:" - - " Soft Deleting 3 files: L2.6, L1.219, L1.223" + - " Soft Deleting 3 files: L2.6, L1.224, L1.228" - " Creating 2 files" - "**** Simulation run 78, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[670, 740]). 
5 Input Files, 288mb total:" - "L1 " - - "L1.220[600,629] 1.05us 20mb|--L1.220---| " - - "L1.231[630,699] 1.05us 28mb |-----------L1.231------------| " - - "L1.232[700,799] 1.05us 41mb |------------------L1.232------------------| " + - "L1.225[600,629] 1.05us 20mb|--L1.225---| " + - "L1.221[630,699] 1.05us 28mb |-----------L1.221------------| " + - "L1.222[700,799] 1.05us 41mb |------------------L1.222------------------| " - "L2 " - "L2.7[600,699] 699ns 100mb|-------------------L2.7-------------------| " - "L2.8[700,799] 799ns 100mb |-------------------L2.8-------------------| " @@ -1723,13 +1772,13 @@ async fn random_backfill_over_l2s() { - "L2.?[671,740] 1.05us 101mb |------------L2.?-------------| " - "L2.?[741,799] 1.05us 85mb |----------L2.?----------| " - "Committing partition 1:" - - " Soft Deleting 5 files: L2.7, L2.8, L1.220, L1.231, L1.232" + - " Soft Deleting 5 files: L2.7, L2.8, L1.221, L1.222, L1.225" - " Creating 3 files" - - "**** Final Output Files (3.35gb written)" + - "**** Final Output Files (3.15gb written)" - "L1 " - - "L1.229[878,899] 1.05us 9mb |L1.229| " - - "L1.230[900,986] 1.05us 35mb |L1.230| " - - "L1.233[800,877] 1.05us 32mb |L1.233| " + - "L1.219[878,899] 1.05us 9mb |L1.219| " + - "L1.220[900,986] 1.05us 35mb |L1.220| " + - "L1.223[800,877] 1.05us 32mb |L1.223| " - "L2 " - "L2.9[800,899] 899ns 100mb |-L2.9-| " - "L2.10[900,999] 999ns 100mb |L2.10-| " @@ -1749,10 +1798,10 @@ async fn random_backfill_over_l2s() { - "L2.247[741,799] 1.05us 85mb |L2.247| " - "**** Breakdown of where bytes were written" - 1.2gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) + - 500mb written by compact(ManySmallFiles) - 500mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) - 500mb written by split(ReduceOverlap) - 500mb written by split(VerticalSplit) - - 707mb written by compact(ManySmallFiles) "### ); } @@ -3770,19 +3819,21 @@ async fn actual_case_from_catalog_1() { - "WARNING: file L0.161[3270000,3330000] 3.36ms 183mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.162[3300000,3380000] 3.4ms 231mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.163[3310000,3380000] 3.41ms 232mb exceeds soft limit 100mb by more than 50%" - - "**** Final Output Files (15.63gb written)" + - "**** Final Output Files (13.84gb written)" - "L2 " - - "L2.374[997570,1082662] 3.42ms 100mb |L2.374| " - - "L2.379[1260887,1305975] 3.42ms 78mb |L2.379| " - - "L2.380[1305976,1374631] 3.42ms 100mb |L2.380| " - - "L2.385[1755317,1807829] 3.42ms 100mb |L2.385| " - - "L2.390[1980000,2037321] 3.42ms 100mb |L2.390| " - - "L2.391[2037322,2070000] 3.42ms 57mb |L2.391| " - - "L2.392[2080000,2156222] 3.42ms 100mb |L2.392| " - - "L2.393[2156223,2229999] 3.42ms 97mb |L2.393| " - - "L2.394[2230000,2264878] 3.42ms 100mb |L2.394| " - - "L2.395[2264879,2299756] 3.42ms 100mb |L2.395| " - - "L2.402[2505050,2550000] 3.42ms 82mb |L2.402| " + - "L2.244[1980000,2037321] 3.42ms 100mb |L2.244| " + - "L2.245[2037322,2070000] 3.42ms 57mb |L2.245| " + - "L2.377[894767,968639] 3.42ms 100mb |L2.377| " + - "L2.382[1153380,1203173] 3.42ms 94mb |L2.382| " + - "L2.383[1203174,1260886] 3.42ms 100mb |L2.383| " + - "L2.384[1260887,1305975] 3.42ms 78mb |L2.384| " + - "L2.385[1305976,1374631] 3.42ms 100mb |L2.385| " + - "L2.394[1755317,1807829] 3.42ms 100mb |L2.394| " + - "L2.396[2080000,2156222] 3.42ms 100mb |L2.396| " + - "L2.397[2156223,2229999] 3.42ms 97mb |L2.397| " + - "L2.398[2230000,2264878] 3.42ms 100mb |L2.398| " + - "L2.399[2264879,2299756] 3.42ms 100mb 
|L2.399| " + - "L2.406[2505050,2550000] 3.42ms 82mb |L2.406| " - "L2.407[2550001,2616666] 3.42ms 93mb |L2.407| " - "L2.413[2760000,2799998] 3.42ms 87mb |L2.413| " - "L2.419[2931176,2964443] 3.42ms 100mb |L2.419| " @@ -3799,56 +3850,53 @@ async fn actual_case_from_catalog_1() { - "L2.437[3222149,3234702] 3.42ms 94mb |L2.437|" - "L2.438[3234703,3249276] 3.42ms 100mb |L2.438|" - "L2.439[3249277,3260584] 3.42ms 78mb |L2.439|" - - "L2.441[3323922,3335493] 3.42ms 100mb |L2.441|" - - "L2.445[3364113,3384822] 3.42ms 88mb |L2.445|" - - "L2.447[3260585,3278130] 3.42ms 100mb |L2.447|" - - "L2.506[729249,871903] 3.42ms 100mb |L2.506| " - - "L2.507[871904,997569] 3.42ms 88mb |L2.507| " - - "L2.508[10000,272071] 3.42ms 100mb|L2.508| " - - "L2.509[272072,534142] 3.42ms 100mb |L2.509| " - - "L2.510[534143,729248] 3.42ms 74mb |L2.510| " - - "L2.511[1082663,1143912] 3.42ms 100mb |L2.511| " - - "L2.512[1143913,1205161] 3.42ms 100mb |L2.512| " - - "L2.513[1205162,1260886] 3.42ms 91mb |L2.513| " - - "L2.514[1374632,1446111] 3.42ms 100mb |L2.514| " - - "L2.515[1446112,1517590] 3.42ms 100mb |L2.515| " - - "L2.516[1517591,1572539] 3.42ms 77mb |L2.516| " - - "L2.517[1572540,1635002] 3.42ms 100mb |L2.517| " - - "L2.518[1635003,1697464] 3.42ms 100mb |L2.518| " - - "L2.519[1697465,1755316] 3.42ms 93mb |L2.519| " - - "L2.520[1807830,1863768] 3.42ms 100mb |L2.520| " - - "L2.521[1863769,1919706] 3.42ms 100mb |L2.521| " - - "L2.522[1919707,1970000] 3.42ms 90mb |L2.522| " - - "L2.523[2299757,2376597] 3.42ms 100mb |L2.523| " - - "L2.524[2376598,2453437] 3.42ms 100mb |L2.524| " - - "L2.525[2453438,2505049] 3.42ms 67mb |L2.525| " - - "L2.526[2616667,2669559] 3.42ms 100mb |L2.526| " - - "L2.527[2669560,2722451] 3.42ms 100mb |L2.527| " - - "L2.528[2722452,2759999] 3.42ms 71mb |L2.528| " - - "L2.529[2799999,2857709] 3.42ms 100mb |L2.529| " - - "L2.530[2857710,2915419] 3.42ms 100mb |L2.530| " - - "L2.531[2915420,2931175] 3.42ms 27mb |L2.531| " - - "L2.532[3071616,3091082] 3.42ms 100mb |L2.532| " - - "L2.533[3091083,3110548] 3.42ms 100mb |L2.533|" - - "L2.534[3110549,3121807] 3.42ms 58mb |L2.534|" - - "L2.535[3149991,3166126] 3.42ms 100mb |L2.535|" - - "L2.536[3166127,3182261] 3.42ms 100mb |L2.536|" - - "L2.537[3182262,3182938] 3.42ms 4mb |L2.537|" - - "L2.538[3278131,3293432] 3.42ms 100mb |L2.538|" - - "L2.539[3293433,3308733] 3.42ms 100mb |L2.539|" - - "L2.540[3308734,3323921] 3.42ms 99mb |L2.540|" - - "L2.541[3335494,3348934] 3.42ms 100mb |L2.541|" - - "L2.542[3348935,3362374] 3.42ms 100mb |L2.542|" - - "L2.543[3362375,3364112] 3.42ms 13mb |L2.543|" - - "L2.544[3384823,3388964] 3.42ms 18mb |L2.544|" - - "L2.545[3388965,3390000] 3.42ms 4mb |L2.545|" + - "L2.440[3260585,3278130] 3.42ms 100mb |L2.440|" + - "L2.445[3323922,3335493] 3.42ms 100mb |L2.445|" + - "L2.449[3364113,3384822] 3.42ms 88mb |L2.449|" + - "L2.455[10000,344482] 3.42ms 100mb|L2.455| " + - "L2.456[344483,678964] 3.42ms 100mb |L2.456| " + - "L2.457[678965,894766] 3.42ms 65mb |L2.457| " + - "L2.458[968640,1031250] 3.42ms 100mb |L2.458| " + - "L2.459[1031251,1093860] 3.42ms 100mb |L2.459| " + - "L2.460[1093861,1153379] 3.42ms 95mb |L2.460| " + - "L2.461[1374632,1446111] 3.42ms 100mb |L2.461| " + - "L2.462[1446112,1517590] 3.42ms 100mb |L2.462| " + - "L2.463[1517591,1572539] 3.42ms 77mb |L2.463| " + - "L2.464[1572540,1635002] 3.42ms 100mb |L2.464| " + - "L2.465[1635003,1697464] 3.42ms 100mb |L2.465| " + - "L2.466[1697465,1755316] 3.42ms 93mb |L2.466| " + - "L2.467[1807830,1863768] 3.42ms 100mb |L2.467| " + - "L2.468[1863769,1919706] 3.42ms 100mb |L2.468| " + - 
"L2.469[1919707,1970000] 3.42ms 90mb |L2.469| " + - "L2.470[2299757,2376597] 3.42ms 100mb |L2.470| " + - "L2.471[2376598,2453437] 3.42ms 100mb |L2.471| " + - "L2.472[2453438,2505049] 3.42ms 67mb |L2.472| " + - "L2.473[2616667,2669559] 3.42ms 100mb |L2.473| " + - "L2.474[2669560,2722451] 3.42ms 100mb |L2.474| " + - "L2.475[2722452,2759999] 3.42ms 71mb |L2.475| " + - "L2.476[2799999,2857709] 3.42ms 100mb |L2.476| " + - "L2.477[2857710,2915419] 3.42ms 100mb |L2.477| " + - "L2.478[2915420,2931175] 3.42ms 27mb |L2.478| " + - "L2.479[3071616,3091082] 3.42ms 100mb |L2.479| " + - "L2.480[3091083,3110548] 3.42ms 100mb |L2.480|" + - "L2.481[3110549,3121807] 3.42ms 58mb |L2.481|" + - "L2.482[3149991,3166126] 3.42ms 100mb |L2.482|" + - "L2.483[3166127,3182261] 3.42ms 100mb |L2.483|" + - "L2.484[3182262,3182938] 3.42ms 4mb |L2.484|" + - "L2.485[3278131,3293432] 3.42ms 100mb |L2.485|" + - "L2.486[3293433,3308733] 3.42ms 100mb |L2.486|" + - "L2.487[3308734,3323921] 3.42ms 99mb |L2.487|" + - "L2.488[3335494,3348934] 3.42ms 100mb |L2.488|" + - "L2.489[3348935,3362374] 3.42ms 100mb |L2.489|" + - "L2.490[3362375,3364112] 3.42ms 13mb |L2.490|" + - "L2.491[3384823,3388964] 3.42ms 18mb |L2.491|" + - "L2.492[3388965,3390000] 3.42ms 4mb |L2.492|" - "**** Breakdown of where bytes were written" - - 38mb written by compact(ManySmallFiles) - - 4.66gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) - - 4.8gb written by split(VerticalSplit) - - 406mb written by split(ReduceOverlap) - - 5.73gb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) - - 5mb written by compact(TotalSizeLessThanMaxCompactSize) + - 3.34gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) + - 4.75gb written by split(VerticalSplit) + - 5.74gb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) + - 6mb written by compact(ManySmallFiles) + - 931kb written by compact(TotalSizeLessThanMaxCompactSize) "### ); } diff --git a/compactor/tests/layouts/common_use_cases.rs b/compactor/tests/layouts/common_use_cases.rs index 4e1334a010..7be19a8003 100644 --- a/compactor/tests/layouts/common_use_cases.rs +++ b/compactor/tests/layouts/common_use_cases.rs @@ -441,11 +441,11 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - " Creating 2 files" - "**** Simulation run 84, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[3362]). 6 Input Files, 122mb total:" - "L1 " - - "L1.508[3392,3404] 339ns 6mb |L1.508|" - - "L1.507[3342,3391] 339ns 25mb |-----L1.507-----| " - - "L1.500[3293,3341] 334ns 24mb |----L1.500-----| " - "L1.493[3244,3292] 329ns 24mb |----L1.493-----| " - "L1.486[3200,3243] 324ns 20mb |---L1.486----| " + - "L1.500[3293,3341] 334ns 24mb |----L1.500-----| " + - "L1.508[3392,3404] 339ns 6mb |L1.508|" + - "L1.507[3342,3391] 339ns 25mb |-----L1.507-----| " - "L2 " - "L2.480[3162,3204] 319ns 22mb|---L2.480----| " - "**** 2 Output Files (parquet_file_id not yet assigned), 122mb total:" @@ -624,11 +624,11 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - " Creating 2 files" - "**** Simulation run 124, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4961]). 
6 Input Files, 122mb total:" - "L1 " - - "L1.748[4992,5004] 499ns 6mb |L1.748|" - - "L1.747[4942,4991] 499ns 25mb |-----L1.747-----| " - - "L1.740[4893,4941] 494ns 24mb |----L1.740-----| " - "L1.733[4844,4892] 489ns 24mb |----L1.733-----| " - "L1.726[4800,4843] 484ns 20mb |---L1.726----| " + - "L1.740[4893,4941] 494ns 24mb |----L1.740-----| " + - "L1.748[4992,5004] 499ns 6mb |L1.748|" + - "L1.747[4942,4991] 499ns 25mb |-----L1.747-----| " - "L2 " - "L2.720[4761,4804] 479ns 22mb|---L2.720----| " - "**** 2 Output Files (parquet_file_id not yet assigned), 122mb total:" diff --git a/compactor/tests/layouts/core.rs b/compactor/tests/layouts/core.rs index 16a828ae7e..4517fbdc08 100644 --- a/compactor/tests/layouts/core.rs +++ b/compactor/tests/layouts/core.rs @@ -911,27 +911,27 @@ async fn overlapping_out_of_order_l0() { - "L2.267[100,105] 111ns 114mb |L2.267| " - "L2.273[119,123] 126ns 97mb |L2.273| " - "L2.279[137,141] 146ns 97mb |L2.279| " - - "L2.281[200,205] 211ns 114mb |L2.281| " - - "L2.287[219,223] 226ns 97mb |L2.287| " - - "L2.293[237,241] 246ns 97mb |L2.293| " + - "L2.283[200,205] 211ns 114mb |L2.283| " + - "L2.289[219,223] 226ns 97mb |L2.289| " + - "L2.295[237,241] 246ns 97mb |L2.295| " - "L2.299[300,305] 311ns 114mb |L2.299| " - "L2.305[319,323] 326ns 97mb |L2.305| " - "L2.311[337,341] 346ns 97mb |L2.311| " - "L2.315[400,405] 411ns 114mb |L2.315| " - "L2.321[419,423] 426ns 97mb |L2.321| " - "L2.327[437,441] 446ns 97mb |L2.327| " - - "L2.329[500,505] 511ns 114mb |L2.329| " - - "L2.335[519,523] 526ns 97mb |L2.335| " - - "L2.341[537,541] 546ns 97mb |L2.341| " + - "L2.331[500,505] 511ns 114mb |L2.331| " + - "L2.337[519,523] 526ns 97mb |L2.337| " + - "L2.343[537,541] 546ns 97mb |L2.343| " - "L2.347[600,605] 611ns 114mb |L2.347| " - "L2.353[619,623] 626ns 97mb |L2.353| " - "L2.359[637,641] 646ns 97mb |L2.359| " - "L2.363[700,705] 711ns 114mb |L2.363| " - "L2.369[719,723] 726ns 97mb |L2.369| " - - "L2.375[800,805] 811ns 114mb |L2.375| " - - "L2.381[819,823] 826ns 97mb |L2.381| " - - "L2.387[837,841] 846ns 97mb |L2.387| " - - "L2.389[737,741] 746ns 97mb |L2.389| " + - "L2.375[737,741] 746ns 97mb |L2.375| " + - "L2.379[800,805] 811ns 114mb |L2.379| " + - "L2.385[819,823] 826ns 97mb |L2.385| " + - "L2.391[837,841] 846ns 97mb |L2.391| " - "L2.395[900,905] 911ns 114mb |L2.395|" - "L2.401[919,923] 926ns 97mb |L2.401|" - "L2.407[937,941] 946ns 97mb |L2.407|" @@ -1196,8 +1196,8 @@ async fn overlapping_out_of_order_l0_small() { - "**** Final Output Files (1.83gb written)" - "L2 " - "L2.111[0,40] 51ns 80mb |L2.111| " - - "L2.126[600,640] 651ns 80mb |L2.126| " - - "L2.132[300,340] 351ns 80mb |L2.132| " + - "L2.120[300,340] 351ns 80mb |L2.120| " + - "L2.129[600,640] 651ns 80mb |L2.129| " - "L2.138[900,940] 951ns 80mb |L2.138|" - "L2.141[41,135] 1.3us 101mb |L2.141| " - "L2.142[136,229] 1.3us 100mb |L2.142| " diff --git a/compactor/tests/layouts/large_files.rs b/compactor/tests/layouts/large_files.rs index d0e8911363..19ca09262b 100644 --- a/compactor/tests/layouts/large_files.rs +++ b/compactor/tests/layouts/large_files.rs @@ -646,7 +646,7 @@ async fn two_large_files_total_over_max_compact_size_start_l0() { - "Committing partition 1:" - " Soft Deleting 2 files: L0.1, L1.2" - " Creating 4 files" - - "**** Simulation run 2, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[334]). 2 Input Files, 200mb total:" + - "**** Simulation run 2, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[334]). 
2 Input Files, 200mb total:" - "L0 " - "L0.3[0,667] 10ns 100mb |------------------------------------------L0.3------------------------------------------|" - "L1 " @@ -1577,22 +1577,7 @@ async fn pre_split_large_l0() { - "L0.76[7421,8704] 10.01us 14mb |-L0.76-| " - "L0.77[8705,10000] 10.01us 14mb |-L0.77-| " - "L0.78[3000,13000] 11us 100mb |----------------------------------L0.78----------------------------------| " - - "**** Simulation run 0, type=split(VerticalSplit)(split_times=[3568, 4852, 6136, 7420, 8704, 10312]). 1 Input Files, 100mb total:" - - "L0, all files 100mb " - - "L0.78[3000,13000] 11us |-----------------------------------------L0.78------------------------------------------|" - - "**** 7 Output Files (parquet_file_id not yet assigned), 100mb total:" - - "L0 " - - "L0.?[3000,3568] 11us 6mb |L0.?| " - - "L0.?[3569,4852] 11us 13mb |--L0.?---| " - - "L0.?[4853,6136] 11us 13mb |--L0.?---| " - - "L0.?[6137,7420] 11us 13mb |--L0.?---| " - - "L0.?[7421,8704] 11us 13mb |--L0.?---| " - - "L0.?[8705,10312] 11us 16mb |----L0.?----| " - - "L0.?[10313,13000] 11us 27mb |---------L0.?---------| " - - "Committing partition 1:" - - " Soft Deleting 1 files: L0.78" - - " Creating 7 files" - - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1818]). 11 Input Files, 157mb total:" + - "**** Simulation run 0, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1818]). 11 Input Files, 157mb total:" - "L0, all files 14mb " - "L0.71[1000,2284] 10.01us |-----------------------------------------L0.71------------------------------------------|" - "L0.64[1000,2284] 10.01us |-----------------------------------------L0.64------------------------------------------|" @@ -1612,9 +1597,24 @@ async fn pre_split_large_l0() { - "Committing partition 1:" - " Soft Deleting 11 files: L0.1, L0.8, L0.15, L0.22, L0.29, L0.36, L0.43, L0.50, L0.57, L0.64, L0.71" - " Creating 2 files" + - "**** Simulation run 1, type=split(VerticalSplit)(split_times=[3568, 4852, 6136, 7420, 8704, 10312]). 1 Input Files, 100mb total:" + - "L0, all files 100mb " + - "L0.78[3000,13000] 11us |-----------------------------------------L0.78------------------------------------------|" + - "**** 7 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "L0 " + - "L0.?[3000,3568] 11us 6mb |L0.?| " + - "L0.?[3569,4852] 11us 13mb |--L0.?---| " + - "L0.?[4853,6136] 11us 13mb |--L0.?---| " + - "L0.?[6137,7420] 11us 13mb |--L0.?---| " + - "L0.?[7421,8704] 11us 13mb |--L0.?---| " + - "L0.?[8705,10312] 11us 16mb |----L0.?----| " + - "L0.?[10313,13000] 11us 27mb |---------L0.?---------| " + - "Committing partition 1:" + - " Soft Deleting 1 files: L0.78" + - " Creating 7 files" - "**** Simulation run 2, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[3073]). 
12 Input Files, 163mb total:" - "L0 " - - "L0.79[3000,3568] 11us 6mb |----------------L0.79----------------| " + - "L0.81[3000,3568] 11us 6mb |----------------L0.81----------------| " - "L0.72[2285,3568] 10.01us 14mb|-----------------------------------------L0.72------------------------------------------|" - "L0.65[2285,3568] 10.01us 14mb|-----------------------------------------L0.65------------------------------------------|" - "L0.58[2285,3568] 10.01us 14mb|-----------------------------------------L0.58------------------------------------------|" @@ -1631,11 +1631,11 @@ async fn pre_split_large_l0() { - "L1.?[2285,3073] 11us 100mb|------------------------L1.?-------------------------| " - "L1.?[3074,3568] 11us 63mb |--------------L1.?--------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.2, L0.9, L0.16, L0.23, L0.30, L0.37, L0.44, L0.51, L0.58, L0.65, L0.72, L0.79" + - " Soft Deleting 12 files: L0.2, L0.9, L0.16, L0.23, L0.30, L0.37, L0.44, L0.51, L0.58, L0.65, L0.72, L0.81" - " Creating 2 files" - "**** Simulation run 3, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4324]). 12 Input Files, 170mb total:" - "L0 " - - "L0.80[3569,4852] 11us 13mb|-----------------------------------------L0.80------------------------------------------|" + - "L0.82[3569,4852] 11us 13mb|-----------------------------------------L0.82------------------------------------------|" - "L0.73[3569,4852] 10.01us 14mb|-----------------------------------------L0.73------------------------------------------|" - "L0.66[3569,4852] 10.01us 14mb|-----------------------------------------L0.66------------------------------------------|" - "L0.59[3569,4852] 10.01us 14mb|-----------------------------------------L0.59------------------------------------------|" @@ -1652,11 +1652,11 @@ async fn pre_split_large_l0() { - "L1.?[3569,4324] 11us 100mb|-----------------------L1.?-----------------------| " - "L1.?[4325,4852] 11us 70mb |---------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.3, L0.10, L0.17, L0.24, L0.31, L0.38, L0.45, L0.52, L0.59, L0.66, L0.73, L0.80" + - " Soft Deleting 12 files: L0.3, L0.10, L0.17, L0.24, L0.31, L0.38, L0.45, L0.52, L0.59, L0.66, L0.73, L0.82" - " Creating 2 files" - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[5608]). 
12 Input Files, 170mb total:" - "L0 " - - "L0.81[4853,6136] 11us 13mb|-----------------------------------------L0.81------------------------------------------|" + - "L0.83[4853,6136] 11us 13mb|-----------------------------------------L0.83------------------------------------------|" - "L0.74[4853,6136] 10.01us 14mb|-----------------------------------------L0.74------------------------------------------|" - "L0.67[4853,6136] 10.01us 14mb|-----------------------------------------L0.67------------------------------------------|" - "L0.60[4853,6136] 10.01us 14mb|-----------------------------------------L0.60------------------------------------------|" @@ -1673,11 +1673,11 @@ async fn pre_split_large_l0() { - "L1.?[4853,5608] 11us 100mb|-----------------------L1.?-----------------------| " - "L1.?[5609,6136] 11us 70mb |---------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.4, L0.11, L0.18, L0.25, L0.32, L0.39, L0.46, L0.53, L0.60, L0.67, L0.74, L0.81" + - " Soft Deleting 12 files: L0.4, L0.11, L0.18, L0.25, L0.32, L0.39, L0.46, L0.53, L0.60, L0.67, L0.74, L0.83" - " Creating 2 files" - "**** Simulation run 5, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[6892]). 12 Input Files, 170mb total:" - "L0 " - - "L0.82[6137,7420] 11us 13mb|-----------------------------------------L0.82------------------------------------------|" + - "L0.84[6137,7420] 11us 13mb|-----------------------------------------L0.84------------------------------------------|" - "L0.75[6137,7420] 10.01us 14mb|-----------------------------------------L0.75------------------------------------------|" - "L0.68[6137,7420] 10.01us 14mb|-----------------------------------------L0.68------------------------------------------|" - "L0.61[6137,7420] 10.01us 14mb|-----------------------------------------L0.61------------------------------------------|" @@ -1694,11 +1694,11 @@ async fn pre_split_large_l0() { - "L1.?[6137,6892] 11us 100mb|-----------------------L1.?-----------------------| " - "L1.?[6893,7420] 11us 70mb |---------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.5, L0.12, L0.19, L0.26, L0.33, L0.40, L0.47, L0.54, L0.61, L0.68, L0.75, L0.82" + - " Soft Deleting 12 files: L0.5, L0.12, L0.19, L0.26, L0.33, L0.40, L0.47, L0.54, L0.61, L0.68, L0.75, L0.84" - " Creating 2 files" - "**** Simulation run 6, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[8176]). 
12 Input Files, 170mb total:" - "L0 " - - "L0.83[7421,8704] 11us 13mb|-----------------------------------------L0.83------------------------------------------|" + - "L0.85[7421,8704] 11us 13mb|-----------------------------------------L0.85------------------------------------------|" - "L0.76[7421,8704] 10.01us 14mb|-----------------------------------------L0.76------------------------------------------|" - "L0.69[7421,8704] 10.01us 14mb|-----------------------------------------L0.69------------------------------------------|" - "L0.62[7421,8704] 10.01us 14mb|-----------------------------------------L0.62------------------------------------------|" @@ -1715,11 +1715,11 @@ async fn pre_split_large_l0() { - "L1.?[7421,8176] 11us 100mb|-----------------------L1.?-----------------------| " - "L1.?[8177,8704] 11us 70mb |---------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.6, L0.13, L0.20, L0.27, L0.34, L0.41, L0.48, L0.55, L0.62, L0.69, L0.76, L0.83" + - " Soft Deleting 12 files: L0.6, L0.13, L0.20, L0.27, L0.34, L0.41, L0.48, L0.55, L0.62, L0.69, L0.76, L0.85" - " Creating 2 files" - "**** Simulation run 7, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[9633]). 12 Input Files, 173mb total:" - "L0 " - - "L0.84[8705,10312] 11us 16mb|-----------------------------------------L0.84------------------------------------------|" + - "L0.86[8705,10312] 11us 16mb|-----------------------------------------L0.86------------------------------------------|" - "L0.77[8705,10000] 10.01us 14mb|--------------------------------L0.77---------------------------------| " - "L0.70[8705,10000] 10.01us 14mb|--------------------------------L0.70---------------------------------| " - "L0.63[8705,10000] 10.01us 14mb|--------------------------------L0.63---------------------------------| " @@ -1736,17 +1736,17 @@ async fn pre_split_large_l0() { - "L1.?[8705,9633] 11us 100mb|----------------------L1.?-----------------------| " - "L1.?[9634,10312] 11us 73mb |---------------L1.?----------------| " - "Committing partition 1:" - - " Soft Deleting 12 files: L0.7, L0.14, L0.21, L0.28, L0.35, L0.42, L0.49, L0.56, L0.63, L0.70, L0.77, L0.84" + - " Soft Deleting 12 files: L0.7, L0.14, L0.21, L0.28, L0.35, L0.42, L0.49, L0.56, L0.63, L0.70, L0.77, L0.86" - " Creating 2 files" - "**** Simulation run 8, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[12462]). 1 Input Files, 27mb total:" - "L0, all files 27mb " - - "L0.85[10313,13000] 11us |-----------------------------------------L0.85------------------------------------------|" + - "L0.87[10313,13000] 11us |-----------------------------------------L0.87------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 27mb total:" - "L1 " - "L1.?[10313,12462] 11us 21mb|--------------------------------L1.?---------------------------------| " - "L1.?[12463,13000] 11us 5mb |-----L1.?------| " - "Committing partition 1:" - - " Soft Deleting 1 files: L0.85" + - " Soft Deleting 1 files: L0.87" - " Creating 2 files" - "**** Simulation run 9, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[12462]). 
2 Input Files, 27mb total:" - "L1 " @@ -1758,12 +1758,12 @@ async fn pre_split_large_l0() { - "L2.?[12463,13000] 11us 5mb |-----L2.?------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.100, L1.101" - - " Upgrading 14 files level to CompactionLevel::L2: L1.86, L1.87, L1.88, L1.89, L1.90, L1.91, L1.92, L1.93, L1.94, L1.95, L1.96, L1.97, L1.98, L1.99" + - " Upgrading 14 files level to CompactionLevel::L2: L1.79, L1.80, L1.88, L1.89, L1.90, L1.91, L1.92, L1.93, L1.94, L1.95, L1.96, L1.97, L1.98, L1.99" - " Creating 2 files" - "**** Final Output Files (1.3gb written)" - "L2 " - - "L2.86[1000,1818] 10.01us 100mb|L2.86| " - - "L2.87[1819,2284] 10.01us 57mb |L2.87| " + - "L2.79[1000,1818] 10.01us 100mb|L2.79| " + - "L2.80[1819,2284] 10.01us 57mb |L2.80| " - "L2.88[2285,3073] 11us 100mb |L2.88| " - "L2.89[3074,3568] 11us 63mb |L2.89| " - "L2.90[3569,4324] 11us 100mb |L2.90| " @@ -1951,14 +1951,15 @@ async fn file_over_max_size() { - "**** Final Output Files (91mb written)" - "L2 " - "L2.102[1100,1200] 2us 33mb |L2.102|" - - "L2.109[0,271] 2us 9mb |------L2.109------| " - - "L2.115[272,571] 2us 10mb |-------L2.115-------| " - - "L2.116[572,870] 2us 10mb |-------L2.116-------| " - - "L2.117[871,1001] 2us 4mb |L2.117-| " + - "L2.113[0,271] 2us 9mb |------L2.113------| " + - "L2.119[272,571] 2us 10mb |-------L2.119-------| " + - "L2.120[572,870] 2us 10mb |-------L2.120-------| " + - "L2.121[871,1001] 2us 4mb |L2.121-| " - "**** Breakdown of where bytes were written" + - 1b written by compact(TotalSizeLessThanMaxCompactSize) - 33mb written by split(VerticalSplit) - 58mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) - - 60b written by compact(ManySmallFiles) + - 99b written by compact(ManySmallFiles) - "WARNING: file L2.102[1100,1200] 2us 33mb exceeds soft limit 10mb by more than 50%" "### ); diff --git a/compactor/tests/layouts/large_overlaps.rs b/compactor/tests/layouts/large_overlaps.rs index 36a3f751ab..69f865e9c3 100644 --- a/compactor/tests/layouts/large_overlaps.rs +++ b/compactor/tests/layouts/large_overlaps.rs @@ -686,7 +686,112 @@ async fn many_good_size_l0_files() { - "Committing partition 1:" - " Soft Deleting 96 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50, L0.51, L0.52, L0.53, L0.54, L0.55, L0.56, L0.57, L0.58, L0.59, L0.60, L0.61, L0.62, L0.63, L0.64, L0.65, L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.73, L0.74, L0.75, L0.76, L0.77, L0.78, L0.79, L0.80, L0.81, L0.82, L0.83, L0.84, L0.85, L0.86, L0.87, L0.88, L0.89, L0.90, L0.91, L0.92, L0.93, L0.94, L0.95, L0.289" - " Creating 2 files" - - "**** Simulation run 3, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[241]). 98 Input Files, 195mb total:" + - "**** Simulation run 3, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[146]). 
96 Input Files, 190mb total:" + - "L0 " + - "L0.291[190,190] 191ns 1mb |L0.291|" + - "L0.290[96,96] 96ns 1mb |L0.290| " + - "L0.190[189,190] 190ns 2mb |L0.190|" + - "L0.189[188,189] 189ns 2mb |L0.189|" + - "L0.188[187,188] 188ns 2mb |L0.188|" + - "L0.187[186,187] 187ns 2mb |L0.187|" + - "L0.186[185,186] 186ns 2mb |L0.186|" + - "L0.185[184,185] 185ns 2mb |L0.185|" + - "L0.184[183,184] 184ns 2mb |L0.184|" + - "L0.183[182,183] 183ns 2mb |L0.183|" + - "L0.182[181,182] 182ns 2mb |L0.182| " + - "L0.181[180,181] 181ns 2mb |L0.181| " + - "L0.180[179,180] 180ns 2mb |L0.180| " + - "L0.179[178,179] 179ns 2mb |L0.179| " + - "L0.178[177,178] 178ns 2mb |L0.178| " + - "L0.177[176,177] 177ns 2mb |L0.177| " + - "L0.176[175,176] 176ns 2mb |L0.176| " + - "L0.175[174,175] 175ns 2mb |L0.175| " + - "L0.174[173,174] 174ns 2mb |L0.174| " + - "L0.173[172,173] 173ns 2mb |L0.173| " + - "L0.172[171,172] 172ns 2mb |L0.172| " + - "L0.171[170,171] 171ns 2mb |L0.171| " + - "L0.170[169,170] 170ns 2mb |L0.170| " + - "L0.169[168,169] 169ns 2mb |L0.169| " + - "L0.168[167,168] 168ns 2mb |L0.168| " + - "L0.167[166,167] 167ns 2mb |L0.167| " + - "L0.166[165,166] 166ns 2mb |L0.166| " + - "L0.165[164,165] 165ns 2mb |L0.165| " + - "L0.164[163,164] 164ns 2mb |L0.164| " + - "L0.163[162,163] 163ns 2mb |L0.163| " + - "L0.162[161,162] 162ns 2mb |L0.162| " + - "L0.161[160,161] 161ns 2mb |L0.161| " + - "L0.160[159,160] 160ns 2mb |L0.160| " + - "L0.159[158,159] 159ns 2mb |L0.159| " + - "L0.158[157,158] 158ns 2mb |L0.158| " + - "L0.157[156,157] 157ns 2mb |L0.157| " + - "L0.156[155,156] 156ns 2mb |L0.156| " + - "L0.155[154,155] 155ns 2mb |L0.155| " + - "L0.154[153,154] 154ns 2mb |L0.154| " + - "L0.153[152,153] 153ns 2mb |L0.153| " + - "L0.152[151,152] 152ns 2mb |L0.152| " + - "L0.151[150,151] 151ns 2mb |L0.151| " + - "L0.150[149,150] 150ns 2mb |L0.150| " + - "L0.149[148,149] 149ns 2mb |L0.149| " + - "L0.148[147,148] 148ns 2mb |L0.148| " + - "L0.147[146,147] 147ns 2mb |L0.147| " + - "L0.146[145,146] 146ns 2mb |L0.146| " + - "L0.145[144,145] 145ns 2mb |L0.145| " + - "L0.144[143,144] 144ns 2mb |L0.144| " + - "L0.143[142,143] 143ns 2mb |L0.143| " + - "L0.142[141,142] 142ns 2mb |L0.142| " + - "L0.141[140,141] 141ns 2mb |L0.141| " + - "L0.140[139,140] 140ns 2mb |L0.140| " + - "L0.139[138,139] 139ns 2mb |L0.139| " + - "L0.138[137,138] 138ns 2mb |L0.138| " + - "L0.137[136,137] 137ns 2mb |L0.137| " + - "L0.136[135,136] 136ns 2mb |L0.136| " + - "L0.135[134,135] 135ns 2mb |L0.135| " + - "L0.134[133,134] 134ns 2mb |L0.134| " + - "L0.133[132,133] 133ns 2mb |L0.133| " + - "L0.132[131,132] 132ns 2mb |L0.132| " + - "L0.131[130,131] 131ns 2mb |L0.131| " + - "L0.130[129,130] 130ns 2mb |L0.130| " + - "L0.129[128,129] 129ns 2mb |L0.129| " + - "L0.128[127,128] 128ns 2mb |L0.128| " + - "L0.127[126,127] 127ns 2mb |L0.127| " + - "L0.126[125,126] 126ns 2mb |L0.126| " + - "L0.125[124,125] 125ns 2mb |L0.125| " + - "L0.124[123,124] 124ns 2mb |L0.124| " + - "L0.123[122,123] 123ns 2mb |L0.123| " + - "L0.122[121,122] 122ns 2mb |L0.122| " + - "L0.121[120,121] 121ns 2mb |L0.121| " + - "L0.120[119,120] 120ns 2mb |L0.120| " + - "L0.119[118,119] 119ns 2mb |L0.119| " + - "L0.118[117,118] 118ns 2mb |L0.118| " + - "L0.117[116,117] 117ns 2mb |L0.117| " + - "L0.116[115,116] 116ns 2mb |L0.116| " + - "L0.115[114,115] 115ns 2mb |L0.115| " + - "L0.114[113,114] 114ns 2mb |L0.114| " + - "L0.113[112,113] 113ns 2mb |L0.113| " + - "L0.112[111,112] 112ns 2mb |L0.112| " + - "L0.111[110,111] 111ns 2mb |L0.111| " + - "L0.110[109,110] 110ns 2mb |L0.110| " + - "L0.109[108,109] 109ns 2mb |L0.109| 
" + - "L0.108[107,108] 108ns 2mb |L0.108| " + - "L0.107[106,107] 107ns 2mb |L0.107| " + - "L0.106[105,106] 106ns 2mb |L0.106| " + - "L0.105[104,105] 105ns 2mb |L0.105| " + - "L0.104[103,104] 104ns 2mb |L0.104| " + - "L0.103[102,103] 103ns 2mb |L0.103| " + - "L0.102[101,102] 102ns 2mb |L0.102| " + - "L0.101[100,101] 101ns 2mb |L0.101| " + - "L0.100[99,100] 100ns 2mb |L0.100| " + - "L0.99[98,99] 99ns 2mb |L0.99| " + - "L0.98[97,98] 98ns 2mb |L0.98| " + - "L0.97[96,97] 97ns 2mb |L0.97| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 190mb total:" + - "L1 " + - "L1.?[96,146] 191ns 102mb |--------------------L1.?---------------------| " + - "L1.?[147,190] 191ns 88mb |-----------------L1.?------------------| " + - "Committing partition 1:" + - " Soft Deleting 96 files: L0.97, L0.98, L0.99, L0.100, L0.101, L0.102, L0.103, L0.104, L0.105, L0.106, L0.107, L0.108, L0.109, L0.110, L0.111, L0.112, L0.113, L0.114, L0.115, L0.116, L0.117, L0.118, L0.119, L0.120, L0.121, L0.122, L0.123, L0.124, L0.125, L0.126, L0.127, L0.128, L0.129, L0.130, L0.131, L0.132, L0.133, L0.134, L0.135, L0.136, L0.137, L0.138, L0.139, L0.140, L0.141, L0.142, L0.143, L0.144, L0.145, L0.146, L0.147, L0.148, L0.149, L0.150, L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L0.290, L0.291" + - " Creating 2 files" + - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[241]). 98 Input Files, 195mb total:" - "L0 " - "L0.292[191,191] 191ns 1mb|L0.292| " - "L0.288[287,288] 288ns 2mb |L0.288|" @@ -793,121 +898,16 @@ async fn many_good_size_l0_files() { - "Committing partition 1:" - " Soft Deleting 98 files: L0.192, L0.193, L0.194, L0.195, L0.196, L0.197, L0.198, L0.199, L0.200, L0.201, L0.202, L0.203, L0.204, L0.205, L0.206, L0.207, L0.208, L0.209, L0.210, L0.211, L0.212, L0.213, L0.214, L0.215, L0.216, L0.217, L0.218, L0.219, L0.220, L0.221, L0.222, L0.223, L0.224, L0.225, L0.226, L0.227, L0.228, L0.229, L0.230, L0.231, L0.232, L0.233, L0.234, L0.235, L0.236, L0.237, L0.238, L0.239, L0.240, L0.241, L0.242, L0.243, L0.244, L0.245, L0.246, L0.247, L0.248, L0.249, L0.250, L0.251, L0.252, L0.253, L0.254, L0.255, L0.256, L0.257, L0.258, L0.259, L0.260, L0.261, L0.262, L0.263, L0.264, L0.265, L0.266, L0.267, L0.268, L0.269, L0.270, L0.271, L0.272, L0.273, L0.274, L0.275, L0.276, L0.277, L0.278, L0.279, L0.280, L0.281, L0.282, L0.283, L0.284, L0.285, L0.286, L0.287, L0.288, L0.292" - " Creating 2 files" - - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[146]). 
96 Input Files, 190mb total:" - - "L0 " - - "L0.291[190,190] 191ns 1mb |L0.291|" - - "L0.290[96,96] 96ns 1mb |L0.290| " - - "L0.190[189,190] 190ns 2mb |L0.190|" - - "L0.189[188,189] 189ns 2mb |L0.189|" - - "L0.188[187,188] 188ns 2mb |L0.188|" - - "L0.187[186,187] 187ns 2mb |L0.187|" - - "L0.186[185,186] 186ns 2mb |L0.186|" - - "L0.185[184,185] 185ns 2mb |L0.185|" - - "L0.184[183,184] 184ns 2mb |L0.184|" - - "L0.183[182,183] 183ns 2mb |L0.183|" - - "L0.182[181,182] 182ns 2mb |L0.182| " - - "L0.181[180,181] 181ns 2mb |L0.181| " - - "L0.180[179,180] 180ns 2mb |L0.180| " - - "L0.179[178,179] 179ns 2mb |L0.179| " - - "L0.178[177,178] 178ns 2mb |L0.178| " - - "L0.177[176,177] 177ns 2mb |L0.177| " - - "L0.176[175,176] 176ns 2mb |L0.176| " - - "L0.175[174,175] 175ns 2mb |L0.175| " - - "L0.174[173,174] 174ns 2mb |L0.174| " - - "L0.173[172,173] 173ns 2mb |L0.173| " - - "L0.172[171,172] 172ns 2mb |L0.172| " - - "L0.171[170,171] 171ns 2mb |L0.171| " - - "L0.170[169,170] 170ns 2mb |L0.170| " - - "L0.169[168,169] 169ns 2mb |L0.169| " - - "L0.168[167,168] 168ns 2mb |L0.168| " - - "L0.167[166,167] 167ns 2mb |L0.167| " - - "L0.166[165,166] 166ns 2mb |L0.166| " - - "L0.165[164,165] 165ns 2mb |L0.165| " - - "L0.164[163,164] 164ns 2mb |L0.164| " - - "L0.163[162,163] 163ns 2mb |L0.163| " - - "L0.162[161,162] 162ns 2mb |L0.162| " - - "L0.161[160,161] 161ns 2mb |L0.161| " - - "L0.160[159,160] 160ns 2mb |L0.160| " - - "L0.159[158,159] 159ns 2mb |L0.159| " - - "L0.158[157,158] 158ns 2mb |L0.158| " - - "L0.157[156,157] 157ns 2mb |L0.157| " - - "L0.156[155,156] 156ns 2mb |L0.156| " - - "L0.155[154,155] 155ns 2mb |L0.155| " - - "L0.154[153,154] 154ns 2mb |L0.154| " - - "L0.153[152,153] 153ns 2mb |L0.153| " - - "L0.152[151,152] 152ns 2mb |L0.152| " - - "L0.151[150,151] 151ns 2mb |L0.151| " - - "L0.150[149,150] 150ns 2mb |L0.150| " - - "L0.149[148,149] 149ns 2mb |L0.149| " - - "L0.148[147,148] 148ns 2mb |L0.148| " - - "L0.147[146,147] 147ns 2mb |L0.147| " - - "L0.146[145,146] 146ns 2mb |L0.146| " - - "L0.145[144,145] 145ns 2mb |L0.145| " - - "L0.144[143,144] 144ns 2mb |L0.144| " - - "L0.143[142,143] 143ns 2mb |L0.143| " - - "L0.142[141,142] 142ns 2mb |L0.142| " - - "L0.141[140,141] 141ns 2mb |L0.141| " - - "L0.140[139,140] 140ns 2mb |L0.140| " - - "L0.139[138,139] 139ns 2mb |L0.139| " - - "L0.138[137,138] 138ns 2mb |L0.138| " - - "L0.137[136,137] 137ns 2mb |L0.137| " - - "L0.136[135,136] 136ns 2mb |L0.136| " - - "L0.135[134,135] 135ns 2mb |L0.135| " - - "L0.134[133,134] 134ns 2mb |L0.134| " - - "L0.133[132,133] 133ns 2mb |L0.133| " - - "L0.132[131,132] 132ns 2mb |L0.132| " - - "L0.131[130,131] 131ns 2mb |L0.131| " - - "L0.130[129,130] 130ns 2mb |L0.130| " - - "L0.129[128,129] 129ns 2mb |L0.129| " - - "L0.128[127,128] 128ns 2mb |L0.128| " - - "L0.127[126,127] 127ns 2mb |L0.127| " - - "L0.126[125,126] 126ns 2mb |L0.126| " - - "L0.125[124,125] 125ns 2mb |L0.125| " - - "L0.124[123,124] 124ns 2mb |L0.124| " - - "L0.123[122,123] 123ns 2mb |L0.123| " - - "L0.122[121,122] 122ns 2mb |L0.122| " - - "L0.121[120,121] 121ns 2mb |L0.121| " - - "L0.120[119,120] 120ns 2mb |L0.120| " - - "L0.119[118,119] 119ns 2mb |L0.119| " - - "L0.118[117,118] 118ns 2mb |L0.118| " - - "L0.117[116,117] 117ns 2mb |L0.117| " - - "L0.116[115,116] 116ns 2mb |L0.116| " - - "L0.115[114,115] 115ns 2mb |L0.115| " - - "L0.114[113,114] 114ns 2mb |L0.114| " - - "L0.113[112,113] 113ns 2mb |L0.113| " - - "L0.112[111,112] 112ns 2mb |L0.112| " - - "L0.111[110,111] 111ns 2mb |L0.111| " - - "L0.110[109,110] 110ns 2mb |L0.110| " - - "L0.109[108,109] 109ns 2mb |L0.109| 
" - - "L0.108[107,108] 108ns 2mb |L0.108| " - - "L0.107[106,107] 107ns 2mb |L0.107| " - - "L0.106[105,106] 106ns 2mb |L0.106| " - - "L0.105[104,105] 105ns 2mb |L0.105| " - - "L0.104[103,104] 104ns 2mb |L0.104| " - - "L0.103[102,103] 103ns 2mb |L0.103| " - - "L0.102[101,102] 102ns 2mb |L0.102| " - - "L0.101[100,101] 101ns 2mb |L0.101| " - - "L0.100[99,100] 100ns 2mb |L0.100| " - - "L0.99[98,99] 99ns 2mb |L0.99| " - - "L0.98[97,98] 98ns 2mb |L0.98| " - - "L0.97[96,97] 97ns 2mb |L0.97| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 190mb total:" - - "L1 " - - "L1.?[96,146] 191ns 102mb |--------------------L1.?---------------------| " - - "L1.?[147,190] 191ns 88mb |-----------------L1.?------------------| " - - "Committing partition 1:" - - " Soft Deleting 96 files: L0.97, L0.98, L0.99, L0.100, L0.101, L0.102, L0.103, L0.104, L0.105, L0.106, L0.107, L0.108, L0.109, L0.110, L0.111, L0.112, L0.113, L0.114, L0.115, L0.116, L0.117, L0.118, L0.119, L0.120, L0.121, L0.122, L0.123, L0.124, L0.125, L0.126, L0.127, L0.128, L0.129, L0.130, L0.131, L0.132, L0.133, L0.134, L0.135, L0.136, L0.137, L0.138, L0.139, L0.140, L0.141, L0.142, L0.143, L0.144, L0.145, L0.146, L0.147, L0.148, L0.149, L0.150, L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L0.290, L0.291" - - " Creating 2 files" - "Committing partition 1:" - " Upgrading 6 files level to CompactionLevel::L2: L1.293, L1.294, L1.295, L1.296, L1.297, L1.298" - "**** Final Output Files (580mb written)" - "L2 " - "L2.293[0,50] 96ns 101mb |---L2.293----| " - "L2.294[51,95] 96ns 90mb |--L2.294---| " - - "L2.295[191,241] 288ns 101mb |---L2.295----| " - - "L2.296[242,288] 288ns 94mb |---L2.296---| " - - "L2.297[96,146] 191ns 102mb |---L2.297----| " - - "L2.298[147,190] 191ns 88mb |--L2.298---| " + - "L2.295[96,146] 191ns 102mb |---L2.295----| " + - "L2.296[147,190] 191ns 88mb |--L2.296---| " + - "L2.297[191,241] 288ns 101mb |---L2.297----| " + - "L2.298[242,288] 288ns 94mb |---L2.298---| " "### ); } diff --git a/compactor/tests/layouts/many_files.rs b/compactor/tests/layouts/many_files.rs index 3f1f0cb6cb..bcd47da409 100644 --- a/compactor/tests/layouts/many_files.rs +++ b/compactor/tests/layouts/many_files.rs @@ -297,39 +297,31 @@ async fn many_l0_files_different_created_order_non_overlap() { - "Committing partition 1:" - " Soft Deleting 1 files: L0.1" - " Creating 1 files" - - "**** Simulation run 1, type=compact(ManySmallFiles). 2 Input Files, 5kb total:" + - "**** Simulation run 1, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 5kb total:" - "L0, all files 3kb " - "L0.2[31,40] 2ns |------------------L0.2------------------| " - "L0.3[21,30] 3ns |------------------L0.3------------------| " - "**** 1 Output Files (parquet_file_id not yet assigned), 5kb total:" - - "L0, all files 5kb " - - "L0.?[21,40] 3ns |------------------------------------------L0.?------------------------------------------|" + - "L1, all files 5kb " + - "L1.?[21,40] 3ns |------------------------------------------L1.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 2 files: L0.2, L0.3" - " Creating 1 files" - - "**** Simulation run 2, type=compact(FoundSubsetLessThanMaxCompactSize). 
2 Input Files, 8kb total:" + - "**** Simulation run 2, type=compact(TotalSizeLessThanMaxCompactSize). 3 Input Files, 10kb total:" - "L0 " - - "L0.6[21,40] 3ns 5kb |--------------------------L0.6--------------------------| " - - "L0.4[41,50] 4ns 3kb |----------L0.4-----------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 8kb total:" - - "L1, all files 8kb " - - "L1.?[21,50] 4ns |------------------------------------------L1.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 2 files: L0.4, L0.6" - - " Creating 1 files" - - "**** Simulation run 3, type=compact(FoundSubsetLessThanMaxCompactSize). 2 Input Files, 10kb total:" + - "L0.4[41,50] 4ns 3kb |-------L0.4-------| " - "L1 " + - "L1.6[21,40] 3ns 5kb |------------------L1.6-------------------| " - "L1.5[11,20] 1ns 3kb |-------L1.5-------| " - - "L1.7[21,50] 4ns 8kb |------------------------------L1.7------------------------------| " - "**** 1 Output Files (parquet_file_id not yet assigned), 10kb total:" - - "L2, all files 10kb " - - "L2.?[11,50] 4ns |------------------------------------------L2.?------------------------------------------|" + - "L1, all files 10kb " + - "L1.?[11,50] 4ns |------------------------------------------L1.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 2 files: L1.5, L1.7" + - " Soft Deleting 3 files: L0.4, L1.5, L1.6" - " Creating 1 files" - - "**** Final Output Files (25kb written)" - - "L2, all files 10kb " - - "L2.8[11,50] 4ns |------------------------------------------L2.8------------------------------------------|" + - "**** Final Output Files (18kb written)" + - "L1, all files 10kb " + - "L1.7[11,50] 4ns |------------------------------------------L1.7------------------------------------------|" "### ); } @@ -421,6 +413,7 @@ async fn many_l1_files() { - "L1.17[32,33] 17ns 10mb |L1.17| " - "L1.16[30,31] 16ns 10mb |L1.16| " - "L1.15[28,29] 15ns 10mb |L1.15| " + - "L1.14[26,27] 14ns 10mb |L1.14| " - "L1.11[20,21] 11ns 10mb |L1.11| " - "L1.10[18,19] 10ns 10mb |L1.10| " - "L1.9[16,17] 9ns 10mb |L1.9| " @@ -432,7 +425,6 @@ async fn many_l1_files() { - "L1.3[4,5] 3ns 10mb |L1.3| " - "L1.2[2,3] 2ns 10mb |L1.2| " - "L1.1[0,1] 1ns 10mb |L1.1| " - - "L1.14[26,27] 14ns 10mb |L1.14| " - "L1.12[22,23] 12ns 10mb |L1.12| " - "L1.24[24,25] 23ns 13mb |L1.24| " - "**** 2 Output Files (parquet_file_id not yet assigned), 203mb total:" @@ -3420,230 +3412,239 @@ async fn not_many_l0_and_overlapped_l1_files() { - "L1.208[2600,2609] 18ns 1mb |L1.208| " - "L1.209[2800,2809] 19ns 1mb |L1.209|" - "L1.210[3000,3009] 20ns 1mb |L1.210|" - - "**** Simulation run 0, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[2407]). 210 Input Files, 21mb total:" + - "**** Simulation run 0, type=compact(TotalSizeLessThanMaxCompactSize). 
195 Input Files, 6mb total:" - "L0 " - - "L0.190[189,190] 210ns 7kb |L0.190| " - - "L0.189[188,189] 209ns 7kb |L0.189| " - - "L0.188[187,188] 208ns 7kb |L0.188| " - - "L0.187[186,187] 207ns 7kb |L0.187| " - - "L0.186[185,186] 206ns 7kb |L0.186| " - - "L0.185[184,185] 205ns 7kb |L0.185| " - - "L0.184[183,184] 204ns 7kb |L0.184| " - - "L0.183[182,183] 203ns 7kb |L0.183| " - - "L0.182[181,182] 202ns 7kb |L0.182| " - - "L0.181[180,181] 201ns 7kb |L0.181| " - - "L0.180[179,180] 200ns 7kb |L0.180| " - - "L0.179[178,179] 199ns 7kb |L0.179| " - - "L0.178[177,178] 198ns 7kb |L0.178| " - - "L0.177[176,177] 197ns 7kb |L0.177| " - - "L0.176[175,176] 196ns 7kb |L0.176| " - - "L0.175[174,175] 195ns 7kb |L0.175| " - - "L0.174[173,174] 194ns 7kb |L0.174| " - - "L0.173[172,173] 193ns 7kb |L0.173| " - - "L0.172[171,172] 192ns 7kb |L0.172| " - - "L0.171[170,171] 191ns 7kb |L0.171| " - - "L0.170[169,170] 190ns 7kb |L0.170| " - - "L0.169[168,169] 189ns 7kb |L0.169| " - - "L0.168[167,168] 188ns 7kb |L0.168| " - - "L0.167[166,167] 187ns 7kb |L0.167| " - - "L0.166[165,166] 186ns 7kb |L0.166| " - - "L0.165[164,165] 185ns 7kb |L0.165| " - - "L0.164[163,164] 184ns 7kb |L0.164| " - - "L0.163[162,163] 183ns 7kb |L0.163| " - - "L0.162[161,162] 182ns 7kb |L0.162| " - - "L0.161[160,161] 181ns 7kb |L0.161| " - - "L0.160[159,160] 180ns 7kb |L0.160| " - - "L0.159[158,159] 179ns 7kb |L0.159| " - - "L0.158[157,158] 178ns 7kb |L0.158| " - - "L0.157[156,157] 177ns 7kb |L0.157| " - - "L0.156[155,156] 176ns 7kb |L0.156| " - - "L0.155[154,155] 175ns 7kb |L0.155| " - - "L0.154[153,154] 174ns 7kb |L0.154| " - - "L0.153[152,153] 173ns 7kb |L0.153| " - - "L0.152[151,152] 172ns 7kb |L0.152| " - - "L0.151[150,151] 171ns 7kb |L0.151| " - - "L0.150[149,150] 170ns 7kb |L0.150| " - - "L0.149[148,149] 169ns 7kb |L0.149| " - - "L0.148[147,148] 168ns 7kb |L0.148| " - - "L0.147[146,147] 167ns 7kb |L0.147| " - - "L0.146[145,146] 166ns 7kb |L0.146| " - - "L0.145[144,145] 165ns 7kb |L0.145| " - - "L0.144[143,144] 164ns 7kb |L0.144| " - - "L0.143[142,143] 163ns 7kb |L0.143| " - - "L0.142[141,142] 162ns 7kb |L0.142| " - - "L0.141[140,141] 161ns 7kb |L0.141| " - - "L0.140[139,140] 160ns 7kb |L0.140| " - - "L0.139[138,139] 159ns 7kb |L0.139| " - - "L0.138[137,138] 158ns 7kb |L0.138| " - - "L0.137[136,137] 157ns 7kb |L0.137| " - - "L0.136[135,136] 156ns 7kb |L0.136| " - - "L0.135[134,135] 155ns 7kb |L0.135| " - - "L0.134[133,134] 154ns 7kb |L0.134| " - - "L0.133[132,133] 153ns 7kb |L0.133| " - - "L0.132[131,132] 152ns 7kb |L0.132| " - - "L0.131[130,131] 151ns 7kb |L0.131| " - - "L0.130[129,130] 150ns 7kb |L0.130| " - - "L0.129[128,129] 149ns 7kb |L0.129| " - - "L0.128[127,128] 148ns 7kb |L0.128| " - - "L0.127[126,127] 147ns 7kb |L0.127| " - - "L0.126[125,126] 146ns 7kb |L0.126| " - - "L0.125[124,125] 145ns 7kb |L0.125| " - - "L0.124[123,124] 144ns 7kb |L0.124| " - - "L0.123[122,123] 143ns 7kb |L0.123| " - - "L0.122[121,122] 142ns 7kb |L0.122| " - - "L0.121[120,121] 141ns 7kb |L0.121| " - - "L0.120[119,120] 140ns 7kb |L0.120| " - - "L0.119[118,119] 139ns 7kb |L0.119| " - - "L0.118[117,118] 138ns 7kb |L0.118| " - - "L0.117[116,117] 137ns 7kb |L0.117| " - - "L0.116[115,116] 136ns 7kb |L0.116| " - - "L0.115[114,115] 135ns 7kb |L0.115| " - - "L0.114[113,114] 134ns 7kb |L0.114| " - - "L0.113[112,113] 133ns 7kb |L0.113| " - - "L0.112[111,112] 132ns 7kb |L0.112| " - - "L0.111[110,111] 131ns 7kb |L0.111| " - - "L0.110[109,110] 130ns 7kb |L0.110| " - - "L0.109[108,109] 129ns 7kb |L0.109| " - - "L0.108[107,108] 128ns 7kb |L0.108| " - - "L0.107[106,107] 127ns 7kb 
|L0.107| " - - "L0.106[105,106] 126ns 7kb |L0.106| " - - "L0.105[104,105] 125ns 7kb |L0.105| " - - "L0.104[103,104] 124ns 7kb |L0.104| " - - "L0.103[102,103] 123ns 7kb |L0.103| " - - "L0.102[101,102] 122ns 7kb |L0.102| " - - "L0.101[100,101] 121ns 7kb |L0.101| " - - "L0.100[99,100] 120ns 7kb |L0.100| " - - "L0.99[98,99] 119ns 7kb |L0.99| " - - "L0.98[97,98] 118ns 7kb |L0.98| " - - "L0.97[96,97] 117ns 7kb |L0.97| " - - "L0.96[95,96] 116ns 7kb |L0.96| " - - "L0.95[94,95] 115ns 7kb |L0.95| " - - "L0.94[93,94] 114ns 7kb |L0.94| " - - "L0.93[92,93] 113ns 7kb |L0.93| " - - "L0.92[91,92] 112ns 7kb |L0.92| " - - "L0.91[90,91] 111ns 7kb |L0.91| " - - "L0.90[89,90] 110ns 7kb |L0.90| " - - "L0.89[88,89] 109ns 7kb |L0.89| " - - "L0.88[87,88] 108ns 7kb |L0.88| " - - "L0.87[86,87] 107ns 7kb |L0.87| " - - "L0.86[85,86] 106ns 7kb |L0.86| " - - "L0.85[84,85] 105ns 7kb |L0.85| " - - "L0.84[83,84] 104ns 7kb |L0.84| " - - "L0.83[82,83] 103ns 7kb |L0.83| " - - "L0.82[81,82] 102ns 7kb |L0.82| " - - "L0.81[80,81] 101ns 7kb |L0.81| " - - "L0.80[79,80] 100ns 7kb |L0.80| " - - "L0.79[78,79] 99ns 7kb |L0.79| " - - "L0.78[77,78] 98ns 7kb |L0.78| " - - "L0.77[76,77] 97ns 7kb |L0.77| " - - "L0.76[75,76] 96ns 7kb |L0.76| " - - "L0.75[74,75] 95ns 7kb |L0.75| " - - "L0.74[73,74] 94ns 7kb |L0.74| " - - "L0.73[72,73] 93ns 7kb |L0.73| " - - "L0.72[71,72] 92ns 7kb |L0.72| " - - "L0.71[70,71] 91ns 7kb |L0.71| " - - "L0.70[69,70] 90ns 7kb |L0.70| " - - "L0.69[68,69] 89ns 7kb |L0.69| " - - "L0.68[67,68] 88ns 7kb |L0.68| " - - "L0.67[66,67] 87ns 7kb |L0.67| " - - "L0.66[65,66] 86ns 7kb |L0.66| " - - "L0.65[64,65] 85ns 7kb |L0.65| " - - "L0.64[63,64] 84ns 7kb |L0.64| " - - "L0.63[62,63] 83ns 7kb |L0.63| " - - "L0.62[61,62] 82ns 7kb |L0.62| " - - "L0.61[60,61] 81ns 7kb |L0.61| " - - "L0.60[59,60] 80ns 7kb |L0.60| " - - "L0.59[58,59] 79ns 7kb |L0.59| " - - "L0.58[57,58] 78ns 7kb |L0.58| " - - "L0.57[56,57] 77ns 7kb |L0.57| " - - "L0.56[55,56] 76ns 7kb |L0.56| " - - "L0.55[54,55] 75ns 7kb |L0.55| " - - "L0.54[53,54] 74ns 7kb |L0.54| " - - "L0.53[52,53] 73ns 7kb |L0.53| " - - "L0.52[51,52] 72ns 7kb |L0.52| " - - "L0.51[50,51] 71ns 7kb |L0.51| " - - "L0.50[49,50] 70ns 7kb |L0.50| " - - "L0.49[48,49] 69ns 7kb |L0.49| " - - "L0.48[47,48] 68ns 7kb |L0.48| " - - "L0.47[46,47] 67ns 7kb |L0.47| " - - "L0.46[45,46] 66ns 7kb |L0.46| " - - "L0.45[44,45] 65ns 7kb |L0.45| " - - "L0.44[43,44] 64ns 7kb |L0.44| " - - "L0.43[42,43] 63ns 7kb |L0.43| " - - "L0.42[41,42] 62ns 7kb |L0.42| " - - "L0.41[40,41] 61ns 7kb |L0.41| " - - "L0.40[39,40] 60ns 7kb |L0.40| " - - "L0.39[38,39] 59ns 7kb |L0.39| " - - "L0.38[37,38] 58ns 7kb |L0.38| " - - "L0.37[36,37] 57ns 7kb |L0.37| " - - "L0.36[35,36] 56ns 7kb |L0.36| " - - "L0.35[34,35] 55ns 7kb |L0.35| " - - "L0.34[33,34] 54ns 7kb |L0.34| " - - "L0.33[32,33] 53ns 7kb |L0.33| " - - "L0.32[31,32] 52ns 7kb |L0.32| " - - "L0.31[30,31] 51ns 7kb |L0.31| " - - "L0.30[29,30] 50ns 7kb |L0.30| " - - "L0.29[28,29] 49ns 7kb |L0.29| " - - "L0.28[27,28] 48ns 7kb |L0.28| " - - "L0.27[26,27] 47ns 7kb |L0.27| " - - "L0.26[25,26] 46ns 7kb |L0.26| " - - "L0.25[24,25] 45ns 7kb |L0.25| " - - "L0.24[23,24] 44ns 7kb |L0.24| " - - "L0.23[22,23] 43ns 7kb |L0.23| " - - "L0.22[21,22] 42ns 7kb |L0.22| " - - "L0.21[20,21] 41ns 7kb |L0.21| " - - "L0.20[19,20] 40ns 7kb |L0.20| " - - "L0.19[18,19] 39ns 7kb |L0.19| " - - "L0.18[17,18] 38ns 7kb |L0.18| " - - "L0.17[16,17] 37ns 7kb |L0.17| " - - "L0.16[15,16] 36ns 7kb |L0.16| " - - "L0.15[14,15] 35ns 7kb |L0.15| " - - "L0.14[13,14] 34ns 7kb |L0.14| " - - "L0.13[12,13] 33ns 7kb |L0.13| " - - 
"L0.12[11,12] 32ns 7kb |L0.12| " - - "L0.11[10,11] 31ns 7kb |L0.11| " - - "L0.10[9,10] 30ns 7kb |L0.10| " - - "L0.9[8,9] 29ns 7kb |L0.9| " - - "L0.8[7,8] 28ns 7kb |L0.8| " - - "L0.7[6,7] 27ns 7kb |L0.7| " - - "L0.6[5,6] 26ns 7kb |L0.6| " - - "L0.5[4,5] 25ns 7kb |L0.5| " - - "L0.4[3,4] 24ns 7kb |L0.4| " + - "L0.190[189,190] 210ns 7kb |L0.190|" + - "L0.189[188,189] 209ns 7kb |L0.189|" + - "L0.188[187,188] 208ns 7kb |L0.188|" + - "L0.187[186,187] 207ns 7kb |L0.187|" + - "L0.186[185,186] 206ns 7kb |L0.186|" + - "L0.185[184,185] 205ns 7kb |L0.185|" + - "L0.184[183,184] 204ns 7kb |L0.184|" + - "L0.183[182,183] 203ns 7kb |L0.183|" + - "L0.182[181,182] 202ns 7kb |L0.182|" + - "L0.181[180,181] 201ns 7kb |L0.181|" + - "L0.180[179,180] 200ns 7kb |L0.180|" + - "L0.179[178,179] 199ns 7kb |L0.179|" + - "L0.178[177,178] 198ns 7kb |L0.178|" + - "L0.177[176,177] 197ns 7kb |L0.177|" + - "L0.176[175,176] 196ns 7kb |L0.176|" + - "L0.175[174,175] 195ns 7kb |L0.175|" + - "L0.174[173,174] 194ns 7kb |L0.174| " + - "L0.173[172,173] 193ns 7kb |L0.173| " + - "L0.172[171,172] 192ns 7kb |L0.172| " + - "L0.171[170,171] 191ns 7kb |L0.171| " + - "L0.170[169,170] 190ns 7kb |L0.170| " + - "L0.169[168,169] 189ns 7kb |L0.169| " + - "L0.168[167,168] 188ns 7kb |L0.168| " + - "L0.167[166,167] 187ns 7kb |L0.167| " + - "L0.166[165,166] 186ns 7kb |L0.166| " + - "L0.165[164,165] 185ns 7kb |L0.165| " + - "L0.164[163,164] 184ns 7kb |L0.164| " + - "L0.163[162,163] 183ns 7kb |L0.163| " + - "L0.162[161,162] 182ns 7kb |L0.162| " + - "L0.161[160,161] 181ns 7kb |L0.161| " + - "L0.160[159,160] 180ns 7kb |L0.160| " + - "L0.159[158,159] 179ns 7kb |L0.159| " + - "L0.158[157,158] 178ns 7kb |L0.158| " + - "L0.157[156,157] 177ns 7kb |L0.157| " + - "L0.156[155,156] 176ns 7kb |L0.156| " + - "L0.155[154,155] 175ns 7kb |L0.155| " + - "L0.154[153,154] 174ns 7kb |L0.154| " + - "L0.153[152,153] 173ns 7kb |L0.153| " + - "L0.152[151,152] 172ns 7kb |L0.152| " + - "L0.151[150,151] 171ns 7kb |L0.151| " + - "L0.150[149,150] 170ns 7kb |L0.150| " + - "L0.149[148,149] 169ns 7kb |L0.149| " + - "L0.148[147,148] 168ns 7kb |L0.148| " + - "L0.147[146,147] 167ns 7kb |L0.147| " + - "L0.146[145,146] 166ns 7kb |L0.146| " + - "L0.145[144,145] 165ns 7kb |L0.145| " + - "L0.144[143,144] 164ns 7kb |L0.144| " + - "L0.143[142,143] 163ns 7kb |L0.143| " + - "L0.142[141,142] 162ns 7kb |L0.142| " + - "L0.141[140,141] 161ns 7kb |L0.141| " + - "L0.140[139,140] 160ns 7kb |L0.140| " + - "L0.139[138,139] 159ns 7kb |L0.139| " + - "L0.138[137,138] 158ns 7kb |L0.138| " + - "L0.137[136,137] 157ns 7kb |L0.137| " + - "L0.136[135,136] 156ns 7kb |L0.136| " + - "L0.135[134,135] 155ns 7kb |L0.135| " + - "L0.134[133,134] 154ns 7kb |L0.134| " + - "L0.133[132,133] 153ns 7kb |L0.133| " + - "L0.132[131,132] 152ns 7kb |L0.132| " + - "L0.131[130,131] 151ns 7kb |L0.131| " + - "L0.130[129,130] 150ns 7kb |L0.130| " + - "L0.129[128,129] 149ns 7kb |L0.129| " + - "L0.128[127,128] 148ns 7kb |L0.128| " + - "L0.127[126,127] 147ns 7kb |L0.127| " + - "L0.126[125,126] 146ns 7kb |L0.126| " + - "L0.125[124,125] 145ns 7kb |L0.125| " + - "L0.124[123,124] 144ns 7kb |L0.124| " + - "L0.123[122,123] 143ns 7kb |L0.123| " + - "L0.122[121,122] 142ns 7kb |L0.122| " + - "L0.121[120,121] 141ns 7kb |L0.121| " + - "L0.120[119,120] 140ns 7kb |L0.120| " + - "L0.119[118,119] 139ns 7kb |L0.119| " + - "L0.118[117,118] 138ns 7kb |L0.118| " + - "L0.117[116,117] 137ns 7kb |L0.117| " + - "L0.116[115,116] 136ns 7kb |L0.116| " + - "L0.115[114,115] 135ns 7kb |L0.115| " + - "L0.114[113,114] 134ns 7kb |L0.114| " + - "L0.113[112,113] 133ns 7kb 
|L0.113| " + - "L0.112[111,112] 132ns 7kb |L0.112| " + - "L0.111[110,111] 131ns 7kb |L0.111| " + - "L0.110[109,110] 130ns 7kb |L0.110| " + - "L0.109[108,109] 129ns 7kb |L0.109| " + - "L0.108[107,108] 128ns 7kb |L0.108| " + - "L0.107[106,107] 127ns 7kb |L0.107| " + - "L0.106[105,106] 126ns 7kb |L0.106| " + - "L0.105[104,105] 125ns 7kb |L0.105| " + - "L0.104[103,104] 124ns 7kb |L0.104| " + - "L0.103[102,103] 123ns 7kb |L0.103| " + - "L0.102[101,102] 122ns 7kb |L0.102| " + - "L0.101[100,101] 121ns 7kb |L0.101| " + - "L0.100[99,100] 120ns 7kb |L0.100| " + - "L0.99[98,99] 119ns 7kb |L0.99| " + - "L0.98[97,98] 118ns 7kb |L0.98| " + - "L0.97[96,97] 117ns 7kb |L0.97| " + - "L0.96[95,96] 116ns 7kb |L0.96| " + - "L0.95[94,95] 115ns 7kb |L0.95| " + - "L0.94[93,94] 114ns 7kb |L0.94| " + - "L0.93[92,93] 113ns 7kb |L0.93| " + - "L0.92[91,92] 112ns 7kb |L0.92| " + - "L0.91[90,91] 111ns 7kb |L0.91| " + - "L0.90[89,90] 110ns 7kb |L0.90| " + - "L0.89[88,89] 109ns 7kb |L0.89| " + - "L0.88[87,88] 108ns 7kb |L0.88| " + - "L0.87[86,87] 107ns 7kb |L0.87| " + - "L0.86[85,86] 106ns 7kb |L0.86| " + - "L0.85[84,85] 105ns 7kb |L0.85| " + - "L0.84[83,84] 104ns 7kb |L0.84| " + - "L0.83[82,83] 103ns 7kb |L0.83| " + - "L0.82[81,82] 102ns 7kb |L0.82| " + - "L0.81[80,81] 101ns 7kb |L0.81| " + - "L0.80[79,80] 100ns 7kb |L0.80| " + - "L0.79[78,79] 99ns 7kb |L0.79| " + - "L0.78[77,78] 98ns 7kb |L0.78| " + - "L0.77[76,77] 97ns 7kb |L0.77| " + - "L0.76[75,76] 96ns 7kb |L0.76| " + - "L0.75[74,75] 95ns 7kb |L0.75| " + - "L0.74[73,74] 94ns 7kb |L0.74| " + - "L0.73[72,73] 93ns 7kb |L0.73| " + - "L0.72[71,72] 92ns 7kb |L0.72| " + - "L0.71[70,71] 91ns 7kb |L0.71| " + - "L0.70[69,70] 90ns 7kb |L0.70| " + - "L0.69[68,69] 89ns 7kb |L0.69| " + - "L0.68[67,68] 88ns 7kb |L0.68| " + - "L0.67[66,67] 87ns 7kb |L0.67| " + - "L0.66[65,66] 86ns 7kb |L0.66| " + - "L0.65[64,65] 85ns 7kb |L0.65| " + - "L0.64[63,64] 84ns 7kb |L0.64| " + - "L0.63[62,63] 83ns 7kb |L0.63| " + - "L0.62[61,62] 82ns 7kb |L0.62| " + - "L0.61[60,61] 81ns 7kb |L0.61| " + - "L0.60[59,60] 80ns 7kb |L0.60| " + - "L0.59[58,59] 79ns 7kb |L0.59| " + - "L0.58[57,58] 78ns 7kb |L0.58| " + - "L0.57[56,57] 77ns 7kb |L0.57| " + - "L0.56[55,56] 76ns 7kb |L0.56| " + - "L0.55[54,55] 75ns 7kb |L0.55| " + - "L0.54[53,54] 74ns 7kb |L0.54| " + - "L0.53[52,53] 73ns 7kb |L0.53| " + - "L0.52[51,52] 72ns 7kb |L0.52| " + - "L0.51[50,51] 71ns 7kb |L0.51| " + - "L0.50[49,50] 70ns 7kb |L0.50| " + - "L0.49[48,49] 69ns 7kb |L0.49| " + - "L0.48[47,48] 68ns 7kb |L0.48| " + - "L0.47[46,47] 67ns 7kb |L0.47| " + - "L0.46[45,46] 66ns 7kb |L0.46| " + - "L0.45[44,45] 65ns 7kb |L0.45| " + - "L0.44[43,44] 64ns 7kb |L0.44| " + - "L0.43[42,43] 63ns 7kb |L0.43| " + - "L0.42[41,42] 62ns 7kb |L0.42| " + - "L0.41[40,41] 61ns 7kb |L0.41| " + - "L0.40[39,40] 60ns 7kb |L0.40| " + - "L0.39[38,39] 59ns 7kb |L0.39| " + - "L0.38[37,38] 58ns 7kb |L0.38| " + - "L0.37[36,37] 57ns 7kb |L0.37| " + - "L0.36[35,36] 56ns 7kb |L0.36| " + - "L0.35[34,35] 55ns 7kb |L0.35| " + - "L0.34[33,34] 54ns 7kb |L0.34| " + - "L0.33[32,33] 53ns 7kb |L0.33| " + - "L0.32[31,32] 52ns 7kb |L0.32| " + - "L0.31[30,31] 51ns 7kb |L0.31| " + - "L0.30[29,30] 50ns 7kb |L0.30| " + - "L0.29[28,29] 49ns 7kb |L0.29| " + - "L0.28[27,28] 48ns 7kb |L0.28| " + - "L0.27[26,27] 47ns 7kb |L0.27| " + - "L0.26[25,26] 46ns 7kb |L0.26| " + - "L0.25[24,25] 45ns 7kb |L0.25| " + - "L0.24[23,24] 44ns 7kb |L0.24| " + - "L0.23[22,23] 43ns 7kb |L0.23| " + - "L0.22[21,22] 42ns 7kb |L0.22| " + - "L0.21[20,21] 41ns 7kb |L0.21| " + - "L0.20[19,20] 40ns 7kb |L0.20| " + - "L0.19[18,19] 
39ns 7kb |L0.19| " + - "L0.18[17,18] 38ns 7kb |L0.18| " + - "L0.17[16,17] 37ns 7kb |L0.17| " + - "L0.16[15,16] 36ns 7kb |L0.16| " + - "L0.15[14,15] 35ns 7kb |L0.15| " + - "L0.14[13,14] 34ns 7kb |L0.14| " + - "L0.13[12,13] 33ns 7kb |L0.13| " + - "L0.12[11,12] 32ns 7kb |L0.12| " + - "L0.11[10,11] 31ns 7kb |L0.11| " + - "L0.10[9,10] 30ns 7kb |L0.10| " + - "L0.9[8,9] 29ns 7kb |L0.9| " + - "L0.8[7,8] 28ns 7kb |L0.8| " + - "L0.7[6,7] 27ns 7kb |L0.7| " + - "L0.6[5,6] 26ns 7kb |L0.6| " + - "L0.5[4,5] 25ns 7kb |L0.5| " + - "L0.4[3,4] 24ns 7kb |L0.4| " - "L0.3[2,3] 23ns 7kb |L0.3| " - "L0.2[1,2] 22ns 7kb |L0.2| " - "L0.1[0,1] 21ns 7kb |L0.1| " - "L1 " - - "L1.195[40,49] 5ns 1mb |L1.195| " - - "L1.194[30,39] 4ns 1mb |L1.194| " - - "L1.193[20,29] 3ns 1mb |L1.193| " - - "L1.192[10,19] 2ns 1mb |L1.192| " + - "L1.195[40,49] 5ns 1mb |L1.195| " + - "L1.194[30,39] 4ns 1mb |L1.194| " + - "L1.193[20,29] 3ns 1mb |L1.193| " + - "L1.192[10,19] 2ns 1mb |L1.192| " - "L1.191[0,9] 1ns 1mb |L1.191| " - - "L1.196[200,209] 6ns 1mb |L1.196| " - - "L1.197[400,409] 7ns 1mb |L1.197| " - - "L1.198[600,609] 8ns 1mb |L1.198| " - - "L1.199[800,809] 9ns 1mb |L1.199| " - - "L1.200[1000,1009] 10ns 1mb |L1.200| " - - "L1.201[1200,1209] 11ns 1mb |L1.201| " - - "L1.202[1400,1409] 12ns 1mb |L1.202| " - - "L1.203[1600,1609] 13ns 1mb |L1.203| " - - "L1.204[1800,1809] 14ns 1mb |L1.204| " - - "L1.205[2000,2009] 15ns 1mb |L1.205| " - - "L1.206[2200,2209] 16ns 1mb |L1.206| " - - "L1.207[2400,2409] 17ns 1mb |L1.207| " - - "L1.208[2600,2609] 18ns 1mb |L1.208| " - - "L1.209[2800,2809] 19ns 1mb |L1.209|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 6mb total:" + - "L1, all files 6mb " + - "L1.?[0,190] 210ns |------------------------------------------L1.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 195 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50, L0.51, L0.52, L0.53, L0.54, L0.55, L0.56, L0.57, L0.58, L0.59, L0.60, L0.61, L0.62, L0.63, L0.64, L0.65, L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.73, L0.74, L0.75, L0.76, L0.77, L0.78, L0.79, L0.80, L0.81, L0.82, L0.83, L0.84, L0.85, L0.86, L0.87, L0.88, L0.89, L0.90, L0.91, L0.92, L0.93, L0.94, L0.95, L0.96, L0.97, L0.98, L0.99, L0.100, L0.101, L0.102, L0.103, L0.104, L0.105, L0.106, L0.107, L0.108, L0.109, L0.110, L0.111, L0.112, L0.113, L0.114, L0.115, L0.116, L0.117, L0.118, L0.119, L0.120, L0.121, L0.122, L0.123, L0.124, L0.125, L0.126, L0.127, L0.128, L0.129, L0.130, L0.131, L0.132, L0.133, L0.134, L0.135, L0.136, L0.137, L0.138, L0.139, L0.140, L0.141, L0.142, L0.143, L0.144, L0.145, L0.146, L0.147, L0.148, L0.149, L0.150, L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L1.191, L1.192, L1.193, L1.194, L1.195" + - " Creating 1 files" + - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[2407]). 
16 Input Files, 21mb total:" + - "L1 " - "L1.210[3000,3009] 20ns 1mb |L1.210|" + - "L1.209[2800,2809] 19ns 1mb |L1.209|" + - "L1.208[2600,2609] 18ns 1mb |L1.208| " + - "L1.207[2400,2409] 17ns 1mb |L1.207| " + - "L1.206[2200,2209] 16ns 1mb |L1.206| " + - "L1.205[2000,2009] 15ns 1mb |L1.205| " + - "L1.204[1800,1809] 14ns 1mb |L1.204| " + - "L1.203[1600,1609] 13ns 1mb |L1.203| " + - "L1.202[1400,1409] 12ns 1mb |L1.202| " + - "L1.201[1200,1209] 11ns 1mb |L1.201| " + - "L1.200[1000,1009] 10ns 1mb |L1.200| " + - "L1.199[800,809] 9ns 1mb |L1.199| " + - "L1.198[600,609] 8ns 1mb |L1.198| " + - "L1.197[400,409] 7ns 1mb |L1.197| " + - "L1.196[200,209] 6ns 1mb |L1.196| " + - "L1.211[0,190] 210ns 6mb |L1.211| " - "**** 2 Output Files (parquet_file_id not yet assigned), 21mb total:" - - "L1 " - - "L1.?[0,2407] 210ns 17mb |--------------------------------L1.?---------------------------------| " - - "L1.?[2408,3009] 210ns 4mb |-----L1.?------| " + - "L2 " + - "L2.?[0,2407] 210ns 17mb |--------------------------------L2.?---------------------------------| " + - "L2.?[2408,3009] 210ns 4mb |-----L2.?------| " - "Committing partition 1:" - - " Soft Deleting 210 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50, L0.51, L0.52, L0.53, L0.54, L0.55, L0.56, L0.57, L0.58, L0.59, L0.60, L0.61, L0.62, L0.63, L0.64, L0.65, L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.73, L0.74, L0.75, L0.76, L0.77, L0.78, L0.79, L0.80, L0.81, L0.82, L0.83, L0.84, L0.85, L0.86, L0.87, L0.88, L0.89, L0.90, L0.91, L0.92, L0.93, L0.94, L0.95, L0.96, L0.97, L0.98, L0.99, L0.100, L0.101, L0.102, L0.103, L0.104, L0.105, L0.106, L0.107, L0.108, L0.109, L0.110, L0.111, L0.112, L0.113, L0.114, L0.115, L0.116, L0.117, L0.118, L0.119, L0.120, L0.121, L0.122, L0.123, L0.124, L0.125, L0.126, L0.127, L0.128, L0.129, L0.130, L0.131, L0.132, L0.133, L0.134, L0.135, L0.136, L0.137, L0.138, L0.139, L0.140, L0.141, L0.142, L0.143, L0.144, L0.145, L0.146, L0.147, L0.148, L0.149, L0.150, L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L1.191, L1.192, L1.193, L1.194, L1.195, L1.196, L1.197, L1.198, L1.199, L1.200, L1.201, L1.202, L1.203, L1.204, L1.205, L1.206, L1.207, L1.208, L1.209, L1.210" + - " Soft Deleting 16 files: L1.196, L1.197, L1.198, L1.199, L1.200, L1.201, L1.202, L1.203, L1.204, L1.205, L1.206, L1.207, L1.208, L1.209, L1.210, L1.211" - " Creating 2 files" - - "**** Final Output Files (21mb written)" - - "L1 " - - "L1.211[0,2407] 210ns 17mb|-------------------------------L1.211--------------------------------| " - - "L1.212[2408,3009] 210ns 4mb |----L1.212-----| " + - "**** Final Output Files (28mb written)" + - "L2 " + - "L2.212[0,2407] 210ns 17mb|-------------------------------L2.212--------------------------------| " + - "L2.213[2408,3009] 210ns 4mb |----L2.213-----| " "### ); } @@ -5742,21 +5743,21 @@ async fn l0s_needing_vertical_split() { - "L0.998[24,100] 1.02us |-----------------------------------------L0.998-----------------------------------------|" - "L0.999[24,100] 
1.02us |-----------------------------------------L0.999-----------------------------------------|" - "L0.1000[24,100] 1.02us |----------------------------------------L0.1000-----------------------------------------|" - - "**** Final Output Files (2.62gb written)" + - "**** Final Output Files (2.63gb written)" - "L2 " - - "L2.1018[24,34] 1.02us 107mb|-L2.1018-| " - - "L2.1019[35,44] 1.02us 97mb |L2.1019-| " - - "L2.1020[45,50] 1.02us 58mb |L2.1020| " - - "L2.1021[51,61] 1.02us 107mb |-L2.1021-| " - - "L2.1022[62,71] 1.02us 97mb |L2.1022-| " - - "L2.1027[72,82] 1.02us 107mb |-L2.1027-| " - - "L2.1028[83,92] 1.02us 97mb |L2.1028-| " - - "L2.1029[93,100] 1.02us 78mb |L2.1029|" + - "L2.1031[24,34] 1.02us 107mb|-L2.1031-| " + - "L2.1039[81,91] 1.02us 107mb |-L2.1039-| " + - "L2.1040[92,100] 1.02us 88mb |L2.1040| " + - "L2.1041[35,45] 1.02us 107mb |-L2.1041-| " + - "L2.1042[46,55] 1.02us 97mb |L2.1042-| " + - "L2.1043[56,63] 1.02us 78mb |L2.1043| " + - "L2.1044[64,74] 1.02us 107mb |-L2.1044-| " + - "L2.1045[75,80] 1.02us 58mb |L2.1045| " - "**** Breakdown of where bytes were written" - - 1.01gb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) - - 300mb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) - - 450mb written by split(VerticalSplit) - - 899mb written by compact(ManySmallFiles) + - 282mb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) + - 750mb written by compact(ManySmallFiles) + - 750mb written by split(VerticalSplit) + - 916mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) "### ); } diff --git a/compactor/tests/layouts/single_timestamp.rs b/compactor/tests/layouts/single_timestamp.rs index df4e66908f..629e180999 100644 --- a/compactor/tests/layouts/single_timestamp.rs +++ b/compactor/tests/layouts/single_timestamp.rs @@ -125,25 +125,34 @@ async fn two_giant_files_time_range_1() { - "L0.2[100,101] 2ns |------------------------------------------L0.2------------------------------------------|" - "WARNING: file L0.1[100,101] 1ns 4.88gb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.2[100,101] 2ns 4.88gb exceeds soft limit 100mb by more than 50%" - - "**** Simulation run 0, type=split(ReduceLargeFileSize)(split_times=[100]). 1 Input Files, 4.88gb total:" + - "**** Simulation run 0, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 4.88gb total:" - "L0, all files 4.88gb " - "L0.1[100,101] 1ns |------------------------------------------L0.1------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 4.88gb total:" - "L0, all files 2.44gb " - "L0.?[100,100] 1ns |L0.?| " - "L0.?[101,101] 1ns |L0.?|" + - "**** Simulation run 1, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 4.88gb total:" + - "L0, all files 4.88gb " + - "L0.2[100,101] 2ns |------------------------------------------L0.2------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 4.88gb total:" + - "L0, all files 2.44gb " + - "L0.?[100,100] 2ns |L0.?| " + - "L0.?[101,101] 2ns |L0.?|" - "Committing partition 1:" - - " Soft Deleting 1 files: L0.1" - - " Creating 2 files" + - " Soft Deleting 2 files: L0.1, L0.2" + - " Creating 4 files" - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
This may happen if a large amount of data has the same timestamp" - - "**** Final Output Files (4.88gb written)" - - "L0 " - - "L0.2[100,101] 2ns 4.88gb |------------------------------------------L0.2------------------------------------------|" - - "L0.3[100,100] 1ns 2.44gb |L0.3| " - - "L0.4[101,101] 1ns 2.44gb |L0.4|" - - "WARNING: file L0.2[100,101] 2ns 4.88gb exceeds soft limit 100mb by more than 50%" + - "**** Final Output Files (9.77gb written)" + - "L0, all files 2.44gb " + - "L0.3[100,100] 1ns |L0.3| " + - "L0.4[101,101] 1ns |L0.4|" + - "L0.5[100,100] 2ns |L0.5| " + - "L0.6[101,101] 2ns |L0.6|" - "WARNING: file L0.3[100,100] 1ns 2.44gb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.4[101,101] 1ns 2.44gb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L0.5[100,100] 2ns 2.44gb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L0.6[101,101] 2ns 2.44gb exceeds soft limit 100mb by more than 50%" "### ); } @@ -284,98 +293,140 @@ async fn many_medium_files_time_range_1() { - "L0.18[100,101] 18ns |-----------------------------------------L0.18------------------------------------------|" - "L0.19[100,101] 19ns |-----------------------------------------L0.19------------------------------------------|" - "L0.20[100,101] 20ns |-----------------------------------------L0.20------------------------------------------|" - - "**** Simulation run 0, type=compact(FoundSubsetLessThanMaxCompactSize). 10 Input Files, 300mb total:" + - "**** Simulation run 0, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.1[100,101] 1ns |------------------------------------------L0.1------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 1ns |L0.?| " + - "L0.?[101,101] 1ns |L0.?|" + - "**** Simulation run 1, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.2[100,101] 2ns |------------------------------------------L0.2------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 2ns |L0.?| " + - "L0.?[101,101] 2ns |L0.?|" + - "**** Simulation run 2, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.3[100,101] 3ns |------------------------------------------L0.3------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 3ns |L0.?| " + - "L0.?[101,101] 3ns |L0.?|" + - "**** Simulation run 3, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.4[100,101] 4ns |------------------------------------------L0.4------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 4ns |L0.?| " + - "L0.?[101,101] 4ns |L0.?|" + - "**** Simulation run 4, type=split(VerticalSplit)(split_times=[100]). 
1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.5[100,101] 5ns |------------------------------------------L0.5------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 5ns |L0.?| " + - "L0.?[101,101] 5ns |L0.?|" + - "**** Simulation run 5, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.6[100,101] 6ns |------------------------------------------L0.6------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 6ns |L0.?| " + - "L0.?[101,101] 6ns |L0.?|" + - "**** Simulation run 6, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.7[100,101] 7ns |------------------------------------------L0.7------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 7ns |L0.?| " + - "L0.?[101,101] 7ns |L0.?|" + - "**** Simulation run 7, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.8[100,101] 8ns |------------------------------------------L0.8------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 8ns |L0.?| " + - "L0.?[101,101] 8ns |L0.?|" + - "**** Simulation run 8, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.9[100,101] 9ns |------------------------------------------L0.9------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 9ns |L0.?| " + - "L0.?[101,101] 9ns |L0.?|" + - "**** Simulation run 9, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" + - "L0, all files 30mb " - "L0.10[100,101] 10ns |-----------------------------------------L0.10------------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 300mb total:" - - "L1, all files 300mb " - - "L1.?[100,101] 10ns |------------------------------------------L1.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 10 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10" - - " Creating 1 files" - - "**** Simulation run 1, type=split(ReduceLargeFileSize)(split_times=[100]). 1 Input Files, 300mb total:" - - "L1, all files 300mb " - - "L1.21[100,101] 10ns |-----------------------------------------L1.21------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 300mb total:" - - "L1, all files 150mb " - - "L1.?[100,100] 10ns |L1.?| " - - "L1.?[101,101] 10ns |L1.?|" - - "Committing partition 1:" - - " Soft Deleting 1 files: L1.21" - - " Creating 2 files" - - "**** Simulation run 2, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" + - "L0, all files 15mb " + - "L0.?[100,100] 10ns |L0.?| " + - "L0.?[101,101] 10ns |L0.?|" + - "**** Simulation run 10, type=split(VerticalSplit)(split_times=[100]). 
1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.11[100,101] 11ns |-----------------------------------------L0.11------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 11ns |L0.?| " - "L0.?[101,101] 11ns |L0.?|" - - "**** Simulation run 3, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 11, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.12[100,101] 12ns |-----------------------------------------L0.12------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 12ns |L0.?| " - "L0.?[101,101] 12ns |L0.?|" - - "**** Simulation run 4, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 12, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.13[100,101] 13ns |-----------------------------------------L0.13------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 13ns |L0.?| " - "L0.?[101,101] 13ns |L0.?|" - - "**** Simulation run 5, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 13, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.14[100,101] 14ns |-----------------------------------------L0.14------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 14ns |L0.?| " - "L0.?[101,101] 14ns |L0.?|" - - "**** Simulation run 6, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 14, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.15[100,101] 15ns |-----------------------------------------L0.15------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 15ns |L0.?| " - "L0.?[101,101] 15ns |L0.?|" - - "**** Simulation run 7, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 15, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.16[100,101] 16ns |-----------------------------------------L0.16------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 16ns |L0.?| " - "L0.?[101,101] 16ns |L0.?|" - - "**** Simulation run 8, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 16, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.17[100,101] 17ns |-----------------------------------------L0.17------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 17ns |L0.?| " - "L0.?[101,101] 17ns |L0.?|" - - "**** Simulation run 9, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 17, type=split(VerticalSplit)(split_times=[100]). 
1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.18[100,101] 18ns |-----------------------------------------L0.18------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 18ns |L0.?| " - "L0.?[101,101] 18ns |L0.?|" - - "**** Simulation run 10, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 18, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.19[100,101] 19ns |-----------------------------------------L0.19------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L0, all files 15mb " - "L0.?[100,100] 19ns |L0.?| " - "L0.?[101,101] 19ns |L0.?|" - - "**** Simulation run 11, type=split(ReduceOverlap)(split_times=[100]). 1 Input Files, 30mb total:" + - "**** Simulation run 19, type=split(VerticalSplit)(split_times=[100]). 1 Input Files, 30mb total:" - "L0, all files 30mb " - "L0.20[100,101] 20ns |-----------------------------------------L0.20------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" @@ -383,56 +434,72 @@ async fn many_medium_files_time_range_1() { - "L0.?[100,100] 20ns |L0.?| " - "L0.?[101,101] 20ns |L0.?|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20" - - " Creating 20 files" - - "**** Simulation run 12, type=compact(TotalSizeLessThanMaxCompactSize). 11 Input Files, 300mb total:" - - "L0 " - - "L0.42[100,100] 20ns 15mb |-----------------------------------------L0.42------------------------------------------|" - - "L0.40[100,100] 19ns 15mb |-----------------------------------------L0.40------------------------------------------|" - - "L0.38[100,100] 18ns 15mb |-----------------------------------------L0.38------------------------------------------|" - - "L0.36[100,100] 17ns 15mb |-----------------------------------------L0.36------------------------------------------|" - - "L0.34[100,100] 16ns 15mb |-----------------------------------------L0.34------------------------------------------|" - - "L0.32[100,100] 15ns 15mb |-----------------------------------------L0.32------------------------------------------|" - - "L0.30[100,100] 14ns 15mb |-----------------------------------------L0.30------------------------------------------|" - - "L0.28[100,100] 13ns 15mb |-----------------------------------------L0.28------------------------------------------|" - - "L0.26[100,100] 12ns 15mb |-----------------------------------------L0.26------------------------------------------|" - - "L0.24[100,100] 11ns 15mb |-----------------------------------------L0.24------------------------------------------|" - - "L1 " - - "L1.22[100,100] 10ns 150mb|-----------------------------------------L1.22------------------------------------------|" + - " Soft Deleting 20 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20" + - " Creating 40 files" + - "**** Simulation run 20, type=compact(TotalSizeLessThanMaxCompactSize). 
20 Input Files, 300mb total:" + - "L0, all files 15mb " + - "L0.59[100,100] 20ns |-----------------------------------------L0.59------------------------------------------|" + - "L0.57[100,100] 19ns |-----------------------------------------L0.57------------------------------------------|" + - "L0.55[100,100] 18ns |-----------------------------------------L0.55------------------------------------------|" + - "L0.53[100,100] 17ns |-----------------------------------------L0.53------------------------------------------|" + - "L0.51[100,100] 16ns |-----------------------------------------L0.51------------------------------------------|" + - "L0.49[100,100] 15ns |-----------------------------------------L0.49------------------------------------------|" + - "L0.47[100,100] 14ns |-----------------------------------------L0.47------------------------------------------|" + - "L0.45[100,100] 13ns |-----------------------------------------L0.45------------------------------------------|" + - "L0.43[100,100] 12ns |-----------------------------------------L0.43------------------------------------------|" + - "L0.41[100,100] 11ns |-----------------------------------------L0.41------------------------------------------|" + - "L0.39[100,100] 10ns |-----------------------------------------L0.39------------------------------------------|" + - "L0.37[100,100] 9ns |-----------------------------------------L0.37------------------------------------------|" + - "L0.35[100,100] 8ns |-----------------------------------------L0.35------------------------------------------|" + - "L0.33[100,100] 7ns |-----------------------------------------L0.33------------------------------------------|" + - "L0.31[100,100] 6ns |-----------------------------------------L0.31------------------------------------------|" + - "L0.29[100,100] 5ns |-----------------------------------------L0.29------------------------------------------|" + - "L0.27[100,100] 4ns |-----------------------------------------L0.27------------------------------------------|" + - "L0.25[100,100] 3ns |-----------------------------------------L0.25------------------------------------------|" + - "L0.23[100,100] 2ns |-----------------------------------------L0.23------------------------------------------|" + - "L0.21[100,100] 1ns |-----------------------------------------L0.21------------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 300mb total:" - "L1, all files 300mb " - "L1.?[100,100] 20ns |------------------------------------------L1.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 11 files: L1.22, L0.24, L0.26, L0.28, L0.30, L0.32, L0.34, L0.36, L0.38, L0.40, L0.42" + - " Soft Deleting 20 files: L0.21, L0.23, L0.25, L0.27, L0.29, L0.31, L0.33, L0.35, L0.37, L0.39, L0.41, L0.43, L0.45, L0.47, L0.49, L0.51, L0.53, L0.55, L0.57, L0.59" - " Creating 1 files" - - "**** Simulation run 13, type=compact(TotalSizeLessThanMaxCompactSize). 
11 Input Files, 300mb total:" - - "L0 " - - "L0.43[101,101] 20ns 15mb |-----------------------------------------L0.43------------------------------------------|" - - "L0.41[101,101] 19ns 15mb |-----------------------------------------L0.41------------------------------------------|" - - "L0.39[101,101] 18ns 15mb |-----------------------------------------L0.39------------------------------------------|" - - "L0.37[101,101] 17ns 15mb |-----------------------------------------L0.37------------------------------------------|" - - "L0.35[101,101] 16ns 15mb |-----------------------------------------L0.35------------------------------------------|" - - "L0.33[101,101] 15ns 15mb |-----------------------------------------L0.33------------------------------------------|" - - "L0.31[101,101] 14ns 15mb |-----------------------------------------L0.31------------------------------------------|" - - "L0.29[101,101] 13ns 15mb |-----------------------------------------L0.29------------------------------------------|" - - "L0.27[101,101] 12ns 15mb |-----------------------------------------L0.27------------------------------------------|" - - "L0.25[101,101] 11ns 15mb |-----------------------------------------L0.25------------------------------------------|" - - "L1 " - - "L1.23[101,101] 10ns 150mb|-----------------------------------------L1.23------------------------------------------|" + - "**** Simulation run 21, type=compact(TotalSizeLessThanMaxCompactSize). 20 Input Files, 300mb total:" + - "L0, all files 15mb " + - "L0.60[101,101] 20ns |-----------------------------------------L0.60------------------------------------------|" + - "L0.58[101,101] 19ns |-----------------------------------------L0.58------------------------------------------|" + - "L0.56[101,101] 18ns |-----------------------------------------L0.56------------------------------------------|" + - "L0.54[101,101] 17ns |-----------------------------------------L0.54------------------------------------------|" + - "L0.52[101,101] 16ns |-----------------------------------------L0.52------------------------------------------|" + - "L0.50[101,101] 15ns |-----------------------------------------L0.50------------------------------------------|" + - "L0.48[101,101] 14ns |-----------------------------------------L0.48------------------------------------------|" + - "L0.46[101,101] 13ns |-----------------------------------------L0.46------------------------------------------|" + - "L0.44[101,101] 12ns |-----------------------------------------L0.44------------------------------------------|" + - "L0.42[101,101] 11ns |-----------------------------------------L0.42------------------------------------------|" + - "L0.40[101,101] 10ns |-----------------------------------------L0.40------------------------------------------|" + - "L0.38[101,101] 9ns |-----------------------------------------L0.38------------------------------------------|" + - "L0.36[101,101] 8ns |-----------------------------------------L0.36------------------------------------------|" + - "L0.34[101,101] 7ns |-----------------------------------------L0.34------------------------------------------|" + - "L0.32[101,101] 6ns |-----------------------------------------L0.32------------------------------------------|" + - "L0.30[101,101] 5ns |-----------------------------------------L0.30------------------------------------------|" + - "L0.28[101,101] 4ns |-----------------------------------------L0.28------------------------------------------|" + - "L0.26[101,101] 3ns 
|-----------------------------------------L0.26------------------------------------------|" + - "L0.24[101,101] 2ns |-----------------------------------------L0.24------------------------------------------|" + - "L0.22[101,101] 1ns |-----------------------------------------L0.22------------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 300mb total:" - "L1, all files 300mb " - "L1.?[101,101] 20ns |------------------------------------------L1.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 11 files: L1.23, L0.25, L0.27, L0.29, L0.31, L0.33, L0.35, L0.37, L0.39, L0.41, L0.43" + - " Soft Deleting 20 files: L0.22, L0.24, L0.26, L0.28, L0.30, L0.32, L0.34, L0.36, L0.38, L0.40, L0.42, L0.44, L0.46, L0.48, L0.50, L0.52, L0.54, L0.56, L0.58, L0.60" - " Creating 1 files" - "Committing partition 1:" - - " Upgrading 2 files level to CompactionLevel::L2: L1.44, L1.45" - - "**** Final Output Files (1.46gb written)" + - " Upgrading 2 files level to CompactionLevel::L2: L1.61, L1.62" + - "**** Final Output Files (1.17gb written)" - "L2, all files 300mb " - - "L2.44[100,100] 20ns |L2.44| " - - "L2.45[101,101] 20ns |L2.45|" - - "WARNING: file L2.44[100,100] 20ns 300mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.45[101,101] 20ns 300mb exceeds soft limit 100mb by more than 50%" + - "L2.61[100,100] 20ns |L2.61| " + - "L2.62[101,101] 20ns |L2.62|" + - "WARNING: file L2.61[100,100] 20ns 300mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.62[101,101] 20ns 300mb exceeds soft limit 100mb by more than 50%" "### ); } diff --git a/compactor/tests/layouts/stuck.rs b/compactor/tests/layouts/stuck.rs index 42dc8e754a..09a2a624df 100644 --- a/compactor/tests/layouts/stuck.rs +++ b/compactor/tests/layouts/stuck.rs @@ -1112,91 +1112,91 @@ async fn stuck_l0() { - "L2.59[1686863759000000000,1686867839000000000] 1686928811.43s 96mb |--L2.59--| " - "L2.74[1686867899000000000,1686868319000000000] 1686928811.43s 14mb |L2.74| " - "L2.78[1686868379000000000,1686873599000000000] 1686928118.43s 39mb |---L2.78----| " - - "**** Final Output Files (26.51gb written)" + - "**** Final Output Files (27.19gb written)" - "L2 " - - "L2.1091[1686841379000000000,1686841897338459020] 1686936871.55s 100mb|L2.1091| " - - "L2.1092[1686841897338459021,1686842415676918040] 1686936871.55s 100mb |L2.1092| " - - "L2.1093[1686842415676918041,1686842839817089481] 1686936871.55s 82mb |L2.1093| " - - "L2.1094[1686842839817089482,1686843358155548375] 1686936871.55s 100mb |L2.1094| " - - "L2.1095[1686843358155548376,1686843876494007268] 1686936871.55s 100mb |L2.1095| " - - "L2.1096[1686843876494007269,1686843991432432431] 1686936871.55s 22mb |L2.1096| " - - "L2.1097[1686843991432432432,1686844509879093149] 1686936871.55s 100mb |L2.1097| " - - "L2.1098[1686844509879093150,1686845028325753866] 1686936871.55s 100mb |L2.1098| " - - "L2.1099[1686845028325753867,1686845452596633248] 1686936871.55s 82mb |L2.1099| " - - "L2.1100[1686845452596633249,1686845601719326649] 1686936871.55s 28mb |L2.1100| " - - "L2.1101[1686845601719326650,1686845638999999999] 1686936871.55s 7mb |L2.1101| " - - "L2.1131[1686845639000000000,1686846157293611501] 1686936871.55s 100mb |L2.1131| " - - "L2.1132[1686846157293611502,1686846675587223002] 1686936871.55s 100mb |L2.1132| " - - "L2.1133[1686846675587223003,1686847099876735470] 1686936871.55s 82mb |L2.1133| " - - "L2.1134[1686847099876735471,1686847618170347409] 1686936871.55s 100mb |L2.1134| " - - 
"L2.1135[1686847618170347410,1686848136463959347] 1686936871.55s 100mb |L2.1135| " - - "L2.1136[1686848136463959348,1686848251432432430] 1686936871.55s 22mb |L2.1136| " - - "L2.1137[1686848251432432431,1686848771011201758] 1686936871.55s 100mb |L2.1137| " - - "L2.1138[1686848771011201759,1686849290589971085] 1686936871.55s 100mb |L2.1138| " - - "L2.1139[1686849290589971086,1686849779000000000] 1686936871.55s 94mb |L2.1139| " - - "L2.1140[1686849779000000001,1686850296071388521] 1686936871.55s 100mb |L2.1140| " - - "L2.1141[1686850296071388522,1686850618999999999] 1686936871.55s 62mb |L2.1141| " - - "L2.1171[1686850619000000000,1686851138080106992] 1686936871.55s 100mb |L2.1171| " - - "L2.1172[1686851138080106993,1686851657160213984] 1686936871.55s 100mb |L2.1172| " - - "L2.1173[1686851657160213985,1686852082153160619] 1686936871.55s 82mb |L2.1173| " - - "L2.1174[1686852082153160620,1686852597705498172] 1686936871.55s 100mb |L2.1174| " - - "L2.1175[1686852597705498173,1686853113257835724] 1686936871.55s 100mb |L2.1175| " - - "L2.1176[1686853113257835725,1686853231432432430] 1686936871.55s 23mb |L2.1176| " - - "L2.1177[1686853231432432431,1686853749668309875] 1686936871.55s 100mb |L2.1177| " - - "L2.1178[1686853749668309876,1686854267904187319] 1686936871.55s 100mb |L2.1178| " - - "L2.1179[1686854267904187320,1686854694562036946] 1686936871.55s 82mb |L2.1179| " - - "L2.1180[1686854694562036947,1686854842112407388] 1686936871.55s 27mb |L2.1180| " - - "L2.1181[1686854842112407389,1686854878999999999] 1686936871.55s 7mb |L2.1181| " - - "L2.1211[1686854879000000000,1686855392351651525] 1686936871.55s 100mb |L2.1211| " - - "L2.1212[1686855392351651526,1686855905703303050] 1686936871.55s 100mb |L2.1212| " - - "L2.1213[1686855905703303051,1686856326663196472] 1686936871.55s 82mb |L2.1213| " - - "L2.1214[1686856326663196473,1686856834721369390] 1686936871.55s 100mb |L2.1214| " - - "L2.1215[1686856834721369391,1686857342779542307] 1686936871.55s 100mb |L2.1215| " - - "L2.1216[1686857342779542308,1686857491432432430] 1686936871.55s 29mb |L2.1216| " - - "L2.1217[1686857491432432431,1686858000732587742] 1686936871.55s 100mb |L2.1217| " - - "L2.1218[1686858000732587743,1686858510032743053] 1686936871.55s 100mb |L2.1218| " - - "L2.1219[1686858510032743054,1686859019000000000] 1686936871.55s 100mb |L2.1219| " - - "L2.1220[1686859019000000001,1686859450999999999] 1686936871.55s 86mb |L2.1220| " - - "L2.1221[1686859451000000000,1686859558999999999] 1686936871.55s 21mb |L2.1221| " - - "L2.1251[1686859559000000000,1686860066906201610] 1686936871.55s 100mb |L2.1251| " - - "L2.1252[1686860066906201611,1686860574812403220] 1686936871.55s 100mb |L2.1252| " - - "L2.1253[1686860574812403221,1686861006462323658] 1686936871.55s 85mb |L2.1253| " - - "L2.1254[1686861006462323659,1686861514368526128] 1686936871.55s 100mb |L2.1254| " - - "L2.1255[1686861514368526129,1686862022274728597] 1686936871.55s 100mb |L2.1255| " - - "L2.1256[1686862022274728598,1686862171432432430] 1686936871.55s 29mb |L2.1256| " - - "L2.1257[1686862171432432431,1686862680758220490] 1686936871.55s 100mb |L2.1257| " - - "L2.1258[1686862680758220491,1686863190084008549] 1686936871.55s 100mb |L2.1258| " - - "L2.1259[1686863190084008550,1686863699000000000] 1686936871.55s 100mb |L2.1259| " - - "L2.1260[1686863699000000001,1686863758999999999] 1686936871.55s 10mb |L2.1260| " - - "L2.1286[1686863759000000000,1686864303793314985] 1686936871.55s 100mb |L2.1286| " - - "L2.1287[1686864303793314986,1686864848586629970] 1686936871.55s 100mb |L2.1287| " - - 
"L2.1288[1686864848586629971,1686865291084361649] 1686936871.55s 81mb |L2.1288| " - - "L2.1289[1686865291084361650,1686865872354961156] 1686936871.55s 100mb |L2.1289| " - - "L2.1290[1686865872354961157,1686866371432432430] 1686936871.55s 86mb |L2.1290| " - - "L2.1291[1686866371432432431,1686866958717584080] 1686936871.55s 100mb |L2.1291| " - - "L2.1292[1686866958717584081,1686867546002735729] 1686936871.55s 100mb |L2.1292| " - - "L2.1293[1686867546002735730,1686867839000000000] 1686936871.55s 50mb |L2.1293| " - - "L2.1294[1686867839000000001,1686868270999999999] 1686936871.55s 77mb |L2.1294| " - - "L2.1295[1686868271000000000,1686868378999999999] 1686936871.55s 19mb |L2.1295| " - - "L2.1330[1686868379000000000,1686868907139596727] 1686936871.55s 100mb |L2.1330| " - - "L2.1331[1686868907139596728,1686869435279193454] 1686936871.55s 100mb |L2.1331| " - - "L2.1332[1686869435279193455,1686869781827739749] 1686936871.55s 66mb |L2.1332| " - - "L2.1333[1686869781827739750,1686870279605194252] 1686936871.55s 100mb |L2.1333| " - - "L2.1334[1686870279605194253,1686870777382648754] 1686936871.55s 100mb |L2.1334| " - - "L2.1335[1686870777382648755,1686870991432432430] 1686936871.55s 43mb |L2.1335|" - - "L2.1336[1686870991432432431,1686871482686369018] 1686936871.55s 100mb |L2.1336|" - - "L2.1337[1686871482686369019,1686871973940305605] 1686936871.55s 100mb |L2.1337|" - - "L2.1338[1686871973940305606,1686872370429245091] 1686936871.55s 81mb |L2.1338|" - - "L2.1339[1686872370429245092,1686872859940700313] 1686936871.55s 100mb |L2.1339|" - - "L2.1340[1686872859940700314,1686873349452155534] 1686936871.55s 100mb |L2.1340|" - - "L2.1341[1686873349452155535,1686873599000000000] 1686936871.55s 51mb |L2.1341|" + - "L2.1107[1686841379000000000,1686841897338459020] 1686936871.55s 100mb|L2.1107| " + - "L2.1108[1686841897338459021,1686842415676918040] 1686936871.55s 100mb |L2.1108| " + - "L2.1109[1686842415676918041,1686842839817089481] 1686936871.55s 82mb |L2.1109| " + - "L2.1110[1686842839817089482,1686843358155548375] 1686936871.55s 100mb |L2.1110| " + - "L2.1111[1686843358155548376,1686843876494007268] 1686936871.55s 100mb |L2.1111| " + - "L2.1112[1686843876494007269,1686843991432432431] 1686936871.55s 22mb |L2.1112| " + - "L2.1113[1686843991432432432,1686844509879093149] 1686936871.55s 100mb |L2.1113| " + - "L2.1114[1686844509879093150,1686845028325753866] 1686936871.55s 100mb |L2.1114| " + - "L2.1115[1686845028325753867,1686845452596633248] 1686936871.55s 82mb |L2.1115| " + - "L2.1116[1686845452596633249,1686845601719326649] 1686936871.55s 28mb |L2.1116| " + - "L2.1117[1686845601719326650,1686845638999999999] 1686936871.55s 7mb |L2.1117| " + - "L2.1147[1686845639000000000,1686846157293611501] 1686936871.55s 100mb |L2.1147| " + - "L2.1148[1686846157293611502,1686846675587223002] 1686936871.55s 100mb |L2.1148| " + - "L2.1149[1686846675587223003,1686847099876735470] 1686936871.55s 82mb |L2.1149| " + - "L2.1150[1686847099876735471,1686847618170347409] 1686936871.55s 100mb |L2.1150| " + - "L2.1151[1686847618170347410,1686848136463959347] 1686936871.55s 100mb |L2.1151| " + - "L2.1152[1686848136463959348,1686848251432432430] 1686936871.55s 22mb |L2.1152| " + - "L2.1153[1686848251432432431,1686848771011201758] 1686936871.55s 100mb |L2.1153| " + - "L2.1154[1686848771011201759,1686849290589971085] 1686936871.55s 100mb |L2.1154| " + - "L2.1155[1686849290589971086,1686849779000000000] 1686936871.55s 94mb |L2.1155| " + - "L2.1156[1686849779000000001,1686850296071388521] 1686936871.55s 100mb |L2.1156| " + - 
"L2.1157[1686850296071388522,1686850618999999999] 1686936871.55s 62mb |L2.1157| " + - "L2.1187[1686850619000000000,1686851138080106992] 1686936871.55s 100mb |L2.1187| " + - "L2.1188[1686851138080106993,1686851657160213984] 1686936871.55s 100mb |L2.1188| " + - "L2.1189[1686851657160213985,1686852082153160619] 1686936871.55s 82mb |L2.1189| " + - "L2.1190[1686852082153160620,1686852597705498172] 1686936871.55s 100mb |L2.1190| " + - "L2.1191[1686852597705498173,1686853113257835724] 1686936871.55s 100mb |L2.1191| " + - "L2.1192[1686853113257835725,1686853231432432430] 1686936871.55s 23mb |L2.1192| " + - "L2.1193[1686853231432432431,1686853749668309875] 1686936871.55s 100mb |L2.1193| " + - "L2.1194[1686853749668309876,1686854267904187319] 1686936871.55s 100mb |L2.1194| " + - "L2.1195[1686854267904187320,1686854694562036946] 1686936871.55s 82mb |L2.1195| " + - "L2.1196[1686854694562036947,1686854842112407388] 1686936871.55s 27mb |L2.1196| " + - "L2.1197[1686854842112407389,1686854878999999999] 1686936871.55s 7mb |L2.1197| " + - "L2.1227[1686854879000000000,1686855392351651525] 1686936871.55s 100mb |L2.1227| " + - "L2.1228[1686855392351651526,1686855905703303050] 1686936871.55s 100mb |L2.1228| " + - "L2.1229[1686855905703303051,1686856326663196472] 1686936871.55s 82mb |L2.1229| " + - "L2.1230[1686856326663196473,1686856834721369390] 1686936871.55s 100mb |L2.1230| " + - "L2.1231[1686856834721369391,1686857342779542307] 1686936871.55s 100mb |L2.1231| " + - "L2.1232[1686857342779542308,1686857491432432430] 1686936871.55s 29mb |L2.1232| " + - "L2.1233[1686857491432432431,1686858000732587742] 1686936871.55s 100mb |L2.1233| " + - "L2.1234[1686858000732587743,1686858510032743053] 1686936871.55s 100mb |L2.1234| " + - "L2.1235[1686858510032743054,1686859019000000000] 1686936871.55s 100mb |L2.1235| " + - "L2.1236[1686859019000000001,1686859450999999999] 1686936871.55s 86mb |L2.1236| " + - "L2.1237[1686859451000000000,1686859558999999999] 1686936871.55s 21mb |L2.1237| " + - "L2.1267[1686859559000000000,1686860066906201610] 1686936871.55s 100mb |L2.1267| " + - "L2.1268[1686860066906201611,1686860574812403220] 1686936871.55s 100mb |L2.1268| " + - "L2.1269[1686860574812403221,1686861006462323658] 1686936871.55s 85mb |L2.1269| " + - "L2.1270[1686861006462323659,1686861514368526128] 1686936871.55s 100mb |L2.1270| " + - "L2.1271[1686861514368526129,1686862022274728597] 1686936871.55s 100mb |L2.1271| " + - "L2.1272[1686862022274728598,1686862171432432430] 1686936871.55s 29mb |L2.1272| " + - "L2.1273[1686862171432432431,1686862680758220490] 1686936871.55s 100mb |L2.1273| " + - "L2.1274[1686862680758220491,1686863190084008549] 1686936871.55s 100mb |L2.1274| " + - "L2.1275[1686863190084008550,1686863699000000000] 1686936871.55s 100mb |L2.1275| " + - "L2.1276[1686863699000000001,1686863758999999999] 1686936871.55s 10mb |L2.1276| " + - "L2.1302[1686863759000000000,1686864303793314985] 1686936871.55s 100mb |L2.1302| " + - "L2.1303[1686864303793314986,1686864848586629970] 1686936871.55s 100mb |L2.1303| " + - "L2.1304[1686864848586629971,1686865291084361649] 1686936871.55s 81mb |L2.1304| " + - "L2.1305[1686865291084361650,1686865872354961156] 1686936871.55s 100mb |L2.1305| " + - "L2.1306[1686865872354961157,1686866371432432430] 1686936871.55s 86mb |L2.1306| " + - "L2.1307[1686866371432432431,1686866958717584080] 1686936871.55s 100mb |L2.1307| " + - "L2.1308[1686866958717584081,1686867546002735729] 1686936871.55s 100mb |L2.1308| " + - "L2.1309[1686867546002735730,1686867839000000000] 1686936871.55s 50mb |L2.1309| " + - 
"L2.1310[1686867839000000001,1686868270999999999] 1686936871.55s 77mb |L2.1310| " + - "L2.1311[1686868271000000000,1686868378999999999] 1686936871.55s 19mb |L2.1311| " + - "L2.1346[1686868379000000000,1686868907139596727] 1686936871.55s 100mb |L2.1346| " + - "L2.1347[1686868907139596728,1686869435279193454] 1686936871.55s 100mb |L2.1347| " + - "L2.1348[1686869435279193455,1686869781827739749] 1686936871.55s 66mb |L2.1348| " + - "L2.1349[1686869781827739750,1686870279605194252] 1686936871.55s 100mb |L2.1349| " + - "L2.1350[1686870279605194253,1686870777382648754] 1686936871.55s 100mb |L2.1350| " + - "L2.1351[1686870777382648755,1686870991432432430] 1686936871.55s 43mb |L2.1351|" + - "L2.1352[1686870991432432431,1686871482686369018] 1686936871.55s 100mb |L2.1352|" + - "L2.1353[1686871482686369019,1686871973940305605] 1686936871.55s 100mb |L2.1353|" + - "L2.1354[1686871973940305606,1686872370429245091] 1686936871.55s 81mb |L2.1354|" + - "L2.1355[1686872370429245092,1686872859940700313] 1686936871.55s 100mb |L2.1355|" + - "L2.1356[1686872859940700314,1686873349452155534] 1686936871.55s 100mb |L2.1356|" + - "L2.1357[1686873349452155535,1686873599000000000] 1686936871.55s 51mb |L2.1357|" - "**** Breakdown of where bytes were written" - 10mb written by compact(FoundSubsetLessThanMaxCompactSize) - 4.51gb written by split(ReduceOverlap) - - 4.54gb written by compact(ManySmallFiles) - - 5.14gb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) + - 5.22gb written by compact(ManySmallFiles) - 5.4gb written by split(VerticalSplit) - - 6.3gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) + - 5.64gb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) + - 5.8gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) - 623mb written by split(StartLevelOverlapsTooBig) "### ); @@ -5484,7 +5484,213 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 20 files: L0.201, L0.213, L0.225, L0.237, L0.249, L0.261, L0.273, L0.285, L0.297, L0.309, L0.321, L0.333, L0.345, L0.357, L0.369, L0.381, L0.393, L0.405, L0.529, L0.541" - " Creating 1 files" - - "**** Simulation run 201, type=compact(ManySmallFiles). 20 Input Files, 160mb total:" + - "**** Simulation run 201, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.417[20,161] 20ns |-----------------------------------------L0.417-----------------------------------------|" + - "L0.431[21,161] 21ns |----------------------------------------L0.431-----------------------------------------| " + - "L0.445[22,161] 22ns |----------------------------------------L0.445----------------------------------------| " + - "L0.459[23,161] 23ns |----------------------------------------L0.459----------------------------------------| " + - "L0.473[24,161] 24ns |---------------------------------------L0.473----------------------------------------| " + - "L0.487[25,161] 25ns |---------------------------------------L0.487---------------------------------------| " + - "L0.501[26,161] 26ns |---------------------------------------L0.501---------------------------------------| " + - "L0.515[27,161] 27ns |--------------------------------------L0.515---------------------------------------| " + - "L0.553[28,161] 28ns |--------------------------------------L0.553--------------------------------------| " + - "L0.567[29,161] 29ns |--------------------------------------L0.567--------------------------------------| " + - "L0.581[30,161] 30ns |-------------------------------------L0.581--------------------------------------| " + - "L0.595[31,161] 31ns |-------------------------------------L0.595-------------------------------------| " + - "L0.609[32,161] 32ns |-------------------------------------L0.609-------------------------------------| " + - "L0.623[33,161] 33ns |------------------------------------L0.623-------------------------------------| " + - "L0.637[34,161] 34ns |------------------------------------L0.637-------------------------------------| " + - "L0.651[35,161] 35ns |------------------------------------L0.651------------------------------------| " + - "L0.665[36,161] 36ns |-----------------------------------L0.665------------------------------------| " + - "L0.679[37,161] 37ns |-----------------------------------L0.679------------------------------------| " + - "L0.693[38,161] 38ns |-----------------------------------L0.693-----------------------------------| " + - "L0.707[39,161] 39ns |----------------------------------L0.707-----------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[20,161] 39ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.417, L0.431, L0.445, L0.459, L0.473, L0.487, L0.501, L0.515, L0.553, L0.567, L0.581, L0.595, L0.609, L0.623, L0.637, L0.651, L0.665, L0.679, L0.693, L0.707" + - " Creating 1 files" + - "**** Simulation run 202, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.721[40,161] 40ns |----------------------------------------L0.721-----------------------------------------| " + - "L0.735[41,161] 41ns |----------------------------------------L0.735-----------------------------------------| " + - "L0.749[42,161] 42ns |----------------------------------------L0.749----------------------------------------| " + - "L0.763[43,161] 43ns |---------------------------------------L0.763----------------------------------------| " + - "L0.777[44,161] 44ns |---------------------------------------L0.777----------------------------------------| " + - "L0.791[45,161] 45ns |---------------------------------------L0.791---------------------------------------| " + - "L0.805[46,161] 46ns |--------------------------------------L0.805---------------------------------------| " + - "L0.819[47,161] 47ns |--------------------------------------L0.819--------------------------------------| " + - "L0.833[48,161] 48ns |--------------------------------------L0.833--------------------------------------| " + - "L0.847[49,161] 49ns |-------------------------------------L0.847--------------------------------------| " + - "L0.861[50,161] 50ns |-------------------------------------L0.861-------------------------------------| " + - "L0.875[51,161] 51ns |------------------------------------L0.875-------------------------------------| " + - "L0.889[52,161] 52ns |------------------------------------L0.889-------------------------------------| " + - "L0.903[53,161] 53ns |------------------------------------L0.903------------------------------------| " + - "L0.917[54,161] 54ns |-----------------------------------L0.917------------------------------------| " + - "L0.931[55,161] 55ns |-----------------------------------L0.931-----------------------------------| " + - "L0.945[56,161] 56ns |-----------------------------------L0.945-----------------------------------| " + - "L0.959[57,161] 57ns |----------------------------------L0.959-----------------------------------| " + - "L0.973[58,161] 58ns |----------------------------------L0.973----------------------------------| " + - "L0.987[59,161] 59ns |---------------------------------L0.987----------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[40,161] 59ns |-----------------------------------------L0.?------------------------------------------| " + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.721, L0.735, L0.749, L0.763, L0.777, L0.791, L0.805, L0.819, L0.833, L0.847, L0.861, L0.875, L0.889, L0.903, L0.917, L0.931, L0.945, L0.959, L0.973, L0.987" + - " Creating 1 files" + - "**** Simulation run 203, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1001[60,161] 60ns |----------------------------------------L0.1001-----------------------------------------|" + - "L0.1015[61,161] 61ns |----------------------------------------L0.1015----------------------------------------| " + - "L0.1029[62,161] 62ns |---------------------------------------L0.1029----------------------------------------| " + - "L0.1043[63,161] 63ns |---------------------------------------L0.1043---------------------------------------| " + - "L0.1057[64,161] 64ns |--------------------------------------L0.1057---------------------------------------| " + - "L0.1071[65,161] 65ns |--------------------------------------L0.1071--------------------------------------| " + - "L0.1085[66,161] 66ns |-------------------------------------L0.1085--------------------------------------| " + - "L0.1099[67,161] 67ns |-------------------------------------L0.1099-------------------------------------| " + - "L0.1113[68,161] 68ns |------------------------------------L0.1113-------------------------------------| " + - "L0.1127[69,161] 69ns |------------------------------------L0.1127------------------------------------| " + - "L0.1141[70,161] 70ns |------------------------------------L0.1141------------------------------------| " + - "L0.1155[71,161] 71ns |-----------------------------------L0.1155------------------------------------| " + - "L0.1169[72,161] 72ns |-----------------------------------L0.1169-----------------------------------| " + - "L0.1183[73,161] 73ns |----------------------------------L0.1183-----------------------------------| " + - "L0.1197[74,161] 74ns |----------------------------------L0.1197----------------------------------| " + - "L0.1211[75,161] 75ns |---------------------------------L0.1211----------------------------------| " + - "L0.1225[76,161] 76ns |---------------------------------L0.1225---------------------------------| " + - "L0.1239[77,161] 77ns |--------------------------------L0.1239---------------------------------| " + - "L0.1253[78,161] 78ns |--------------------------------L0.1253--------------------------------| " + - "L0.1267[79,161] 79ns |--------------------------------L0.1267--------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[60,161] 79ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1001, L0.1015, L0.1029, L0.1043, L0.1057, L0.1071, L0.1085, L0.1099, L0.1113, L0.1127, L0.1141, L0.1155, L0.1169, L0.1183, L0.1197, L0.1211, L0.1225, L0.1239, L0.1253, L0.1267" + - " Creating 1 files" + - "**** Simulation run 204, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1281[80,161] 80ns |----------------------------------------L0.1281-----------------------------------------|" + - "L0.1295[81,161] 81ns |---------------------------------------L0.1295----------------------------------------| " + - "L0.1309[82,161] 82ns |---------------------------------------L0.1309---------------------------------------| " + - "L0.1323[83,161] 83ns |--------------------------------------L0.1323---------------------------------------| " + - "L0.1337[84,161] 84ns |--------------------------------------L0.1337--------------------------------------| " + - "L0.1351[85,161] 85ns |-------------------------------------L0.1351--------------------------------------| " + - "L0.1365[86,161] 86ns |-------------------------------------L0.1365-------------------------------------| " + - "L0.1379[87,161] 87ns |------------------------------------L0.1379-------------------------------------| " + - "L0.1393[88,161] 88ns |------------------------------------L0.1393------------------------------------| " + - "L0.1407[89,161] 89ns |-----------------------------------L0.1407------------------------------------|" + - "L0.1421[90,161] 90ns |----------------------------------L0.1421-----------------------------------| " + - "L0.1435[91,161] 91ns |----------------------------------L0.1435----------------------------------| " + - "L0.1449[92,161] 92ns |---------------------------------L0.1449----------------------------------| " + - "L0.1463[93,161] 93ns |---------------------------------L0.1463---------------------------------| " + - "L0.1477[94,161] 94ns |--------------------------------L0.1477---------------------------------| " + - "L0.1491[95,161] 95ns |--------------------------------L0.1491--------------------------------| " + - "L0.1505[96,161] 96ns |-------------------------------L0.1505--------------------------------| " + - "L0.1519[97,161] 97ns |-------------------------------L0.1519-------------------------------| " + - "L0.1533[98,161] 98ns |------------------------------L0.1533-------------------------------|" + - "L0.1547[99,161] 99ns |-----------------------------L0.1547------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[80,161] 99ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1281, L0.1295, L0.1309, L0.1323, L0.1337, L0.1351, L0.1365, L0.1379, L0.1393, L0.1407, L0.1421, L0.1435, L0.1449, L0.1463, L0.1477, L0.1491, L0.1505, L0.1519, L0.1533, L0.1547" + - " Creating 1 files" + - "**** Simulation run 205, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1561[100,161] 100ns |----------------------------------------L0.1561-----------------------------------------|" + - "L0.1575[101,161] 101ns |---------------------------------------L0.1575----------------------------------------| " + - "L0.1589[102,161] 102ns |---------------------------------------L0.1589---------------------------------------| " + - "L0.1603[103,161] 103ns |--------------------------------------L0.1603--------------------------------------| " + - "L0.1617[104,161] 104ns |-------------------------------------L0.1617--------------------------------------| " + - "L0.1631[105,161] 105ns |------------------------------------L0.1631-------------------------------------| " + - "L0.1645[106,161] 106ns |------------------------------------L0.1645------------------------------------| " + - "L0.1659[107,161] 107ns |-----------------------------------L0.1659-----------------------------------| " + - "L0.1673[108,161] 108ns |----------------------------------L0.1673-----------------------------------| " + - "L0.1687[109,161] 109ns |---------------------------------L0.1687----------------------------------| " + - "L0.1701[110,161] 110ns |---------------------------------L0.1701---------------------------------| " + - "L0.1715[111,161] 111ns |--------------------------------L0.1715--------------------------------| " + - "L0.1729[112,161] 112ns |-------------------------------L0.1729--------------------------------| " + - "L0.1743[113,161] 113ns |------------------------------L0.1743-------------------------------| " + - "L0.1757[114,161] 114ns |------------------------------L0.1757------------------------------| " + - "L0.1771[115,161] 115ns |-----------------------------L0.1771-----------------------------| " + - "L0.1785[116,161] 116ns |----------------------------L0.1785-----------------------------| " + - "L0.1799[117,161] 117ns |---------------------------L0.1799----------------------------| " + - "L0.1813[118,161] 118ns |---------------------------L0.1813---------------------------| " + - "L0.1827[119,161] 119ns |--------------------------L0.1827--------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[100,161] 119ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1561, L0.1575, L0.1589, L0.1603, L0.1617, L0.1631, L0.1645, L0.1659, L0.1673, L0.1687, L0.1701, L0.1715, L0.1729, L0.1743, L0.1757, L0.1771, L0.1785, L0.1799, L0.1813, L0.1827" + - " Creating 1 files" + - "**** Simulation run 206, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1841[120,161] 120ns |----------------------------------------L0.1841-----------------------------------------|" + - "L0.1855[121,161] 121ns |---------------------------------------L0.1855---------------------------------------| " + - "L0.1869[122,161] 122ns |--------------------------------------L0.1869--------------------------------------| " + - "L0.1883[123,161] 123ns |-------------------------------------L0.1883-------------------------------------| " + - "L0.1897[124,161] 124ns |------------------------------------L0.1897------------------------------------| " + - "L0.1911[125,161] 125ns |-----------------------------------L0.1911-----------------------------------| " + - "L0.1925[126,161] 126ns |---------------------------------L0.1925----------------------------------| " + - "L0.1939[127,161] 127ns |--------------------------------L0.1939---------------------------------| " + - "L0.1953[128,161] 128ns |-------------------------------L0.1953--------------------------------| " + - "L0.1967[129,161] 129ns |------------------------------L0.1967-------------------------------| " + - "L0.1981[130,161] 130ns |-----------------------------L0.1981------------------------------| " + - "L0.1995[131,161] 131ns |----------------------------L0.1995----------------------------| " + - "L0.2009[132,161] 132ns |---------------------------L0.2009---------------------------| " + - "L0.2023[133,161] 133ns |--------------------------L0.2023--------------------------| " + - "L0.2149[134,161] 134ns |-------------------------L0.2149-------------------------| " + - "L0.2163[135,161] 135ns |------------------------L0.2163------------------------| " + - "L0.2037[136,161] 136ns |----------------------L0.2037-----------------------| " + - "L0.2051[137,161] 137ns |---------------------L0.2051----------------------| " + - "L0.2065[138,161] 138ns |--------------------L0.2065---------------------| " + - "L0.2079[139,161] 139ns |-------------------L0.2079--------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[120,161] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1841, L0.1855, L0.1869, L0.1883, L0.1897, L0.1911, L0.1925, L0.1939, L0.1953, L0.1967, L0.1981, L0.1995, L0.2009, L0.2023, L0.2037, L0.2051, L0.2065, L0.2079, L0.2149, L0.2163" + - " Creating 1 files" + - "**** Simulation run 207, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2093[140,161] 140ns |----------------------------------------L0.2093-----------------------------------------|" + - "L0.2107[141,161] 141ns |--------------------------------------L0.2107--------------------------------------| " + - "L0.2121[142,161] 142ns |------------------------------------L0.2121------------------------------------| " + - "L0.2135[143,161] 143ns |----------------------------------L0.2135----------------------------------| " + - "L0.2177[144,161] 144ns |-------------------------------L0.2177--------------------------------| " + - "L0.2191[145,161] 145ns |-----------------------------L0.2191------------------------------| " + - "L0.2205[146,161] 146ns |---------------------------L0.2205----------------------------| " + - "L0.2219[147,161] 147ns |-------------------------L0.2219--------------------------|" + - "L0.2233[148,161] 148ns |-----------------------L0.2233-----------------------| " + - "L0.2247[149,161] 149ns |---------------------L0.2247---------------------| " + - "L0.2261[150,161] 150ns |-------------------L0.2261-------------------| " + - "L0.2275[151,161] 151ns |----------------L0.2275-----------------| " + - "L0.2289[152,161] 152ns |--------------L0.2289---------------| " + - "L0.2303[153,161] 153ns |------------L0.2303-------------| " + - "L0.2317[154,161] 154ns |----------L0.2317-----------|" + - "L0.2331[155,161] 155ns |--------L0.2331--------| " + - "L0.2345[156,161] 156ns |------L0.2345------| " + - "L0.2359[157,161] 157ns |----L0.2359----| " + - "L0.2373[158,161] 158ns |-L0.2373--| " + - "L0.2387[159,161] 159ns |L0.2387|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[140,161] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2093, L0.2107, L0.2121, L0.2135, L0.2177, L0.2191, L0.2205, L0.2219, L0.2233, L0.2247, L0.2261, L0.2275, L0.2289, L0.2303, L0.2317, L0.2331, L0.2345, L0.2359, L0.2373, L0.2387" + - " Creating 1 files" + - "**** Simulation run 208, type=compact(ManySmallFiles). 2 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2401[160,161] 160ns |----------------------------------------L0.2401-----------------------------------------|" + - "L0.2415[161,161] 161ns |L0.2415|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[160,161] 161ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 2 files: L0.2401, L0.2415" + - " Creating 1 files" + - "**** Simulation run 209, type=compact(ManySmallFiles). 20 Input Files, 160mb total:" - "L0, all files 8mb " - "L0.202[162,321] 0ns |-----------------------------------------L0.202-----------------------------------------|" - "L0.214[162,321] 1ns |-----------------------------------------L0.214-----------------------------------------|" @@ -5512,399 +5718,287 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 20 files: L0.202, L0.214, L0.226, L0.238, L0.250, L0.262, L0.274, L0.286, L0.298, L0.310, L0.322, L0.334, L0.346, L0.358, L0.370, L0.382, L0.394, L0.406, L0.530, L0.542" - " Creating 1 files" - - "**** Simulation run 202, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.203[322,481] 0ns |-----------------------------------------L0.203-----------------------------------------|" - - "L0.215[322,481] 1ns |-----------------------------------------L0.215-----------------------------------------|" - - "L0.227[322,481] 2ns |-----------------------------------------L0.227-----------------------------------------|" - - "L0.239[322,481] 3ns |-----------------------------------------L0.239-----------------------------------------|" - - "L0.251[322,481] 4ns |-----------------------------------------L0.251-----------------------------------------|" - - "L0.263[322,481] 5ns |-----------------------------------------L0.263-----------------------------------------|" - - "L0.275[322,481] 6ns |-----------------------------------------L0.275-----------------------------------------|" - - "L0.287[322,481] 7ns |-----------------------------------------L0.287-----------------------------------------|" - - "L0.299[322,481] 8ns |-----------------------------------------L0.299-----------------------------------------|" - - "L0.311[322,481] 9ns |-----------------------------------------L0.311-----------------------------------------|" - - "L0.323[322,481] 10ns |-----------------------------------------L0.323-----------------------------------------|" - - "L0.335[322,481] 11ns |-----------------------------------------L0.335-----------------------------------------|" - - "L0.347[322,481] 12ns |-----------------------------------------L0.347-----------------------------------------|" - - "L0.359[322,481] 13ns |-----------------------------------------L0.359-----------------------------------------|" - - "L0.371[322,481] 14ns |-----------------------------------------L0.371-----------------------------------------|" - - "L0.383[322,481] 15ns |-----------------------------------------L0.383-----------------------------------------|" - - "L0.395[322,481] 16ns |-----------------------------------------L0.395-----------------------------------------|" - - "L0.407[322,481] 17ns |-----------------------------------------L0.407-----------------------------------------|" - - "L0.531[322,481] 18ns |-----------------------------------------L0.531-----------------------------------------|" - - "L0.543[322,481] 19ns |-----------------------------------------L0.543-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[322,481] 19ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 210, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.418[162,321] 20ns |-----------------------------------------L0.418-----------------------------------------|" + - "L0.432[162,321] 21ns |-----------------------------------------L0.432-----------------------------------------|" + - "L0.446[162,321] 22ns |-----------------------------------------L0.446-----------------------------------------|" + - "L0.460[162,321] 23ns |-----------------------------------------L0.460-----------------------------------------|" + - "L0.474[162,321] 24ns |-----------------------------------------L0.474-----------------------------------------|" + - "L0.488[162,321] 25ns |-----------------------------------------L0.488-----------------------------------------|" + - "L0.502[162,321] 26ns |-----------------------------------------L0.502-----------------------------------------|" + - "L0.516[162,321] 27ns |-----------------------------------------L0.516-----------------------------------------|" + - "L0.554[162,321] 28ns |-----------------------------------------L0.554-----------------------------------------|" + - "L0.568[162,321] 29ns |-----------------------------------------L0.568-----------------------------------------|" + - "L0.582[162,321] 30ns |-----------------------------------------L0.582-----------------------------------------|" + - "L0.596[162,321] 31ns |-----------------------------------------L0.596-----------------------------------------|" + - "L0.610[162,321] 32ns |-----------------------------------------L0.610-----------------------------------------|" + - "L0.624[162,321] 33ns |-----------------------------------------L0.624-----------------------------------------|" + - "L0.638[162,321] 34ns |-----------------------------------------L0.638-----------------------------------------|" + - "L0.652[162,321] 35ns |-----------------------------------------L0.652-----------------------------------------|" + - "L0.666[162,321] 36ns |-----------------------------------------L0.666-----------------------------------------|" + - "L0.680[162,321] 37ns |-----------------------------------------L0.680-----------------------------------------|" + - "L0.694[162,321] 38ns |-----------------------------------------L0.694-----------------------------------------|" + - "L0.708[162,321] 39ns |-----------------------------------------L0.708-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[162,321] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.203, L0.215, L0.227, L0.239, L0.251, L0.263, L0.275, L0.287, L0.299, L0.311, L0.323, L0.335, L0.347, L0.359, L0.371, L0.383, L0.395, L0.407, L0.531, L0.543" + - " Soft Deleting 20 files: L0.418, L0.432, L0.446, L0.460, L0.474, L0.488, L0.502, L0.516, L0.554, L0.568, L0.582, L0.596, L0.610, L0.624, L0.638, L0.652, L0.666, L0.680, L0.694, L0.708" - " Creating 1 files" - - "**** Simulation run 203, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.204[482,641] 0ns |-----------------------------------------L0.204-----------------------------------------|" - - "L0.216[482,641] 1ns |-----------------------------------------L0.216-----------------------------------------|" - - "L0.228[482,641] 2ns |-----------------------------------------L0.228-----------------------------------------|" - - "L0.240[482,641] 3ns |-----------------------------------------L0.240-----------------------------------------|" - - "L0.252[482,641] 4ns |-----------------------------------------L0.252-----------------------------------------|" - - "L0.264[482,641] 5ns |-----------------------------------------L0.264-----------------------------------------|" - - "L0.276[482,641] 6ns |-----------------------------------------L0.276-----------------------------------------|" - - "L0.288[482,641] 7ns |-----------------------------------------L0.288-----------------------------------------|" - - "L0.300[482,641] 8ns |-----------------------------------------L0.300-----------------------------------------|" - - "L0.312[482,641] 9ns |-----------------------------------------L0.312-----------------------------------------|" - - "L0.324[482,641] 10ns |-----------------------------------------L0.324-----------------------------------------|" - - "L0.336[482,641] 11ns |-----------------------------------------L0.336-----------------------------------------|" - - "L0.348[482,641] 12ns |-----------------------------------------L0.348-----------------------------------------|" - - "L0.360[482,641] 13ns |-----------------------------------------L0.360-----------------------------------------|" - - "L0.372[482,641] 14ns |-----------------------------------------L0.372-----------------------------------------|" - - "L0.384[482,641] 15ns |-----------------------------------------L0.384-----------------------------------------|" - - "L0.396[482,641] 16ns |-----------------------------------------L0.396-----------------------------------------|" - - "L0.408[482,641] 17ns |-----------------------------------------L0.408-----------------------------------------|" - - "L0.532[482,641] 18ns |-----------------------------------------L0.532-----------------------------------------|" - - "L0.544[482,641] 19ns |-----------------------------------------L0.544-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[482,641] 19ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 211, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1002[162,321] 60ns |----------------------------------------L0.1002-----------------------------------------|" + - "L0.1016[162,321] 61ns |----------------------------------------L0.1016-----------------------------------------|" + - "L0.1030[162,321] 62ns |----------------------------------------L0.1030-----------------------------------------|" + - "L0.1044[162,321] 63ns |----------------------------------------L0.1044-----------------------------------------|" + - "L0.1058[162,321] 64ns |----------------------------------------L0.1058-----------------------------------------|" + - "L0.1072[162,321] 65ns |----------------------------------------L0.1072-----------------------------------------|" + - "L0.1086[162,321] 66ns |----------------------------------------L0.1086-----------------------------------------|" + - "L0.1100[162,321] 67ns |----------------------------------------L0.1100-----------------------------------------|" + - "L0.1114[162,321] 68ns |----------------------------------------L0.1114-----------------------------------------|" + - "L0.1128[162,321] 69ns |----------------------------------------L0.1128-----------------------------------------|" + - "L0.1142[162,321] 70ns |----------------------------------------L0.1142-----------------------------------------|" + - "L0.1156[162,321] 71ns |----------------------------------------L0.1156-----------------------------------------|" + - "L0.1170[162,321] 72ns |----------------------------------------L0.1170-----------------------------------------|" + - "L0.1184[162,321] 73ns |----------------------------------------L0.1184-----------------------------------------|" + - "L0.1198[162,321] 74ns |----------------------------------------L0.1198-----------------------------------------|" + - "L0.1212[162,321] 75ns |----------------------------------------L0.1212-----------------------------------------|" + - "L0.1226[162,321] 76ns |----------------------------------------L0.1226-----------------------------------------|" + - "L0.1240[162,321] 77ns |----------------------------------------L0.1240-----------------------------------------|" + - "L0.1254[162,321] 78ns |----------------------------------------L0.1254-----------------------------------------|" + - "L0.1268[162,321] 79ns |----------------------------------------L0.1268-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[162,321] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.204, L0.216, L0.228, L0.240, L0.252, L0.264, L0.276, L0.288, L0.300, L0.312, L0.324, L0.336, L0.348, L0.360, L0.372, L0.384, L0.396, L0.408, L0.532, L0.544" + - " Soft Deleting 20 files: L0.1002, L0.1016, L0.1030, L0.1044, L0.1058, L0.1072, L0.1086, L0.1100, L0.1114, L0.1128, L0.1142, L0.1156, L0.1170, L0.1184, L0.1198, L0.1212, L0.1226, L0.1240, L0.1254, L0.1268" - " Creating 1 files" - - "**** Simulation run 204, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.205[642,801] 0ns |-----------------------------------------L0.205-----------------------------------------|" - - "L0.217[642,801] 1ns |-----------------------------------------L0.217-----------------------------------------|" - - "L0.229[642,801] 2ns |-----------------------------------------L0.229-----------------------------------------|" - - "L0.241[642,801] 3ns |-----------------------------------------L0.241-----------------------------------------|" - - "L0.253[642,801] 4ns |-----------------------------------------L0.253-----------------------------------------|" - - "L0.265[642,801] 5ns |-----------------------------------------L0.265-----------------------------------------|" - - "L0.277[642,801] 6ns |-----------------------------------------L0.277-----------------------------------------|" - - "L0.289[642,801] 7ns |-----------------------------------------L0.289-----------------------------------------|" - - "L0.301[642,801] 8ns |-----------------------------------------L0.301-----------------------------------------|" - - "L0.313[642,801] 9ns |-----------------------------------------L0.313-----------------------------------------|" - - "L0.325[642,801] 10ns |-----------------------------------------L0.325-----------------------------------------|" - - "L0.337[642,801] 11ns |-----------------------------------------L0.337-----------------------------------------|" - - "L0.349[642,801] 12ns |-----------------------------------------L0.349-----------------------------------------|" - - "L0.361[642,801] 13ns |-----------------------------------------L0.361-----------------------------------------|" - - "L0.373[642,801] 14ns |-----------------------------------------L0.373-----------------------------------------|" - - "L0.385[642,801] 15ns |-----------------------------------------L0.385-----------------------------------------|" - - "L0.397[642,801] 16ns |-----------------------------------------L0.397-----------------------------------------|" - - "L0.409[642,801] 17ns |-----------------------------------------L0.409-----------------------------------------|" - - "L0.533[642,801] 18ns |-----------------------------------------L0.533-----------------------------------------|" - - "L0.545[642,801] 19ns |-----------------------------------------L0.545-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[642,801] 19ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 212, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1282[162,321] 80ns |----------------------------------------L0.1282-----------------------------------------|" + - "L0.1296[162,321] 81ns |----------------------------------------L0.1296-----------------------------------------|" + - "L0.1310[162,321] 82ns |----------------------------------------L0.1310-----------------------------------------|" + - "L0.1324[162,321] 83ns |----------------------------------------L0.1324-----------------------------------------|" + - "L0.1338[162,321] 84ns |----------------------------------------L0.1338-----------------------------------------|" + - "L0.1352[162,321] 85ns |----------------------------------------L0.1352-----------------------------------------|" + - "L0.1366[162,321] 86ns |----------------------------------------L0.1366-----------------------------------------|" + - "L0.1380[162,321] 87ns |----------------------------------------L0.1380-----------------------------------------|" + - "L0.1394[162,321] 88ns |----------------------------------------L0.1394-----------------------------------------|" + - "L0.1408[162,321] 89ns |----------------------------------------L0.1408-----------------------------------------|" + - "L0.1422[162,321] 90ns |----------------------------------------L0.1422-----------------------------------------|" + - "L0.1436[162,321] 91ns |----------------------------------------L0.1436-----------------------------------------|" + - "L0.1450[162,321] 92ns |----------------------------------------L0.1450-----------------------------------------|" + - "L0.1464[162,321] 93ns |----------------------------------------L0.1464-----------------------------------------|" + - "L0.1478[162,321] 94ns |----------------------------------------L0.1478-----------------------------------------|" + - "L0.1492[162,321] 95ns |----------------------------------------L0.1492-----------------------------------------|" + - "L0.1506[162,321] 96ns |----------------------------------------L0.1506-----------------------------------------|" + - "L0.1520[162,321] 97ns |----------------------------------------L0.1520-----------------------------------------|" + - "L0.1534[162,321] 98ns |----------------------------------------L0.1534-----------------------------------------|" + - "L0.1548[162,321] 99ns |----------------------------------------L0.1548-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[162,321] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.205, L0.217, L0.229, L0.241, L0.253, L0.265, L0.277, L0.289, L0.301, L0.313, L0.325, L0.337, L0.349, L0.361, L0.373, L0.385, L0.397, L0.409, L0.533, L0.545" + - " Soft Deleting 20 files: L0.1282, L0.1296, L0.1310, L0.1324, L0.1338, L0.1352, L0.1366, L0.1380, L0.1394, L0.1408, L0.1422, L0.1436, L0.1450, L0.1464, L0.1478, L0.1492, L0.1506, L0.1520, L0.1534, L0.1548" - " Creating 1 files" - - "**** Simulation run 205, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.206[802,961] 0ns |-----------------------------------------L0.206-----------------------------------------|" - - "L0.218[802,961] 1ns |-----------------------------------------L0.218-----------------------------------------|" - - "L0.230[802,961] 2ns |-----------------------------------------L0.230-----------------------------------------|" - - "L0.242[802,961] 3ns |-----------------------------------------L0.242-----------------------------------------|" - - "L0.254[802,961] 4ns |-----------------------------------------L0.254-----------------------------------------|" - - "L0.266[802,961] 5ns |-----------------------------------------L0.266-----------------------------------------|" - - "L0.278[802,961] 6ns |-----------------------------------------L0.278-----------------------------------------|" - - "L0.290[802,961] 7ns |-----------------------------------------L0.290-----------------------------------------|" - - "L0.302[802,961] 8ns |-----------------------------------------L0.302-----------------------------------------|" - - "L0.314[802,961] 9ns |-----------------------------------------L0.314-----------------------------------------|" - - "L0.326[802,961] 10ns |-----------------------------------------L0.326-----------------------------------------|" - - "L0.338[802,961] 11ns |-----------------------------------------L0.338-----------------------------------------|" - - "L0.350[802,961] 12ns |-----------------------------------------L0.350-----------------------------------------|" - - "L0.362[802,961] 13ns |-----------------------------------------L0.362-----------------------------------------|" - - "L0.374[802,961] 14ns |-----------------------------------------L0.374-----------------------------------------|" - - "L0.386[802,961] 15ns |-----------------------------------------L0.386-----------------------------------------|" - - "L0.398[802,961] 16ns |-----------------------------------------L0.398-----------------------------------------|" - - "L0.410[802,961] 17ns |-----------------------------------------L0.410-----------------------------------------|" - - "L0.534[802,961] 18ns |-----------------------------------------L0.534-----------------------------------------|" - - "L0.546[802,961] 19ns |-----------------------------------------L0.546-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[802,961] 19ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 213, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1562[162,321] 100ns |----------------------------------------L0.1562-----------------------------------------|" + - "L0.1576[162,321] 101ns |----------------------------------------L0.1576-----------------------------------------|" + - "L0.1590[162,321] 102ns |----------------------------------------L0.1590-----------------------------------------|" + - "L0.1604[162,321] 103ns |----------------------------------------L0.1604-----------------------------------------|" + - "L0.1618[162,321] 104ns |----------------------------------------L0.1618-----------------------------------------|" + - "L0.1632[162,321] 105ns |----------------------------------------L0.1632-----------------------------------------|" + - "L0.1646[162,321] 106ns |----------------------------------------L0.1646-----------------------------------------|" + - "L0.1660[162,321] 107ns |----------------------------------------L0.1660-----------------------------------------|" + - "L0.1674[162,321] 108ns |----------------------------------------L0.1674-----------------------------------------|" + - "L0.1688[162,321] 109ns |----------------------------------------L0.1688-----------------------------------------|" + - "L0.1702[162,321] 110ns |----------------------------------------L0.1702-----------------------------------------|" + - "L0.1716[162,321] 111ns |----------------------------------------L0.1716-----------------------------------------|" + - "L0.1730[162,321] 112ns |----------------------------------------L0.1730-----------------------------------------|" + - "L0.1744[162,321] 113ns |----------------------------------------L0.1744-----------------------------------------|" + - "L0.1758[162,321] 114ns |----------------------------------------L0.1758-----------------------------------------|" + - "L0.1772[162,321] 115ns |----------------------------------------L0.1772-----------------------------------------|" + - "L0.1786[162,321] 116ns |----------------------------------------L0.1786-----------------------------------------|" + - "L0.1800[162,321] 117ns |----------------------------------------L0.1800-----------------------------------------|" + - "L0.1814[162,321] 118ns |----------------------------------------L0.1814-----------------------------------------|" + - "L0.1828[162,321] 119ns |----------------------------------------L0.1828-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[162,321] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.206, L0.218, L0.230, L0.242, L0.254, L0.266, L0.278, L0.290, L0.302, L0.314, L0.326, L0.338, L0.350, L0.362, L0.374, L0.386, L0.398, L0.410, L0.534, L0.546" + - " Soft Deleting 20 files: L0.1562, L0.1576, L0.1590, L0.1604, L0.1618, L0.1632, L0.1646, L0.1660, L0.1674, L0.1688, L0.1702, L0.1716, L0.1730, L0.1744, L0.1758, L0.1772, L0.1786, L0.1800, L0.1814, L0.1828" - " Creating 1 files" - - "**** Simulation run 206, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.207[962,1121] 0ns |-----------------------------------------L0.207-----------------------------------------|" - - "L0.219[962,1121] 1ns |-----------------------------------------L0.219-----------------------------------------|" - - "L0.231[962,1121] 2ns |-----------------------------------------L0.231-----------------------------------------|" - - "L0.243[962,1121] 3ns |-----------------------------------------L0.243-----------------------------------------|" - - "L0.255[962,1121] 4ns |-----------------------------------------L0.255-----------------------------------------|" - - "L0.267[962,1121] 5ns |-----------------------------------------L0.267-----------------------------------------|" - - "L0.279[962,1121] 6ns |-----------------------------------------L0.279-----------------------------------------|" - - "L0.291[962,1121] 7ns |-----------------------------------------L0.291-----------------------------------------|" - - "L0.303[962,1121] 8ns |-----------------------------------------L0.303-----------------------------------------|" - - "L0.315[962,1121] 9ns |-----------------------------------------L0.315-----------------------------------------|" - - "L0.327[962,1121] 10ns |-----------------------------------------L0.327-----------------------------------------|" - - "L0.339[962,1121] 11ns |-----------------------------------------L0.339-----------------------------------------|" - - "L0.351[962,1121] 12ns |-----------------------------------------L0.351-----------------------------------------|" - - "L0.363[962,1121] 13ns |-----------------------------------------L0.363-----------------------------------------|" - - "L0.375[962,1121] 14ns |-----------------------------------------L0.375-----------------------------------------|" - - "L0.387[962,1121] 15ns |-----------------------------------------L0.387-----------------------------------------|" - - "L0.399[962,1121] 16ns |-----------------------------------------L0.399-----------------------------------------|" - - "L0.411[962,1121] 17ns |-----------------------------------------L0.411-----------------------------------------|" - - "L0.535[962,1121] 18ns |-----------------------------------------L0.535-----------------------------------------|" - - "L0.547[962,1121] 19ns |-----------------------------------------L0.547-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[962,1121] 19ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.207, L0.219, L0.231, L0.243, L0.255, L0.267, L0.279, L0.291, L0.303, L0.315, L0.327, L0.339, L0.351, L0.363, L0.375, L0.387, L0.399, L0.411, L0.535, L0.547" - - " Creating 1 files" - - "**** Simulation run 207, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.208[1122,1281] 0ns |-----------------------------------------L0.208-----------------------------------------|" - - "L0.220[1122,1281] 1ns |-----------------------------------------L0.220-----------------------------------------|" - - "L0.232[1122,1281] 2ns |-----------------------------------------L0.232-----------------------------------------|" - - "L0.244[1122,1281] 3ns |-----------------------------------------L0.244-----------------------------------------|" - - "L0.256[1122,1281] 4ns |-----------------------------------------L0.256-----------------------------------------|" - - "L0.268[1122,1281] 5ns |-----------------------------------------L0.268-----------------------------------------|" - - "L0.280[1122,1281] 6ns |-----------------------------------------L0.280-----------------------------------------|" - - "L0.292[1122,1281] 7ns |-----------------------------------------L0.292-----------------------------------------|" - - "L0.304[1122,1281] 8ns |-----------------------------------------L0.304-----------------------------------------|" - - "L0.316[1122,1281] 9ns |-----------------------------------------L0.316-----------------------------------------|" - - "L0.328[1122,1281] 10ns |-----------------------------------------L0.328-----------------------------------------|" - - "L0.340[1122,1281] 11ns |-----------------------------------------L0.340-----------------------------------------|" - - "L0.352[1122,1281] 12ns |-----------------------------------------L0.352-----------------------------------------|" - - "L0.364[1122,1281] 13ns |-----------------------------------------L0.364-----------------------------------------|" - - "L0.376[1122,1281] 14ns |-----------------------------------------L0.376-----------------------------------------|" - - "L0.388[1122,1281] 15ns |-----------------------------------------L0.388-----------------------------------------|" - - "L0.400[1122,1281] 16ns |-----------------------------------------L0.400-----------------------------------------|" - - "L0.412[1122,1281] 17ns |-----------------------------------------L0.412-----------------------------------------|" - - "L0.536[1122,1281] 18ns |-----------------------------------------L0.536-----------------------------------------|" - - "L0.548[1122,1281] 19ns |-----------------------------------------L0.548-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[1122,1281] 19ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.208, L0.220, L0.232, L0.244, L0.256, L0.268, L0.280, L0.292, L0.304, L0.316, L0.328, L0.340, L0.352, L0.364, L0.376, L0.388, L0.400, L0.412, L0.536, L0.548" - - " Creating 1 files" - - "**** Simulation run 208, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.209[1282,1441] 0ns |-----------------------------------------L0.209-----------------------------------------|" - - "L0.221[1282,1441] 1ns |-----------------------------------------L0.221-----------------------------------------|" - - "L0.233[1282,1441] 2ns |-----------------------------------------L0.233-----------------------------------------|" - - "L0.245[1282,1441] 3ns |-----------------------------------------L0.245-----------------------------------------|" - - "L0.257[1282,1441] 4ns |-----------------------------------------L0.257-----------------------------------------|" - - "L0.269[1282,1441] 5ns |-----------------------------------------L0.269-----------------------------------------|" - - "L0.281[1282,1441] 6ns |-----------------------------------------L0.281-----------------------------------------|" - - "L0.293[1282,1441] 7ns |-----------------------------------------L0.293-----------------------------------------|" - - "L0.305[1282,1441] 8ns |-----------------------------------------L0.305-----------------------------------------|" - - "L0.317[1282,1441] 9ns |-----------------------------------------L0.317-----------------------------------------|" - - "L0.329[1282,1441] 10ns |-----------------------------------------L0.329-----------------------------------------|" - - "L0.341[1282,1441] 11ns |-----------------------------------------L0.341-----------------------------------------|" - - "L0.353[1282,1441] 12ns |-----------------------------------------L0.353-----------------------------------------|" - - "L0.365[1282,1441] 13ns |-----------------------------------------L0.365-----------------------------------------|" - - "L0.377[1282,1441] 14ns |-----------------------------------------L0.377-----------------------------------------|" - - "L0.389[1282,1441] 15ns |-----------------------------------------L0.389-----------------------------------------|" - - "L0.401[1282,1441] 16ns |-----------------------------------------L0.401-----------------------------------------|" - - "L0.413[1282,1441] 17ns |-----------------------------------------L0.413-----------------------------------------|" - - "L0.537[1282,1441] 18ns |-----------------------------------------L0.537-----------------------------------------|" - - "L0.549[1282,1441] 19ns |-----------------------------------------L0.549-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[1282,1441] 19ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.209, L0.221, L0.233, L0.245, L0.257, L0.269, L0.281, L0.293, L0.305, L0.317, L0.329, L0.341, L0.353, L0.365, L0.377, L0.389, L0.401, L0.413, L0.537, L0.549" - - " Creating 1 files" - - "**** Simulation run 209, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.210[1442,1601] 0ns |-----------------------------------------L0.210-----------------------------------------|" - - "L0.222[1442,1601] 1ns |-----------------------------------------L0.222-----------------------------------------|" - - "L0.234[1442,1601] 2ns |-----------------------------------------L0.234-----------------------------------------|" - - "L0.246[1442,1601] 3ns |-----------------------------------------L0.246-----------------------------------------|" - - "L0.258[1442,1601] 4ns |-----------------------------------------L0.258-----------------------------------------|" - - "L0.270[1442,1601] 5ns |-----------------------------------------L0.270-----------------------------------------|" - - "L0.282[1442,1601] 6ns |-----------------------------------------L0.282-----------------------------------------|" - - "L0.294[1442,1601] 7ns |-----------------------------------------L0.294-----------------------------------------|" - - "L0.306[1442,1601] 8ns |-----------------------------------------L0.306-----------------------------------------|" - - "L0.318[1442,1601] 9ns |-----------------------------------------L0.318-----------------------------------------|" - - "L0.330[1442,1601] 10ns |-----------------------------------------L0.330-----------------------------------------|" - - "L0.342[1442,1601] 11ns |-----------------------------------------L0.342-----------------------------------------|" - - "L0.354[1442,1601] 12ns |-----------------------------------------L0.354-----------------------------------------|" - - "L0.366[1442,1601] 13ns |-----------------------------------------L0.366-----------------------------------------|" - - "L0.378[1442,1601] 14ns |-----------------------------------------L0.378-----------------------------------------|" - - "L0.390[1442,1601] 15ns |-----------------------------------------L0.390-----------------------------------------|" - - "L0.402[1442,1601] 16ns |-----------------------------------------L0.402-----------------------------------------|" - - "L0.414[1442,1601] 17ns |-----------------------------------------L0.414-----------------------------------------|" - - "L0.538[1442,1601] 18ns |-----------------------------------------L0.538-----------------------------------------|" - - "L0.550[1442,1601] 19ns |-----------------------------------------L0.550-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[1442,1601] 19ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.210, L0.222, L0.234, L0.246, L0.258, L0.270, L0.282, L0.294, L0.306, L0.318, L0.330, L0.342, L0.354, L0.366, L0.378, L0.390, L0.402, L0.414, L0.538, L0.550" - - " Creating 1 files" - - "**** Simulation run 210, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" - - "L0, all files 8mb " - - "L0.211[1602,1761] 0ns |-----------------------------------------L0.211-----------------------------------------|" - - "L0.223[1602,1761] 1ns |-----------------------------------------L0.223-----------------------------------------|" - - "L0.235[1602,1761] 2ns |-----------------------------------------L0.235-----------------------------------------|" - - "L0.247[1602,1761] 3ns |-----------------------------------------L0.247-----------------------------------------|" - - "L0.259[1602,1761] 4ns |-----------------------------------------L0.259-----------------------------------------|" - - "L0.271[1602,1761] 5ns |-----------------------------------------L0.271-----------------------------------------|" - - "L0.283[1602,1761] 6ns |-----------------------------------------L0.283-----------------------------------------|" - - "L0.295[1602,1761] 7ns |-----------------------------------------L0.295-----------------------------------------|" - - "L0.307[1602,1761] 8ns |-----------------------------------------L0.307-----------------------------------------|" - - "L0.319[1602,1761] 9ns |-----------------------------------------L0.319-----------------------------------------|" - - "L0.331[1602,1761] 10ns |-----------------------------------------L0.331-----------------------------------------|" - - "L0.343[1602,1761] 11ns |-----------------------------------------L0.343-----------------------------------------|" - - "L0.355[1602,1761] 12ns |-----------------------------------------L0.355-----------------------------------------|" - - "L0.367[1602,1761] 13ns |-----------------------------------------L0.367-----------------------------------------|" - - "L0.379[1602,1761] 14ns |-----------------------------------------L0.379-----------------------------------------|" - - "L0.391[1602,1761] 15ns |-----------------------------------------L0.391-----------------------------------------|" - - "L0.403[1602,1761] 16ns |-----------------------------------------L0.403-----------------------------------------|" - - "L0.415[1602,1761] 17ns |-----------------------------------------L0.415-----------------------------------------|" - - "L0.539[1602,1761] 18ns |-----------------------------------------L0.539-----------------------------------------|" - - "L0.551[1602,1761] 19ns |-----------------------------------------L0.551-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" - - "L0, all files 160mb " - - "L0.?[1602,1761] 19ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.211, L0.223, L0.235, L0.247, L0.259, L0.271, L0.283, L0.295, L0.307, L0.319, L0.331, L0.343, L0.355, L0.367, L0.379, L0.391, L0.403, L0.415, L0.539, L0.551" - - " Creating 1 files" - - "**** Simulation run 211, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 214, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.429[2001,2086] 20ns |-----------------------------------------L0.429-----------------------------------------|" - - "L0.443[2001,2086] 21ns |-----------------------------------------L0.443-----------------------------------------|" - - "L0.457[2001,2086] 22ns |-----------------------------------------L0.457-----------------------------------------|" - - "L0.471[2001,2086] 23ns |-----------------------------------------L0.471-----------------------------------------|" - - "L0.485[2001,2086] 24ns |-----------------------------------------L0.485-----------------------------------------|" - - "L0.499[2001,2086] 25ns |-----------------------------------------L0.499-----------------------------------------|" - - "L0.513[2001,2086] 26ns |-----------------------------------------L0.513-----------------------------------------|" - - "L0.527[2001,2086] 27ns |-----------------------------------------L0.527-----------------------------------------|" - - "L0.565[2001,2086] 28ns |-----------------------------------------L0.565-----------------------------------------|" - - "L0.579[2001,2086] 29ns |-----------------------------------------L0.579-----------------------------------------|" - - "L0.593[2001,2086] 30ns |-----------------------------------------L0.593-----------------------------------------|" - - "L0.607[2001,2086] 31ns |-----------------------------------------L0.607-----------------------------------------|" - - "L0.621[2001,2086] 32ns |-----------------------------------------L0.621-----------------------------------------|" - - "L0.635[2001,2086] 33ns |-----------------------------------------L0.635-----------------------------------------|" - - "L0.649[2001,2086] 34ns |-----------------------------------------L0.649-----------------------------------------|" - - "L0.663[2001,2086] 35ns |-----------------------------------------L0.663-----------------------------------------|" - - "L0.677[2001,2086] 36ns |-----------------------------------------L0.677-----------------------------------------|" - - "L0.691[2001,2086] 37ns |-----------------------------------------L0.691-----------------------------------------|" - - "L0.705[2001,2086] 38ns |-----------------------------------------L0.705-----------------------------------------|" - - "L0.719[2001,2086] 39ns |-----------------------------------------L0.719-----------------------------------------|" + - "L0.1842[162,321] 120ns |----------------------------------------L0.1842-----------------------------------------|" + - "L0.1856[162,321] 121ns |----------------------------------------L0.1856-----------------------------------------|" + - "L0.1870[162,321] 122ns |----------------------------------------L0.1870-----------------------------------------|" + - "L0.1884[162,321] 123ns |----------------------------------------L0.1884-----------------------------------------|" + - "L0.1898[162,321] 124ns |----------------------------------------L0.1898-----------------------------------------|" + - "L0.1912[162,321] 125ns |----------------------------------------L0.1912-----------------------------------------|" + - "L0.1926[162,321] 126ns |----------------------------------------L0.1926-----------------------------------------|" + - "L0.1940[162,321] 127ns |----------------------------------------L0.1940-----------------------------------------|" + - "L0.1954[162,321] 128ns |----------------------------------------L0.1954-----------------------------------------|" + - "L0.1968[162,321] 
129ns |----------------------------------------L0.1968-----------------------------------------|" + - "L0.1982[162,321] 130ns |----------------------------------------L0.1982-----------------------------------------|" + - "L0.1996[162,321] 131ns |----------------------------------------L0.1996-----------------------------------------|" + - "L0.2010[162,321] 132ns |----------------------------------------L0.2010-----------------------------------------|" + - "L0.2024[162,321] 133ns |----------------------------------------L0.2024-----------------------------------------|" + - "L0.2150[162,321] 134ns |----------------------------------------L0.2150-----------------------------------------|" + - "L0.2164[162,321] 135ns |----------------------------------------L0.2164-----------------------------------------|" + - "L0.2038[162,321] 136ns |----------------------------------------L0.2038-----------------------------------------|" + - "L0.2052[162,321] 137ns |----------------------------------------L0.2052-----------------------------------------|" + - "L0.2066[162,321] 138ns |----------------------------------------L0.2066-----------------------------------------|" + - "L0.2080[162,321] 139ns |----------------------------------------L0.2080-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[162,321] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.429, L0.443, L0.457, L0.471, L0.485, L0.499, L0.513, L0.527, L0.565, L0.579, L0.593, L0.607, L0.621, L0.635, L0.649, L0.663, L0.677, L0.691, L0.705, L0.719" + - " Soft Deleting 20 files: L0.1842, L0.1856, L0.1870, L0.1884, L0.1898, L0.1912, L0.1926, L0.1940, L0.1954, L0.1968, L0.1982, L0.1996, L0.2010, L0.2024, L0.2038, L0.2052, L0.2066, L0.2080, L0.2150, L0.2164" - " Creating 1 files" - - "**** Simulation run 212, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" - - "L0, all files 10b " - - "L0.430[2087,200000] 20ns |------------------L0.430-------------------| " - - "L0.444[2087,210000] 21ns |--------------------L0.444--------------------| " - - "L0.458[2087,220000] 22ns |---------------------L0.458---------------------| " - - "L0.472[2087,230000] 23ns |----------------------L0.472----------------------| " - - "L0.486[2087,240000] 24ns |-----------------------L0.486------------------------| " - - "L0.500[2087,250000] 25ns |------------------------L0.500-------------------------| " - - "L0.514[2087,260000] 26ns |-------------------------L0.514--------------------------| " - - "L0.528[2087,270000] 27ns |---------------------------L0.528---------------------------| " - - "L0.566[2087,280000] 28ns |----------------------------L0.566----------------------------| " - - "L0.580[2087,290000] 29ns |-----------------------------L0.580-----------------------------| " - - "L0.594[2087,300000] 30ns |------------------------------L0.594-------------------------------| " - - "L0.608[2087,310000] 31ns |-------------------------------L0.608--------------------------------| " - - "L0.622[2087,320000] 32ns |--------------------------------L0.622---------------------------------| " - - "L0.636[2087,330000] 33ns |----------------------------------L0.636----------------------------------| " - - "L0.650[2087,340000] 34ns |-----------------------------------L0.650-----------------------------------| " - - "L0.664[2087,350000] 35ns |------------------------------------L0.664------------------------------------| " - - "L0.678[2087,360000] 36ns |-------------------------------------L0.678--------------------------------------| " - - "L0.692[2087,370000] 37ns |--------------------------------------L0.692---------------------------------------| " - - "L0.706[2087,380000] 38ns |---------------------------------------L0.706----------------------------------------| " - - "L0.720[2087,390000] 39ns |-----------------------------------------L0.720-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" - - "L0, all files 200b " - - "L0.?[2087,390000] 39ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.430, L0.444, L0.458, L0.472, L0.486, L0.500, L0.514, L0.528, L0.566, L0.580, L0.594, L0.608, L0.622, L0.636, L0.650, L0.664, L0.678, L0.692, L0.706, L0.720" - - " Creating 1 files" - - "**** Simulation run 213, type=compact(ManySmallFiles). 
20 Input Files, 239mb total:" - - "L0, all files 12mb " - - "L0.212[1762,2000] 0ns |-----------------------------------------L0.212-----------------------------------------|" - - "L0.224[1762,2000] 1ns |-----------------------------------------L0.224-----------------------------------------|" - - "L0.236[1762,2000] 2ns |-----------------------------------------L0.236-----------------------------------------|" - - "L0.248[1762,2000] 3ns |-----------------------------------------L0.248-----------------------------------------|" - - "L0.260[1762,2000] 4ns |-----------------------------------------L0.260-----------------------------------------|" - - "L0.272[1762,2000] 5ns |-----------------------------------------L0.272-----------------------------------------|" - - "L0.284[1762,2000] 6ns |-----------------------------------------L0.284-----------------------------------------|" - - "L0.296[1762,2000] 7ns |-----------------------------------------L0.296-----------------------------------------|" - - "L0.308[1762,2000] 8ns |-----------------------------------------L0.308-----------------------------------------|" - - "L0.320[1762,2000] 9ns |-----------------------------------------L0.320-----------------------------------------|" - - "L0.332[1762,2000] 10ns |-----------------------------------------L0.332-----------------------------------------|" - - "L0.344[1762,2000] 11ns |-----------------------------------------L0.344-----------------------------------------|" - - "L0.356[1762,2000] 12ns |-----------------------------------------L0.356-----------------------------------------|" - - "L0.368[1762,2000] 13ns |-----------------------------------------L0.368-----------------------------------------|" - - "L0.380[1762,2000] 14ns |-----------------------------------------L0.380-----------------------------------------|" - - "L0.392[1762,2000] 15ns |-----------------------------------------L0.392-----------------------------------------|" - - "L0.404[1762,2000] 16ns |-----------------------------------------L0.404-----------------------------------------|" - - "L0.416[1762,2000] 17ns |-----------------------------------------L0.416-----------------------------------------|" - - "L0.540[1762,2000] 18ns |-----------------------------------------L0.540-----------------------------------------|" - - "L0.552[1762,2000] 19ns |-----------------------------------------L0.552-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 239mb total:" - - "L0, all files 239mb " - - "L0.?[1762,2000] 19ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 215, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2094[162,321] 140ns |----------------------------------------L0.2094-----------------------------------------|" + - "L0.2108[162,321] 141ns |----------------------------------------L0.2108-----------------------------------------|" + - "L0.2122[162,321] 142ns |----------------------------------------L0.2122-----------------------------------------|" + - "L0.2136[162,321] 143ns |----------------------------------------L0.2136-----------------------------------------|" + - "L0.2178[162,321] 144ns |----------------------------------------L0.2178-----------------------------------------|" + - "L0.2192[162,321] 145ns |----------------------------------------L0.2192-----------------------------------------|" + - "L0.2206[162,321] 146ns |----------------------------------------L0.2206-----------------------------------------|" + - "L0.2220[162,321] 147ns |----------------------------------------L0.2220-----------------------------------------|" + - "L0.2234[162,321] 148ns |----------------------------------------L0.2234-----------------------------------------|" + - "L0.2248[162,321] 149ns |----------------------------------------L0.2248-----------------------------------------|" + - "L0.2262[162,321] 150ns |----------------------------------------L0.2262-----------------------------------------|" + - "L0.2276[162,321] 151ns |----------------------------------------L0.2276-----------------------------------------|" + - "L0.2290[162,321] 152ns |----------------------------------------L0.2290-----------------------------------------|" + - "L0.2304[162,321] 153ns |----------------------------------------L0.2304-----------------------------------------|" + - "L0.2318[162,321] 154ns |----------------------------------------L0.2318-----------------------------------------|" + - "L0.2332[162,321] 155ns |----------------------------------------L0.2332-----------------------------------------|" + - "L0.2346[162,321] 156ns |----------------------------------------L0.2346-----------------------------------------|" + - "L0.2360[162,321] 157ns |----------------------------------------L0.2360-----------------------------------------|" + - "L0.2374[162,321] 158ns |----------------------------------------L0.2374-----------------------------------------|" + - "L0.2388[162,321] 159ns |----------------------------------------L0.2388-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[162,321] 159ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.212, L0.224, L0.236, L0.248, L0.260, L0.272, L0.284, L0.296, L0.308, L0.320, L0.332, L0.344, L0.356, L0.368, L0.380, L0.392, L0.404, L0.416, L0.540, L0.552" + - " Soft Deleting 20 files: L0.2094, L0.2108, L0.2122, L0.2136, L0.2178, L0.2192, L0.2206, L0.2220, L0.2234, L0.2248, L0.2262, L0.2276, L0.2290, L0.2304, L0.2318, L0.2332, L0.2346, L0.2360, L0.2374, L0.2388" - " Creating 1 files" - - "**** Simulation run 214, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 216, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.417[20,161] 20ns |-----------------------------------------L0.417-----------------------------------------|" - - "L0.431[21,161] 21ns |----------------------------------------L0.431-----------------------------------------| " - - "L0.445[22,161] 22ns |----------------------------------------L0.445----------------------------------------| " - - "L0.459[23,161] 23ns |----------------------------------------L0.459----------------------------------------| " - - "L0.473[24,161] 24ns |---------------------------------------L0.473----------------------------------------| " - - "L0.487[25,161] 25ns |---------------------------------------L0.487---------------------------------------| " - - "L0.501[26,161] 26ns |---------------------------------------L0.501---------------------------------------| " - - "L0.515[27,161] 27ns |--------------------------------------L0.515---------------------------------------| " - - "L0.553[28,161] 28ns |--------------------------------------L0.553--------------------------------------| " - - "L0.567[29,161] 29ns |--------------------------------------L0.567--------------------------------------| " - - "L0.581[30,161] 30ns |-------------------------------------L0.581--------------------------------------| " - - "L0.595[31,161] 31ns |-------------------------------------L0.595-------------------------------------| " - - "L0.609[32,161] 32ns |-------------------------------------L0.609-------------------------------------| " - - "L0.623[33,161] 33ns |------------------------------------L0.623-------------------------------------| " - - "L0.637[34,161] 34ns |------------------------------------L0.637-------------------------------------| " - - "L0.651[35,161] 35ns |------------------------------------L0.651------------------------------------| " - - "L0.665[36,161] 36ns |-----------------------------------L0.665------------------------------------| " - - "L0.679[37,161] 37ns |-----------------------------------L0.679------------------------------------| " - - "L0.693[38,161] 38ns |-----------------------------------L0.693-----------------------------------| " - - "L0.707[39,161] 39ns |----------------------------------L0.707-----------------------------------| " + - "L0.2402[162,321] 160ns |----------------------------------------L0.2402-----------------------------------------|" + - "L0.2416[162,321] 161ns |----------------------------------------L0.2416-----------------------------------------|" + - "L0.2429[162,321] 162ns |----------------------------------------L0.2429-----------------------------------------|" + - "L0.2442[163,321] 163ns |----------------------------------------L0.2442----------------------------------------| " + - "L0.2455[164,321] 164ns |---------------------------------------L0.2455----------------------------------------| " + - "L0.2468[165,321] 165ns |---------------------------------------L0.2468----------------------------------------| " + - "L0.2481[166,321] 166ns |---------------------------------------L0.2481---------------------------------------| " + - "L0.2494[167,321] 167ns |---------------------------------------L0.2494---------------------------------------| " + - "L0.2507[168,321] 168ns |--------------------------------------L0.2507---------------------------------------| " + - "L0.2520[169,321] 169ns |--------------------------------------L0.2520---------------------------------------| " + - "L0.2533[170,321] 170ns 
|--------------------------------------L0.2533--------------------------------------| " + - "L0.2546[171,321] 171ns |-------------------------------------L0.2546--------------------------------------| " + - "L0.2559[172,321] 172ns |-------------------------------------L0.2559--------------------------------------| " + - "L0.2572[173,321] 173ns |-------------------------------------L0.2572-------------------------------------| " + - "L0.2585[174,321] 174ns |-------------------------------------L0.2585-------------------------------------| " + - "L0.2598[175,321] 175ns |------------------------------------L0.2598-------------------------------------| " + - "L0.2611[176,321] 176ns |------------------------------------L0.2611-------------------------------------| " + - "L0.2624[177,321] 177ns |------------------------------------L0.2624------------------------------------| " + - "L0.2637[178,321] 178ns |-----------------------------------L0.2637------------------------------------| " + - "L0.2650[179,321] 179ns |-----------------------------------L0.2650------------------------------------| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[162,321] 179ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2402, L0.2416, L0.2429, L0.2442, L0.2455, L0.2468, L0.2481, L0.2494, L0.2507, L0.2520, L0.2533, L0.2546, L0.2559, L0.2572, L0.2585, L0.2598, L0.2611, L0.2624, L0.2637, L0.2650" + - " Creating 1 files" + - "**** Simulation run 217, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2663[180,321] 180ns |----------------------------------------L0.2663-----------------------------------------|" + - "L0.2676[181,321] 181ns |----------------------------------------L0.2676----------------------------------------| " + - "L0.2689[182,321] 182ns |---------------------------------------L0.2689----------------------------------------| " + - "L0.2702[183,321] 183ns |---------------------------------------L0.2702----------------------------------------| " + - "L0.2715[184,321] 184ns |---------------------------------------L0.2715---------------------------------------| " + - "L0.2728[185,321] 185ns |--------------------------------------L0.2728---------------------------------------| " + - "L0.2741[186,321] 186ns |--------------------------------------L0.2741---------------------------------------| " + - "L0.2754[187,321] 187ns |--------------------------------------L0.2754--------------------------------------| " + - "L0.2767[188,321] 188ns |-------------------------------------L0.2767--------------------------------------| " + - "L0.2780[189,321] 189ns |-------------------------------------L0.2780--------------------------------------| " + - "L0.2793[190,321] 190ns |-------------------------------------L0.2793-------------------------------------| " + - "L0.2806[191,321] 191ns |------------------------------------L0.2806-------------------------------------| " + - "L0.2819[192,321] 192ns |------------------------------------L0.2819-------------------------------------| " + - "L0.2832[193,321] 193ns |------------------------------------L0.2832------------------------------------| " + - "L0.2845[194,321] 194ns |------------------------------------L0.2845------------------------------------| " + - "L0.2858[195,321] 195ns |-----------------------------------L0.2858------------------------------------| " + - 
"L0.2871[196,321] 196ns |-----------------------------------L0.2871-----------------------------------| " + - "L0.2884[197,321] 197ns |-----------------------------------L0.2884-----------------------------------| " + - "L0.2897[198,321] 198ns |----------------------------------L0.2897-----------------------------------| " + - "L0.2910[199,321] 199ns |----------------------------------L0.2910----------------------------------| " - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[20,161] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[180,321] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.417, L0.431, L0.445, L0.459, L0.473, L0.487, L0.501, L0.515, L0.553, L0.567, L0.581, L0.595, L0.609, L0.623, L0.637, L0.651, L0.665, L0.679, L0.693, L0.707" + - " Soft Deleting 20 files: L0.2663, L0.2676, L0.2689, L0.2702, L0.2715, L0.2728, L0.2741, L0.2754, L0.2767, L0.2780, L0.2793, L0.2806, L0.2819, L0.2832, L0.2845, L0.2858, L0.2871, L0.2884, L0.2897, L0.2910" - " Creating 1 files" - - "**** Simulation run 215, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 218, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.418[162,321] 20ns |-----------------------------------------L0.418-----------------------------------------|" - - "L0.432[162,321] 21ns |-----------------------------------------L0.432-----------------------------------------|" - - "L0.446[162,321] 22ns |-----------------------------------------L0.446-----------------------------------------|" - - "L0.460[162,321] 23ns |-----------------------------------------L0.460-----------------------------------------|" - - "L0.474[162,321] 24ns |-----------------------------------------L0.474-----------------------------------------|" - - "L0.488[162,321] 25ns |-----------------------------------------L0.488-----------------------------------------|" - - "L0.502[162,321] 26ns |-----------------------------------------L0.502-----------------------------------------|" - - "L0.516[162,321] 27ns |-----------------------------------------L0.516-----------------------------------------|" - - "L0.554[162,321] 28ns |-----------------------------------------L0.554-----------------------------------------|" - - "L0.568[162,321] 29ns |-----------------------------------------L0.568-----------------------------------------|" - - "L0.582[162,321] 30ns |-----------------------------------------L0.582-----------------------------------------|" - - "L0.596[162,321] 31ns |-----------------------------------------L0.596-----------------------------------------|" - - "L0.610[162,321] 32ns |-----------------------------------------L0.610-----------------------------------------|" - - "L0.624[162,321] 33ns |-----------------------------------------L0.624-----------------------------------------|" - - "L0.638[162,321] 34ns |-----------------------------------------L0.638-----------------------------------------|" - - "L0.652[162,321] 35ns |-----------------------------------------L0.652-----------------------------------------|" - - "L0.666[162,321] 36ns |-----------------------------------------L0.666-----------------------------------------|" - - "L0.680[162,321] 37ns |-----------------------------------------L0.680-----------------------------------------|" - - "L0.694[162,321] 38ns 
|-----------------------------------------L0.694-----------------------------------------|" - - "L0.708[162,321] 39ns |-----------------------------------------L0.708-----------------------------------------|" + - "L0.722[162,321] 40ns |-----------------------------------------L0.722-----------------------------------------|" + - "L0.736[162,321] 41ns |-----------------------------------------L0.736-----------------------------------------|" + - "L0.750[162,321] 42ns |-----------------------------------------L0.750-----------------------------------------|" + - "L0.764[162,321] 43ns |-----------------------------------------L0.764-----------------------------------------|" + - "L0.778[162,321] 44ns |-----------------------------------------L0.778-----------------------------------------|" + - "L0.792[162,321] 45ns |-----------------------------------------L0.792-----------------------------------------|" + - "L0.806[162,321] 46ns |-----------------------------------------L0.806-----------------------------------------|" + - "L0.820[162,321] 47ns |-----------------------------------------L0.820-----------------------------------------|" + - "L0.834[162,321] 48ns |-----------------------------------------L0.834-----------------------------------------|" + - "L0.848[162,321] 49ns |-----------------------------------------L0.848-----------------------------------------|" + - "L0.862[162,321] 50ns |-----------------------------------------L0.862-----------------------------------------|" + - "L0.876[162,321] 51ns |-----------------------------------------L0.876-----------------------------------------|" + - "L0.890[162,321] 52ns |-----------------------------------------L0.890-----------------------------------------|" + - "L0.904[162,321] 53ns |-----------------------------------------L0.904-----------------------------------------|" + - "L0.918[162,321] 54ns |-----------------------------------------L0.918-----------------------------------------|" + - "L0.932[162,321] 55ns |-----------------------------------------L0.932-----------------------------------------|" + - "L0.946[162,321] 56ns |-----------------------------------------L0.946-----------------------------------------|" + - "L0.960[162,321] 57ns |-----------------------------------------L0.960-----------------------------------------|" + - "L0.974[162,321] 58ns |-----------------------------------------L0.974-----------------------------------------|" + - "L0.988[162,321] 59ns |-----------------------------------------L0.988-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[162,321] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.418, L0.432, L0.446, L0.460, L0.474, L0.488, L0.502, L0.516, L0.554, L0.568, L0.582, L0.596, L0.610, L0.624, L0.638, L0.652, L0.666, L0.680, L0.694, L0.708" + - " Soft Deleting 20 files: L0.722, L0.736, L0.750, L0.764, L0.778, L0.792, L0.806, L0.820, L0.834, L0.848, L0.862, L0.876, L0.890, L0.904, L0.918, L0.932, L0.946, L0.960, L0.974, L0.988" - " Creating 1 files" - - "**** Simulation run 216, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 219, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.203[322,481] 0ns |-----------------------------------------L0.203-----------------------------------------|" + - "L0.215[322,481] 1ns |-----------------------------------------L0.215-----------------------------------------|" + - "L0.227[322,481] 2ns |-----------------------------------------L0.227-----------------------------------------|" + - "L0.239[322,481] 3ns |-----------------------------------------L0.239-----------------------------------------|" + - "L0.251[322,481] 4ns |-----------------------------------------L0.251-----------------------------------------|" + - "L0.263[322,481] 5ns |-----------------------------------------L0.263-----------------------------------------|" + - "L0.275[322,481] 6ns |-----------------------------------------L0.275-----------------------------------------|" + - "L0.287[322,481] 7ns |-----------------------------------------L0.287-----------------------------------------|" + - "L0.299[322,481] 8ns |-----------------------------------------L0.299-----------------------------------------|" + - "L0.311[322,481] 9ns |-----------------------------------------L0.311-----------------------------------------|" + - "L0.323[322,481] 10ns |-----------------------------------------L0.323-----------------------------------------|" + - "L0.335[322,481] 11ns |-----------------------------------------L0.335-----------------------------------------|" + - "L0.347[322,481] 12ns |-----------------------------------------L0.347-----------------------------------------|" + - "L0.359[322,481] 13ns |-----------------------------------------L0.359-----------------------------------------|" + - "L0.371[322,481] 14ns |-----------------------------------------L0.371-----------------------------------------|" + - "L0.383[322,481] 15ns |-----------------------------------------L0.383-----------------------------------------|" + - "L0.395[322,481] 16ns |-----------------------------------------L0.395-----------------------------------------|" + - "L0.407[322,481] 17ns |-----------------------------------------L0.407-----------------------------------------|" + - "L0.531[322,481] 18ns |-----------------------------------------L0.531-----------------------------------------|" + - "L0.543[322,481] 19ns |-----------------------------------------L0.543-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[322,481] 19ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.203, L0.215, L0.227, L0.239, L0.251, L0.263, L0.275, L0.287, L0.299, L0.311, L0.323, L0.335, L0.347, L0.359, L0.371, L0.383, L0.395, L0.407, L0.531, L0.543" + - " Creating 1 files" + - "**** Simulation run 220, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - "L0.419[322,481] 20ns |-----------------------------------------L0.419-----------------------------------------|" - "L0.433[322,481] 21ns |-----------------------------------------L0.433-----------------------------------------|" @@ -5932,401 +6026,288 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 20 files: L0.419, L0.433, L0.447, L0.461, L0.475, L0.489, L0.503, L0.517, L0.555, L0.569, L0.583, L0.597, L0.611, L0.625, L0.639, L0.653, L0.667, L0.681, L0.695, L0.709" - " Creating 1 files" - - "**** Simulation run 217, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.420[482,641] 20ns |-----------------------------------------L0.420-----------------------------------------|" - - "L0.434[482,641] 21ns |-----------------------------------------L0.434-----------------------------------------|" - - "L0.448[482,641] 22ns |-----------------------------------------L0.448-----------------------------------------|" - - "L0.462[482,641] 23ns |-----------------------------------------L0.462-----------------------------------------|" - - "L0.476[482,641] 24ns |-----------------------------------------L0.476-----------------------------------------|" - - "L0.490[482,641] 25ns |-----------------------------------------L0.490-----------------------------------------|" - - "L0.504[482,641] 26ns |-----------------------------------------L0.504-----------------------------------------|" - - "L0.518[482,641] 27ns |-----------------------------------------L0.518-----------------------------------------|" - - "L0.556[482,641] 28ns |-----------------------------------------L0.556-----------------------------------------|" - - "L0.570[482,641] 29ns |-----------------------------------------L0.570-----------------------------------------|" - - "L0.584[482,641] 30ns |-----------------------------------------L0.584-----------------------------------------|" - - "L0.598[482,641] 31ns |-----------------------------------------L0.598-----------------------------------------|" - - "L0.612[482,641] 32ns |-----------------------------------------L0.612-----------------------------------------|" - - "L0.626[482,641] 33ns |-----------------------------------------L0.626-----------------------------------------|" - - "L0.640[482,641] 34ns |-----------------------------------------L0.640-----------------------------------------|" - - "L0.654[482,641] 35ns |-----------------------------------------L0.654-----------------------------------------|" - - "L0.668[482,641] 36ns |-----------------------------------------L0.668-----------------------------------------|" - - "L0.682[482,641] 37ns |-----------------------------------------L0.682-----------------------------------------|" - - "L0.696[482,641] 38ns |-----------------------------------------L0.696-----------------------------------------|" - - "L0.710[482,641] 39ns |-----------------------------------------L0.710-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[482,641] 39ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.420, L0.434, L0.448, L0.462, L0.476, L0.490, L0.504, L0.518, L0.556, L0.570, L0.584, L0.598, L0.612, L0.626, L0.640, L0.654, L0.668, L0.682, L0.696, L0.710" - - " Creating 1 files" - - "**** Simulation run 218, 
type=compact(ManySmallFiles). 20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.421[642,801] 20ns |-----------------------------------------L0.421-----------------------------------------|" - - "L0.435[642,801] 21ns |-----------------------------------------L0.435-----------------------------------------|" - - "L0.449[642,801] 22ns |-----------------------------------------L0.449-----------------------------------------|" - - "L0.463[642,801] 23ns |-----------------------------------------L0.463-----------------------------------------|" - - "L0.477[642,801] 24ns |-----------------------------------------L0.477-----------------------------------------|" - - "L0.491[642,801] 25ns |-----------------------------------------L0.491-----------------------------------------|" - - "L0.505[642,801] 26ns |-----------------------------------------L0.505-----------------------------------------|" - - "L0.519[642,801] 27ns |-----------------------------------------L0.519-----------------------------------------|" - - "L0.557[642,801] 28ns |-----------------------------------------L0.557-----------------------------------------|" - - "L0.571[642,801] 29ns |-----------------------------------------L0.571-----------------------------------------|" - - "L0.585[642,801] 30ns |-----------------------------------------L0.585-----------------------------------------|" - - "L0.599[642,801] 31ns |-----------------------------------------L0.599-----------------------------------------|" - - "L0.613[642,801] 32ns |-----------------------------------------L0.613-----------------------------------------|" - - "L0.627[642,801] 33ns |-----------------------------------------L0.627-----------------------------------------|" - - "L0.641[642,801] 34ns |-----------------------------------------L0.641-----------------------------------------|" - - "L0.655[642,801] 35ns |-----------------------------------------L0.655-----------------------------------------|" - - "L0.669[642,801] 36ns |-----------------------------------------L0.669-----------------------------------------|" - - "L0.683[642,801] 37ns |-----------------------------------------L0.683-----------------------------------------|" - - "L0.697[642,801] 38ns |-----------------------------------------L0.697-----------------------------------------|" - - "L0.711[642,801] 39ns |-----------------------------------------L0.711-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[642,801] 39ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.421, L0.435, L0.449, L0.463, L0.477, L0.491, L0.505, L0.519, L0.557, L0.571, L0.585, L0.599, L0.613, L0.627, L0.641, L0.655, L0.669, L0.683, L0.697, L0.711" - - " Creating 1 files" - - "**** Simulation run 219, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.422[802,961] 20ns |-----------------------------------------L0.422-----------------------------------------|" - - "L0.436[802,961] 21ns |-----------------------------------------L0.436-----------------------------------------|" - - "L0.450[802,961] 22ns |-----------------------------------------L0.450-----------------------------------------|" - - "L0.464[802,961] 23ns |-----------------------------------------L0.464-----------------------------------------|" - - "L0.478[802,961] 24ns |-----------------------------------------L0.478-----------------------------------------|" - - "L0.492[802,961] 25ns |-----------------------------------------L0.492-----------------------------------------|" - - "L0.506[802,961] 26ns |-----------------------------------------L0.506-----------------------------------------|" - - "L0.520[802,961] 27ns |-----------------------------------------L0.520-----------------------------------------|" - - "L0.558[802,961] 28ns |-----------------------------------------L0.558-----------------------------------------|" - - "L0.572[802,961] 29ns |-----------------------------------------L0.572-----------------------------------------|" - - "L0.586[802,961] 30ns |-----------------------------------------L0.586-----------------------------------------|" - - "L0.600[802,961] 31ns |-----------------------------------------L0.600-----------------------------------------|" - - "L0.614[802,961] 32ns |-----------------------------------------L0.614-----------------------------------------|" - - "L0.628[802,961] 33ns |-----------------------------------------L0.628-----------------------------------------|" - - "L0.642[802,961] 34ns |-----------------------------------------L0.642-----------------------------------------|" - - "L0.656[802,961] 35ns |-----------------------------------------L0.656-----------------------------------------|" - - "L0.670[802,961] 36ns |-----------------------------------------L0.670-----------------------------------------|" - - "L0.684[802,961] 37ns |-----------------------------------------L0.684-----------------------------------------|" - - "L0.698[802,961] 38ns |-----------------------------------------L0.698-----------------------------------------|" - - "L0.712[802,961] 39ns |-----------------------------------------L0.712-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[802,961] 39ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.422, L0.436, L0.450, L0.464, L0.478, L0.492, L0.506, L0.520, L0.558, L0.572, L0.586, L0.600, L0.614, L0.628, L0.642, L0.656, L0.670, L0.684, L0.698, L0.712" - - " Creating 1 files" - - "**** Simulation run 220, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.423[962,1121] 20ns |-----------------------------------------L0.423-----------------------------------------|" - - "L0.437[962,1121] 21ns |-----------------------------------------L0.437-----------------------------------------|" - - "L0.451[962,1121] 22ns |-----------------------------------------L0.451-----------------------------------------|" - - "L0.465[962,1121] 23ns |-----------------------------------------L0.465-----------------------------------------|" - - "L0.479[962,1121] 24ns |-----------------------------------------L0.479-----------------------------------------|" - - "L0.493[962,1121] 25ns |-----------------------------------------L0.493-----------------------------------------|" - - "L0.507[962,1121] 26ns |-----------------------------------------L0.507-----------------------------------------|" - - "L0.521[962,1121] 27ns |-----------------------------------------L0.521-----------------------------------------|" - - "L0.559[962,1121] 28ns |-----------------------------------------L0.559-----------------------------------------|" - - "L0.573[962,1121] 29ns |-----------------------------------------L0.573-----------------------------------------|" - - "L0.587[962,1121] 30ns |-----------------------------------------L0.587-----------------------------------------|" - - "L0.601[962,1121] 31ns |-----------------------------------------L0.601-----------------------------------------|" - - "L0.615[962,1121] 32ns |-----------------------------------------L0.615-----------------------------------------|" - - "L0.629[962,1121] 33ns |-----------------------------------------L0.629-----------------------------------------|" - - "L0.643[962,1121] 34ns |-----------------------------------------L0.643-----------------------------------------|" - - "L0.657[962,1121] 35ns |-----------------------------------------L0.657-----------------------------------------|" - - "L0.671[962,1121] 36ns |-----------------------------------------L0.671-----------------------------------------|" - - "L0.685[962,1121] 37ns |-----------------------------------------L0.685-----------------------------------------|" - - "L0.699[962,1121] 38ns |-----------------------------------------L0.699-----------------------------------------|" - - "L0.713[962,1121] 39ns |-----------------------------------------L0.713-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[962,1121] 39ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.423, L0.437, L0.451, L0.465, L0.479, L0.493, L0.507, L0.521, L0.559, L0.573, L0.587, L0.601, L0.615, L0.629, L0.643, L0.657, L0.671, L0.685, L0.699, L0.713" - - " Creating 1 files" - "**** Simulation run 221, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.424[1122,1281] 20ns |-----------------------------------------L0.424-----------------------------------------|" - - "L0.438[1122,1281] 21ns |-----------------------------------------L0.438-----------------------------------------|" - - "L0.452[1122,1281] 22ns |-----------------------------------------L0.452-----------------------------------------|" - - "L0.466[1122,1281] 23ns |-----------------------------------------L0.466-----------------------------------------|" - - "L0.480[1122,1281] 24ns |-----------------------------------------L0.480-----------------------------------------|" - - "L0.494[1122,1281] 25ns |-----------------------------------------L0.494-----------------------------------------|" - - "L0.508[1122,1281] 26ns |-----------------------------------------L0.508-----------------------------------------|" - - "L0.522[1122,1281] 27ns |-----------------------------------------L0.522-----------------------------------------|" - - "L0.560[1122,1281] 28ns |-----------------------------------------L0.560-----------------------------------------|" - - "L0.574[1122,1281] 29ns |-----------------------------------------L0.574-----------------------------------------|" - - "L0.588[1122,1281] 30ns |-----------------------------------------L0.588-----------------------------------------|" - - "L0.602[1122,1281] 31ns |-----------------------------------------L0.602-----------------------------------------|" - - "L0.616[1122,1281] 32ns |-----------------------------------------L0.616-----------------------------------------|" - - "L0.630[1122,1281] 33ns |-----------------------------------------L0.630-----------------------------------------|" - - "L0.644[1122,1281] 34ns |-----------------------------------------L0.644-----------------------------------------|" - - "L0.658[1122,1281] 35ns |-----------------------------------------L0.658-----------------------------------------|" - - "L0.672[1122,1281] 36ns |-----------------------------------------L0.672-----------------------------------------|" - - "L0.686[1122,1281] 37ns |-----------------------------------------L0.686-----------------------------------------|" - - "L0.700[1122,1281] 38ns |-----------------------------------------L0.700-----------------------------------------|" - - "L0.714[1122,1281] 39ns |-----------------------------------------L0.714-----------------------------------------|" + - "L0.723[322,481] 40ns |-----------------------------------------L0.723-----------------------------------------|" + - "L0.737[322,481] 41ns |-----------------------------------------L0.737-----------------------------------------|" + - "L0.751[322,481] 42ns |-----------------------------------------L0.751-----------------------------------------|" + - "L0.765[322,481] 43ns |-----------------------------------------L0.765-----------------------------------------|" + - "L0.779[322,481] 44ns |-----------------------------------------L0.779-----------------------------------------|" + - "L0.793[322,481] 45ns |-----------------------------------------L0.793-----------------------------------------|" + - "L0.807[322,481] 46ns |-----------------------------------------L0.807-----------------------------------------|" + - "L0.821[322,481] 47ns |-----------------------------------------L0.821-----------------------------------------|" + - "L0.835[322,481] 48ns |-----------------------------------------L0.835-----------------------------------------|" + - "L0.849[322,481] 49ns 
|-----------------------------------------L0.849-----------------------------------------|" + - "L0.863[322,481] 50ns |-----------------------------------------L0.863-----------------------------------------|" + - "L0.877[322,481] 51ns |-----------------------------------------L0.877-----------------------------------------|" + - "L0.891[322,481] 52ns |-----------------------------------------L0.891-----------------------------------------|" + - "L0.905[322,481] 53ns |-----------------------------------------L0.905-----------------------------------------|" + - "L0.919[322,481] 54ns |-----------------------------------------L0.919-----------------------------------------|" + - "L0.933[322,481] 55ns |-----------------------------------------L0.933-----------------------------------------|" + - "L0.947[322,481] 56ns |-----------------------------------------L0.947-----------------------------------------|" + - "L0.961[322,481] 57ns |-----------------------------------------L0.961-----------------------------------------|" + - "L0.975[322,481] 58ns |-----------------------------------------L0.975-----------------------------------------|" + - "L0.989[322,481] 59ns |-----------------------------------------L0.989-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.424, L0.438, L0.452, L0.466, L0.480, L0.494, L0.508, L0.522, L0.560, L0.574, L0.588, L0.602, L0.616, L0.630, L0.644, L0.658, L0.672, L0.686, L0.700, L0.714" + - " Soft Deleting 20 files: L0.723, L0.737, L0.751, L0.765, L0.779, L0.793, L0.807, L0.821, L0.835, L0.849, L0.863, L0.877, L0.891, L0.905, L0.919, L0.933, L0.947, L0.961, L0.975, L0.989" - " Creating 1 files" - "**** Simulation run 222, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.425[1282,1441] 20ns |-----------------------------------------L0.425-----------------------------------------|" - - "L0.439[1282,1441] 21ns |-----------------------------------------L0.439-----------------------------------------|" - - "L0.453[1282,1441] 22ns |-----------------------------------------L0.453-----------------------------------------|" - - "L0.467[1282,1441] 23ns |-----------------------------------------L0.467-----------------------------------------|" - - "L0.481[1282,1441] 24ns |-----------------------------------------L0.481-----------------------------------------|" - - "L0.495[1282,1441] 25ns |-----------------------------------------L0.495-----------------------------------------|" - - "L0.509[1282,1441] 26ns |-----------------------------------------L0.509-----------------------------------------|" - - "L0.523[1282,1441] 27ns |-----------------------------------------L0.523-----------------------------------------|" - - "L0.561[1282,1441] 28ns |-----------------------------------------L0.561-----------------------------------------|" - - "L0.575[1282,1441] 29ns |-----------------------------------------L0.575-----------------------------------------|" - - "L0.589[1282,1441] 30ns |-----------------------------------------L0.589-----------------------------------------|" - - "L0.603[1282,1441] 31ns |-----------------------------------------L0.603-----------------------------------------|" - - "L0.617[1282,1441] 32ns |-----------------------------------------L0.617-----------------------------------------|" - - "L0.631[1282,1441] 33ns |-----------------------------------------L0.631-----------------------------------------|" - - "L0.645[1282,1441] 34ns |-----------------------------------------L0.645-----------------------------------------|" - - "L0.659[1282,1441] 35ns |-----------------------------------------L0.659-----------------------------------------|" - - "L0.673[1282,1441] 36ns |-----------------------------------------L0.673-----------------------------------------|" - - "L0.687[1282,1441] 37ns |-----------------------------------------L0.687-----------------------------------------|" - - "L0.701[1282,1441] 38ns |-----------------------------------------L0.701-----------------------------------------|" - - "L0.715[1282,1441] 39ns |-----------------------------------------L0.715-----------------------------------------|" + - "L0.1003[322,481] 60ns |----------------------------------------L0.1003-----------------------------------------|" + - "L0.1017[322,481] 61ns |----------------------------------------L0.1017-----------------------------------------|" + - "L0.1031[322,481] 62ns |----------------------------------------L0.1031-----------------------------------------|" + - "L0.1045[322,481] 63ns |----------------------------------------L0.1045-----------------------------------------|" + - "L0.1059[322,481] 64ns |----------------------------------------L0.1059-----------------------------------------|" + - "L0.1073[322,481] 65ns |----------------------------------------L0.1073-----------------------------------------|" + - "L0.1087[322,481] 66ns |----------------------------------------L0.1087-----------------------------------------|" + - "L0.1101[322,481] 67ns |----------------------------------------L0.1101-----------------------------------------|" + - "L0.1115[322,481] 68ns |----------------------------------------L0.1115-----------------------------------------|" + - "L0.1129[322,481] 69ns 
|----------------------------------------L0.1129-----------------------------------------|" + - "L0.1143[322,481] 70ns |----------------------------------------L0.1143-----------------------------------------|" + - "L0.1157[322,481] 71ns |----------------------------------------L0.1157-----------------------------------------|" + - "L0.1171[322,481] 72ns |----------------------------------------L0.1171-----------------------------------------|" + - "L0.1185[322,481] 73ns |----------------------------------------L0.1185-----------------------------------------|" + - "L0.1199[322,481] 74ns |----------------------------------------L0.1199-----------------------------------------|" + - "L0.1213[322,481] 75ns |----------------------------------------L0.1213-----------------------------------------|" + - "L0.1227[322,481] 76ns |----------------------------------------L0.1227-----------------------------------------|" + - "L0.1241[322,481] 77ns |----------------------------------------L0.1241-----------------------------------------|" + - "L0.1255[322,481] 78ns |----------------------------------------L0.1255-----------------------------------------|" + - "L0.1269[322,481] 79ns |----------------------------------------L0.1269-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.425, L0.439, L0.453, L0.467, L0.481, L0.495, L0.509, L0.523, L0.561, L0.575, L0.589, L0.603, L0.617, L0.631, L0.645, L0.659, L0.673, L0.687, L0.701, L0.715" + - " Soft Deleting 20 files: L0.1003, L0.1017, L0.1031, L0.1045, L0.1059, L0.1073, L0.1087, L0.1101, L0.1115, L0.1129, L0.1143, L0.1157, L0.1171, L0.1185, L0.1199, L0.1213, L0.1227, L0.1241, L0.1255, L0.1269" - " Creating 1 files" - "**** Simulation run 223, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.426[1442,1601] 20ns |-----------------------------------------L0.426-----------------------------------------|" - - "L0.440[1442,1601] 21ns |-----------------------------------------L0.440-----------------------------------------|" - - "L0.454[1442,1601] 22ns |-----------------------------------------L0.454-----------------------------------------|" - - "L0.468[1442,1601] 23ns |-----------------------------------------L0.468-----------------------------------------|" - - "L0.482[1442,1601] 24ns |-----------------------------------------L0.482-----------------------------------------|" - - "L0.496[1442,1601] 25ns |-----------------------------------------L0.496-----------------------------------------|" - - "L0.510[1442,1601] 26ns |-----------------------------------------L0.510-----------------------------------------|" - - "L0.524[1442,1601] 27ns |-----------------------------------------L0.524-----------------------------------------|" - - "L0.562[1442,1601] 28ns |-----------------------------------------L0.562-----------------------------------------|" - - "L0.576[1442,1601] 29ns |-----------------------------------------L0.576-----------------------------------------|" - - "L0.590[1442,1601] 30ns |-----------------------------------------L0.590-----------------------------------------|" - - "L0.604[1442,1601] 31ns |-----------------------------------------L0.604-----------------------------------------|" - - "L0.618[1442,1601] 32ns |-----------------------------------------L0.618-----------------------------------------|" - - "L0.632[1442,1601] 33ns |-----------------------------------------L0.632-----------------------------------------|" - - "L0.646[1442,1601] 34ns |-----------------------------------------L0.646-----------------------------------------|" - - "L0.660[1442,1601] 35ns |-----------------------------------------L0.660-----------------------------------------|" - - "L0.674[1442,1601] 36ns |-----------------------------------------L0.674-----------------------------------------|" - - "L0.688[1442,1601] 37ns |-----------------------------------------L0.688-----------------------------------------|" - - "L0.702[1442,1601] 38ns |-----------------------------------------L0.702-----------------------------------------|" - - "L0.716[1442,1601] 39ns |-----------------------------------------L0.716-----------------------------------------|" + - "L0.1283[322,481] 80ns |----------------------------------------L0.1283-----------------------------------------|" + - "L0.1297[322,481] 81ns |----------------------------------------L0.1297-----------------------------------------|" + - "L0.1311[322,481] 82ns |----------------------------------------L0.1311-----------------------------------------|" + - "L0.1325[322,481] 83ns |----------------------------------------L0.1325-----------------------------------------|" + - "L0.1339[322,481] 84ns |----------------------------------------L0.1339-----------------------------------------|" + - "L0.1353[322,481] 85ns |----------------------------------------L0.1353-----------------------------------------|" + - "L0.1367[322,481] 86ns |----------------------------------------L0.1367-----------------------------------------|" + - "L0.1381[322,481] 87ns |----------------------------------------L0.1381-----------------------------------------|" + - "L0.1395[322,481] 88ns |----------------------------------------L0.1395-----------------------------------------|" + - "L0.1409[322,481] 89ns 
|----------------------------------------L0.1409-----------------------------------------|" + - "L0.1423[322,481] 90ns |----------------------------------------L0.1423-----------------------------------------|" + - "L0.1437[322,481] 91ns |----------------------------------------L0.1437-----------------------------------------|" + - "L0.1451[322,481] 92ns |----------------------------------------L0.1451-----------------------------------------|" + - "L0.1465[322,481] 93ns |----------------------------------------L0.1465-----------------------------------------|" + - "L0.1479[322,481] 94ns |----------------------------------------L0.1479-----------------------------------------|" + - "L0.1493[322,481] 95ns |----------------------------------------L0.1493-----------------------------------------|" + - "L0.1507[322,481] 96ns |----------------------------------------L0.1507-----------------------------------------|" + - "L0.1521[322,481] 97ns |----------------------------------------L0.1521-----------------------------------------|" + - "L0.1535[322,481] 98ns |----------------------------------------L0.1535-----------------------------------------|" + - "L0.1549[322,481] 99ns |----------------------------------------L0.1549-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.426, L0.440, L0.454, L0.468, L0.482, L0.496, L0.510, L0.524, L0.562, L0.576, L0.590, L0.604, L0.618, L0.632, L0.646, L0.660, L0.674, L0.688, L0.702, L0.716" + - " Soft Deleting 20 files: L0.1283, L0.1297, L0.1311, L0.1325, L0.1339, L0.1353, L0.1367, L0.1381, L0.1395, L0.1409, L0.1423, L0.1437, L0.1451, L0.1465, L0.1479, L0.1493, L0.1507, L0.1521, L0.1535, L0.1549" - " Creating 1 files" - "**** Simulation run 224, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.427[1602,1761] 20ns |-----------------------------------------L0.427-----------------------------------------|" - - "L0.441[1602,1761] 21ns |-----------------------------------------L0.441-----------------------------------------|" - - "L0.455[1602,1761] 22ns |-----------------------------------------L0.455-----------------------------------------|" - - "L0.469[1602,1761] 23ns |-----------------------------------------L0.469-----------------------------------------|" - - "L0.483[1602,1761] 24ns |-----------------------------------------L0.483-----------------------------------------|" - - "L0.497[1602,1761] 25ns |-----------------------------------------L0.497-----------------------------------------|" - - "L0.511[1602,1761] 26ns |-----------------------------------------L0.511-----------------------------------------|" - - "L0.525[1602,1761] 27ns |-----------------------------------------L0.525-----------------------------------------|" - - "L0.563[1602,1761] 28ns |-----------------------------------------L0.563-----------------------------------------|" - - "L0.577[1602,1761] 29ns |-----------------------------------------L0.577-----------------------------------------|" - - "L0.591[1602,1761] 30ns |-----------------------------------------L0.591-----------------------------------------|" - - "L0.605[1602,1761] 31ns |-----------------------------------------L0.605-----------------------------------------|" - - "L0.619[1602,1761] 32ns |-----------------------------------------L0.619-----------------------------------------|" - - "L0.633[1602,1761] 33ns |-----------------------------------------L0.633-----------------------------------------|" - - "L0.647[1602,1761] 34ns |-----------------------------------------L0.647-----------------------------------------|" - - "L0.661[1602,1761] 35ns |-----------------------------------------L0.661-----------------------------------------|" - - "L0.675[1602,1761] 36ns |-----------------------------------------L0.675-----------------------------------------|" - - "L0.689[1602,1761] 37ns |-----------------------------------------L0.689-----------------------------------------|" - - "L0.703[1602,1761] 38ns |-----------------------------------------L0.703-----------------------------------------|" - - "L0.717[1602,1761] 39ns |-----------------------------------------L0.717-----------------------------------------|" + - "L0.1563[322,481] 100ns |----------------------------------------L0.1563-----------------------------------------|" + - "L0.1577[322,481] 101ns |----------------------------------------L0.1577-----------------------------------------|" + - "L0.1591[322,481] 102ns |----------------------------------------L0.1591-----------------------------------------|" + - "L0.1605[322,481] 103ns |----------------------------------------L0.1605-----------------------------------------|" + - "L0.1619[322,481] 104ns |----------------------------------------L0.1619-----------------------------------------|" + - "L0.1633[322,481] 105ns |----------------------------------------L0.1633-----------------------------------------|" + - "L0.1647[322,481] 106ns |----------------------------------------L0.1647-----------------------------------------|" + - "L0.1661[322,481] 107ns |----------------------------------------L0.1661-----------------------------------------|" + - "L0.1675[322,481] 108ns |----------------------------------------L0.1675-----------------------------------------|" + - "L0.1689[322,481] 
109ns |----------------------------------------L0.1689-----------------------------------------|" + - "L0.1703[322,481] 110ns |----------------------------------------L0.1703-----------------------------------------|" + - "L0.1717[322,481] 111ns |----------------------------------------L0.1717-----------------------------------------|" + - "L0.1731[322,481] 112ns |----------------------------------------L0.1731-----------------------------------------|" + - "L0.1745[322,481] 113ns |----------------------------------------L0.1745-----------------------------------------|" + - "L0.1759[322,481] 114ns |----------------------------------------L0.1759-----------------------------------------|" + - "L0.1773[322,481] 115ns |----------------------------------------L0.1773-----------------------------------------|" + - "L0.1787[322,481] 116ns |----------------------------------------L0.1787-----------------------------------------|" + - "L0.1801[322,481] 117ns |----------------------------------------L0.1801-----------------------------------------|" + - "L0.1815[322,481] 118ns |----------------------------------------L0.1815-----------------------------------------|" + - "L0.1829[322,481] 119ns |----------------------------------------L0.1829-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1602,1761] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.427, L0.441, L0.455, L0.469, L0.483, L0.497, L0.511, L0.525, L0.563, L0.577, L0.591, L0.605, L0.619, L0.633, L0.647, L0.661, L0.675, L0.689, L0.703, L0.717" + - " Soft Deleting 20 files: L0.1563, L0.1577, L0.1591, L0.1605, L0.1619, L0.1633, L0.1647, L0.1661, L0.1675, L0.1689, L0.1703, L0.1717, L0.1731, L0.1745, L0.1759, L0.1773, L0.1787, L0.1801, L0.1815, L0.1829" - " Creating 1 files" - "**** Simulation run 225, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.428[1762,2000] 20ns |-----------------------------------------L0.428-----------------------------------------|" - - "L0.442[1762,2000] 21ns |-----------------------------------------L0.442-----------------------------------------|" - - "L0.456[1762,2000] 22ns |-----------------------------------------L0.456-----------------------------------------|" - - "L0.470[1762,2000] 23ns |-----------------------------------------L0.470-----------------------------------------|" - - "L0.484[1762,2000] 24ns |-----------------------------------------L0.484-----------------------------------------|" - - "L0.498[1762,2000] 25ns |-----------------------------------------L0.498-----------------------------------------|" - - "L0.512[1762,2000] 26ns |-----------------------------------------L0.512-----------------------------------------|" - - "L0.526[1762,2000] 27ns |-----------------------------------------L0.526-----------------------------------------|" - - "L0.564[1762,2000] 28ns |-----------------------------------------L0.564-----------------------------------------|" - - "L0.578[1762,2000] 29ns |-----------------------------------------L0.578-----------------------------------------|" - - "L0.592[1762,2000] 30ns |-----------------------------------------L0.592-----------------------------------------|" - - "L0.606[1762,2000] 31ns |-----------------------------------------L0.606-----------------------------------------|" - - "L0.620[1762,2000] 32ns |-----------------------------------------L0.620-----------------------------------------|" - - "L0.634[1762,2000] 33ns |-----------------------------------------L0.634-----------------------------------------|" - - "L0.648[1762,2000] 34ns |-----------------------------------------L0.648-----------------------------------------|" - - "L0.662[1762,2000] 35ns |-----------------------------------------L0.662-----------------------------------------|" - - "L0.676[1762,2000] 36ns |-----------------------------------------L0.676-----------------------------------------|" - - "L0.690[1762,2000] 37ns |-----------------------------------------L0.690-----------------------------------------|" - - "L0.704[1762,2000] 38ns |-----------------------------------------L0.704-----------------------------------------|" - - "L0.718[1762,2000] 39ns |-----------------------------------------L0.718-----------------------------------------|" + - "L0.1843[322,481] 120ns |----------------------------------------L0.1843-----------------------------------------|" + - "L0.1857[322,481] 121ns |----------------------------------------L0.1857-----------------------------------------|" + - "L0.1871[322,481] 122ns |----------------------------------------L0.1871-----------------------------------------|" + - "L0.1885[322,481] 123ns |----------------------------------------L0.1885-----------------------------------------|" + - "L0.1899[322,481] 124ns |----------------------------------------L0.1899-----------------------------------------|" + - "L0.1913[322,481] 125ns |----------------------------------------L0.1913-----------------------------------------|" + - "L0.1927[322,481] 126ns |----------------------------------------L0.1927-----------------------------------------|" + - "L0.1941[322,481] 127ns |----------------------------------------L0.1941-----------------------------------------|" + - "L0.1955[322,481] 128ns |----------------------------------------L0.1955-----------------------------------------|" + - "L0.1969[322,481] 
129ns |----------------------------------------L0.1969-----------------------------------------|" + - "L0.1983[322,481] 130ns |----------------------------------------L0.1983-----------------------------------------|" + - "L0.1997[322,481] 131ns |----------------------------------------L0.1997-----------------------------------------|" + - "L0.2011[322,481] 132ns |----------------------------------------L0.2011-----------------------------------------|" + - "L0.2025[322,481] 133ns |----------------------------------------L0.2025-----------------------------------------|" + - "L0.2151[322,481] 134ns |----------------------------------------L0.2151-----------------------------------------|" + - "L0.2165[322,481] 135ns |----------------------------------------L0.2165-----------------------------------------|" + - "L0.2039[322,481] 136ns |----------------------------------------L0.2039-----------------------------------------|" + - "L0.2053[322,481] 137ns |----------------------------------------L0.2053-----------------------------------------|" + - "L0.2067[322,481] 138ns |----------------------------------------L0.2067-----------------------------------------|" + - "L0.2081[322,481] 139ns |----------------------------------------L0.2081-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 39ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.428, L0.442, L0.456, L0.470, L0.484, L0.498, L0.512, L0.526, L0.564, L0.578, L0.592, L0.606, L0.620, L0.634, L0.648, L0.662, L0.676, L0.690, L0.704, L0.718" + - " Soft Deleting 20 files: L0.1843, L0.1857, L0.1871, L0.1885, L0.1899, L0.1913, L0.1927, L0.1941, L0.1955, L0.1969, L0.1983, L0.1997, L0.2011, L0.2025, L0.2039, L0.2053, L0.2067, L0.2081, L0.2151, L0.2165" - " Creating 1 files" - "**** Simulation run 226, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2934[2001,2086] 39ns |----------------------------------------L0.2934-----------------------------------------|" - - "L0.733[2001,2086] 40ns |-----------------------------------------L0.733-----------------------------------------|" - - "L0.747[2001,2086] 41ns |-----------------------------------------L0.747-----------------------------------------|" - - "L0.761[2001,2086] 42ns |-----------------------------------------L0.761-----------------------------------------|" - - "L0.775[2001,2086] 43ns |-----------------------------------------L0.775-----------------------------------------|" - - "L0.789[2001,2086] 44ns |-----------------------------------------L0.789-----------------------------------------|" - - "L0.803[2001,2086] 45ns |-----------------------------------------L0.803-----------------------------------------|" - - "L0.817[2001,2086] 46ns |-----------------------------------------L0.817-----------------------------------------|" - - "L0.831[2001,2086] 47ns |-----------------------------------------L0.831-----------------------------------------|" - - "L0.845[2001,2086] 48ns |-----------------------------------------L0.845-----------------------------------------|" - - "L0.859[2001,2086] 49ns |-----------------------------------------L0.859-----------------------------------------|" - - "L0.873[2001,2086] 50ns |-----------------------------------------L0.873-----------------------------------------|" - - "L0.887[2001,2086] 51ns |-----------------------------------------L0.887-----------------------------------------|" - - "L0.901[2001,2086] 52ns |-----------------------------------------L0.901-----------------------------------------|" - - "L0.915[2001,2086] 53ns |-----------------------------------------L0.915-----------------------------------------|" - - "L0.929[2001,2086] 54ns |-----------------------------------------L0.929-----------------------------------------|" - - "L0.943[2001,2086] 55ns |-----------------------------------------L0.943-----------------------------------------|" - - "L0.957[2001,2086] 56ns |-----------------------------------------L0.957-----------------------------------------|" - - "L0.971[2001,2086] 57ns |-----------------------------------------L0.971-----------------------------------------|" - - "L0.985[2001,2086] 58ns |-----------------------------------------L0.985-----------------------------------------|" + - "L0.2095[322,481] 140ns |----------------------------------------L0.2095-----------------------------------------|" + - "L0.2109[322,481] 141ns |----------------------------------------L0.2109-----------------------------------------|" + - "L0.2123[322,481] 142ns |----------------------------------------L0.2123-----------------------------------------|" + - "L0.2137[322,481] 143ns |----------------------------------------L0.2137-----------------------------------------|" + - "L0.2179[322,481] 144ns |----------------------------------------L0.2179-----------------------------------------|" + - "L0.2193[322,481] 145ns |----------------------------------------L0.2193-----------------------------------------|" + - "L0.2207[322,481] 146ns |----------------------------------------L0.2207-----------------------------------------|" + - "L0.2221[322,481] 147ns |----------------------------------------L0.2221-----------------------------------------|" + - "L0.2235[322,481] 148ns |----------------------------------------L0.2235-----------------------------------------|" + - "L0.2249[322,481] 
149ns |----------------------------------------L0.2249-----------------------------------------|" + - "L0.2263[322,481] 150ns |----------------------------------------L0.2263-----------------------------------------|" + - "L0.2277[322,481] 151ns |----------------------------------------L0.2277-----------------------------------------|" + - "L0.2291[322,481] 152ns |----------------------------------------L0.2291-----------------------------------------|" + - "L0.2305[322,481] 153ns |----------------------------------------L0.2305-----------------------------------------|" + - "L0.2319[322,481] 154ns |----------------------------------------L0.2319-----------------------------------------|" + - "L0.2333[322,481] 155ns |----------------------------------------L0.2333-----------------------------------------|" + - "L0.2347[322,481] 156ns |----------------------------------------L0.2347-----------------------------------------|" + - "L0.2361[322,481] 157ns |----------------------------------------L0.2361-----------------------------------------|" + - "L0.2375[322,481] 158ns |----------------------------------------L0.2375-----------------------------------------|" + - "L0.2389[322,481] 159ns |----------------------------------------L0.2389-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 58ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.733, L0.747, L0.761, L0.775, L0.789, L0.803, L0.817, L0.831, L0.845, L0.859, L0.873, L0.887, L0.901, L0.915, L0.929, L0.943, L0.957, L0.971, L0.985, L0.2934" - - " Creating 1 files" - - "**** Simulation run 227, type=compact(ManySmallFiles). 
20 Input Files, 390b total:" - - "L0 " - - "L0.2935[2087,390000] 39ns 200b|-------------------------L0.2935--------------------------| " - - "L0.734[2087,400000] 40ns 10b|--------------------------L0.734---------------------------| " - - "L0.748[2087,410000] 41ns 10b|---------------------------L0.748----------------------------| " - - "L0.762[2087,420000] 42ns 10b|----------------------------L0.762-----------------------------| " - - "L0.776[2087,430000] 43ns 10b|-----------------------------L0.776-----------------------------| " - - "L0.790[2087,440000] 44ns 10b|------------------------------L0.790------------------------------| " - - "L0.804[2087,450000] 45ns 10b|------------------------------L0.804-------------------------------| " - - "L0.818[2087,460000] 46ns 10b|-------------------------------L0.818--------------------------------| " - - "L0.832[2087,470000] 47ns 10b|--------------------------------L0.832--------------------------------| " - - "L0.846[2087,480000] 48ns 10b|---------------------------------L0.846---------------------------------| " - - "L0.860[2087,490000] 49ns 10b|---------------------------------L0.860----------------------------------| " - - "L0.874[2087,500000] 50ns 10b|----------------------------------L0.874-----------------------------------| " - - "L0.888[2087,510000] 51ns 10b|-----------------------------------L0.888------------------------------------| " - - "L0.902[2087,520000] 52ns 10b|------------------------------------L0.902------------------------------------| " - - "L0.916[2087,530000] 53ns 10b|-------------------------------------L0.916-------------------------------------| " - - "L0.930[2087,540000] 54ns 10b|-------------------------------------L0.930--------------------------------------| " - - "L0.944[2087,550000] 55ns 10b|--------------------------------------L0.944---------------------------------------| " - - "L0.958[2087,560000] 56ns 10b|---------------------------------------L0.958---------------------------------------| " - - "L0.972[2087,570000] 57ns 10b|----------------------------------------L0.972----------------------------------------| " - - "L0.986[2087,580000] 58ns 10b|-----------------------------------------L0.986-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 390b total:" - - "L0, all files 390b " - - "L0.?[2087,580000] 58ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.734, L0.748, L0.762, L0.776, L0.790, L0.804, L0.818, L0.832, L0.846, L0.860, L0.874, L0.888, L0.902, L0.916, L0.930, L0.944, L0.958, L0.972, L0.986, L0.2935" + - "L0.?[322,481] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2095, L0.2109, L0.2123, L0.2137, L0.2179, L0.2193, L0.2207, L0.2221, L0.2235, L0.2249, L0.2263, L0.2277, L0.2291, L0.2305, L0.2319, L0.2333, L0.2347, L0.2361, L0.2375, L0.2389" - " Creating 1 files" - - "**** Simulation run 228, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 227, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2937[20,161] 39ns |----------------------------------------L0.2937-----------------------------------------|" - - "L0.721[40,161] 40ns |----------------------------------L0.721-----------------------------------| " - - "L0.735[41,161] 41ns |----------------------------------L0.735----------------------------------| " - - "L0.749[42,161] 42ns |---------------------------------L0.749----------------------------------| " - - "L0.763[43,161] 43ns |---------------------------------L0.763----------------------------------| " - - "L0.777[44,161] 44ns |---------------------------------L0.777---------------------------------| " - - "L0.791[45,161] 45ns |---------------------------------L0.791---------------------------------| " - - "L0.805[46,161] 46ns |--------------------------------L0.805---------------------------------| " - - "L0.819[47,161] 47ns |--------------------------------L0.819--------------------------------| " - - "L0.833[48,161] 48ns |--------------------------------L0.833--------------------------------| " - - "L0.847[49,161] 49ns |-------------------------------L0.847--------------------------------| " - - "L0.861[50,161] 50ns |-------------------------------L0.861-------------------------------| " - - "L0.875[51,161] 51ns |-------------------------------L0.875-------------------------------| " - - "L0.889[52,161] 52ns |------------------------------L0.889-------------------------------| " - - "L0.903[53,161] 53ns |------------------------------L0.903------------------------------| " - - "L0.917[54,161] 54ns |------------------------------L0.917------------------------------| " - - "L0.931[55,161] 55ns |-----------------------------L0.931------------------------------| " - - "L0.945[56,161] 56ns |-----------------------------L0.945------------------------------| " - - "L0.959[57,161] 57ns |-----------------------------L0.959-----------------------------| " - - "L0.973[58,161] 58ns |----------------------------L0.973-----------------------------| " + - "L0.2403[322,481] 160ns |----------------------------------------L0.2403-----------------------------------------|" + - "L0.2417[322,481] 161ns |----------------------------------------L0.2417-----------------------------------------|" + - "L0.2430[322,481] 162ns |----------------------------------------L0.2430-----------------------------------------|" + - "L0.2443[322,481] 163ns |----------------------------------------L0.2443-----------------------------------------|" + - "L0.2456[322,481] 164ns |----------------------------------------L0.2456-----------------------------------------|" + - "L0.2469[322,481] 165ns |----------------------------------------L0.2469-----------------------------------------|" + - "L0.2482[322,481] 166ns |----------------------------------------L0.2482-----------------------------------------|" + - "L0.2495[322,481] 167ns |----------------------------------------L0.2495-----------------------------------------|" + - "L0.2508[322,481] 168ns |----------------------------------------L0.2508-----------------------------------------|" + - "L0.2521[322,481] 169ns |----------------------------------------L0.2521-----------------------------------------|" + - "L0.2534[322,481] 170ns |----------------------------------------L0.2534-----------------------------------------|" + - "L0.2547[322,481] 171ns |----------------------------------------L0.2547-----------------------------------------|" + - "L0.2560[322,481] 172ns 
|----------------------------------------L0.2560-----------------------------------------|" + - "L0.2573[322,481] 173ns |----------------------------------------L0.2573-----------------------------------------|" + - "L0.2586[322,481] 174ns |----------------------------------------L0.2586-----------------------------------------|" + - "L0.2599[322,481] 175ns |----------------------------------------L0.2599-----------------------------------------|" + - "L0.2612[322,481] 176ns |----------------------------------------L0.2612-----------------------------------------|" + - "L0.2625[322,481] 177ns |----------------------------------------L0.2625-----------------------------------------|" + - "L0.2638[322,481] 178ns |----------------------------------------L0.2638-----------------------------------------|" + - "L0.2651[322,481] 179ns |----------------------------------------L0.2651-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[20,161] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.721, L0.735, L0.749, L0.763, L0.777, L0.791, L0.805, L0.819, L0.833, L0.847, L0.861, L0.875, L0.889, L0.903, L0.917, L0.931, L0.945, L0.959, L0.973, L0.2937" + - " Soft Deleting 20 files: L0.2403, L0.2417, L0.2430, L0.2443, L0.2456, L0.2469, L0.2482, L0.2495, L0.2508, L0.2521, L0.2534, L0.2547, L0.2560, L0.2573, L0.2586, L0.2599, L0.2612, L0.2625, L0.2638, L0.2651" - " Creating 1 files" - - "**** Simulation run 229, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 228, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2938[162,321] 39ns |----------------------------------------L0.2938-----------------------------------------|" - - "L0.722[162,321] 40ns |-----------------------------------------L0.722-----------------------------------------|" - - "L0.736[162,321] 41ns |-----------------------------------------L0.736-----------------------------------------|" - - "L0.750[162,321] 42ns |-----------------------------------------L0.750-----------------------------------------|" - - "L0.764[162,321] 43ns |-----------------------------------------L0.764-----------------------------------------|" - - "L0.778[162,321] 44ns |-----------------------------------------L0.778-----------------------------------------|" - - "L0.792[162,321] 45ns |-----------------------------------------L0.792-----------------------------------------|" - - "L0.806[162,321] 46ns |-----------------------------------------L0.806-----------------------------------------|" - - "L0.820[162,321] 47ns |-----------------------------------------L0.820-----------------------------------------|" - - "L0.834[162,321] 48ns |-----------------------------------------L0.834-----------------------------------------|" - - "L0.848[162,321] 49ns |-----------------------------------------L0.848-----------------------------------------|" - - "L0.862[162,321] 50ns |-----------------------------------------L0.862-----------------------------------------|" - - "L0.876[162,321] 51ns |-----------------------------------------L0.876-----------------------------------------|" - - "L0.890[162,321] 52ns |-----------------------------------------L0.890-----------------------------------------|" - - "L0.904[162,321] 53ns |-----------------------------------------L0.904-----------------------------------------|" - - "L0.918[162,321] 54ns |-----------------------------------------L0.918-----------------------------------------|" - - "L0.932[162,321] 55ns |-----------------------------------------L0.932-----------------------------------------|" - - "L0.946[162,321] 56ns |-----------------------------------------L0.946-----------------------------------------|" - - "L0.960[162,321] 57ns |-----------------------------------------L0.960-----------------------------------------|" - - "L0.974[162,321] 58ns |-----------------------------------------L0.974-----------------------------------------|" + - "L0.2664[322,481] 180ns |----------------------------------------L0.2664-----------------------------------------|" + - "L0.2677[322,481] 181ns |----------------------------------------L0.2677-----------------------------------------|" + - "L0.2690[322,481] 182ns |----------------------------------------L0.2690-----------------------------------------|" + - "L0.2703[322,481] 183ns |----------------------------------------L0.2703-----------------------------------------|" + - "L0.2716[322,481] 184ns |----------------------------------------L0.2716-----------------------------------------|" + - "L0.2729[322,481] 185ns |----------------------------------------L0.2729-----------------------------------------|" + - "L0.2742[322,481] 186ns |----------------------------------------L0.2742-----------------------------------------|" + - "L0.2755[322,481] 187ns |----------------------------------------L0.2755-----------------------------------------|" + - "L0.2768[322,481] 188ns |----------------------------------------L0.2768-----------------------------------------|" + - "L0.2781[322,481] 189ns 
|----------------------------------------L0.2781-----------------------------------------|" + - "L0.2794[322,481] 190ns |----------------------------------------L0.2794-----------------------------------------|" + - "L0.2807[322,481] 191ns |----------------------------------------L0.2807-----------------------------------------|" + - "L0.2820[322,481] 192ns |----------------------------------------L0.2820-----------------------------------------|" + - "L0.2833[322,481] 193ns |----------------------------------------L0.2833-----------------------------------------|" + - "L0.2846[322,481] 194ns |----------------------------------------L0.2846-----------------------------------------|" + - "L0.2859[322,481] 195ns |----------------------------------------L0.2859-----------------------------------------|" + - "L0.2872[322,481] 196ns |----------------------------------------L0.2872-----------------------------------------|" + - "L0.2885[322,481] 197ns |----------------------------------------L0.2885-----------------------------------------|" + - "L0.2898[322,481] 198ns |----------------------------------------L0.2898-----------------------------------------|" + - "L0.2911[322,481] 199ns |----------------------------------------L0.2911-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[322,481] 199ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2664, L0.2677, L0.2690, L0.2703, L0.2716, L0.2729, L0.2742, L0.2755, L0.2768, L0.2781, L0.2794, L0.2807, L0.2820, L0.2833, L0.2846, L0.2859, L0.2872, L0.2885, L0.2898, L0.2911" + - " Creating 1 files" + - "**** Simulation run 229, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.204[482,641] 0ns |-----------------------------------------L0.204-----------------------------------------|" + - "L0.216[482,641] 1ns |-----------------------------------------L0.216-----------------------------------------|" + - "L0.228[482,641] 2ns |-----------------------------------------L0.228-----------------------------------------|" + - "L0.240[482,641] 3ns |-----------------------------------------L0.240-----------------------------------------|" + - "L0.252[482,641] 4ns |-----------------------------------------L0.252-----------------------------------------|" + - "L0.264[482,641] 5ns |-----------------------------------------L0.264-----------------------------------------|" + - "L0.276[482,641] 6ns |-----------------------------------------L0.276-----------------------------------------|" + - "L0.288[482,641] 7ns |-----------------------------------------L0.288-----------------------------------------|" + - "L0.300[482,641] 8ns |-----------------------------------------L0.300-----------------------------------------|" + - "L0.312[482,641] 9ns |-----------------------------------------L0.312-----------------------------------------|" + - "L0.324[482,641] 10ns |-----------------------------------------L0.324-----------------------------------------|" + - "L0.336[482,641] 11ns |-----------------------------------------L0.336-----------------------------------------|" + - "L0.348[482,641] 12ns |-----------------------------------------L0.348-----------------------------------------|" + - "L0.360[482,641] 13ns |-----------------------------------------L0.360-----------------------------------------|" + - "L0.372[482,641] 14ns |-----------------------------------------L0.372-----------------------------------------|" + - "L0.384[482,641] 15ns |-----------------------------------------L0.384-----------------------------------------|" + - "L0.396[482,641] 16ns |-----------------------------------------L0.396-----------------------------------------|" + - "L0.408[482,641] 17ns |-----------------------------------------L0.408-----------------------------------------|" + - "L0.532[482,641] 18ns |-----------------------------------------L0.532-----------------------------------------|" + - "L0.544[482,641] 19ns |-----------------------------------------L0.544-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[482,641] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.722, L0.736, L0.750, L0.764, L0.778, L0.792, L0.806, L0.820, L0.834, L0.848, L0.862, L0.876, L0.890, L0.904, L0.918, L0.932, L0.946, L0.960, L0.974, L0.2938" + - " Soft Deleting 20 files: L0.204, L0.216, L0.228, L0.240, L0.252, L0.264, L0.276, L0.288, L0.300, L0.312, L0.324, L0.336, L0.348, L0.360, L0.372, L0.384, L0.396, L0.408, L0.532, L0.544" - " Creating 1 files" - "**** Simulation run 230, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2939[322,481] 39ns |----------------------------------------L0.2939-----------------------------------------|" - - "L0.723[322,481] 40ns |-----------------------------------------L0.723-----------------------------------------|" - - "L0.737[322,481] 41ns |-----------------------------------------L0.737-----------------------------------------|" - - "L0.751[322,481] 42ns |-----------------------------------------L0.751-----------------------------------------|" - - "L0.765[322,481] 43ns |-----------------------------------------L0.765-----------------------------------------|" - - "L0.779[322,481] 44ns |-----------------------------------------L0.779-----------------------------------------|" - - "L0.793[322,481] 45ns |-----------------------------------------L0.793-----------------------------------------|" - - "L0.807[322,481] 46ns |-----------------------------------------L0.807-----------------------------------------|" - - "L0.821[322,481] 47ns |-----------------------------------------L0.821-----------------------------------------|" - - "L0.835[322,481] 48ns |-----------------------------------------L0.835-----------------------------------------|" - - "L0.849[322,481] 49ns |-----------------------------------------L0.849-----------------------------------------|" - - "L0.863[322,481] 50ns |-----------------------------------------L0.863-----------------------------------------|" - - "L0.877[322,481] 51ns |-----------------------------------------L0.877-----------------------------------------|" - - "L0.891[322,481] 52ns |-----------------------------------------L0.891-----------------------------------------|" - - "L0.905[322,481] 53ns |-----------------------------------------L0.905-----------------------------------------|" - - "L0.919[322,481] 54ns |-----------------------------------------L0.919-----------------------------------------|" - - "L0.933[322,481] 55ns |-----------------------------------------L0.933-----------------------------------------|" - - "L0.947[322,481] 56ns |-----------------------------------------L0.947-----------------------------------------|" - - "L0.961[322,481] 57ns |-----------------------------------------L0.961-----------------------------------------|" - - "L0.975[322,481] 58ns |-----------------------------------------L0.975-----------------------------------------|" + - "L0.420[482,641] 20ns |-----------------------------------------L0.420-----------------------------------------|" + - "L0.434[482,641] 21ns |-----------------------------------------L0.434-----------------------------------------|" + - "L0.448[482,641] 22ns |-----------------------------------------L0.448-----------------------------------------|" + - "L0.462[482,641] 23ns |-----------------------------------------L0.462-----------------------------------------|" + - "L0.476[482,641] 24ns |-----------------------------------------L0.476-----------------------------------------|" + - "L0.490[482,641] 25ns |-----------------------------------------L0.490-----------------------------------------|" + - "L0.504[482,641] 26ns |-----------------------------------------L0.504-----------------------------------------|" + - "L0.518[482,641] 27ns |-----------------------------------------L0.518-----------------------------------------|" + - "L0.556[482,641] 28ns |-----------------------------------------L0.556-----------------------------------------|" + - "L0.570[482,641] 29ns 
|-----------------------------------------L0.570-----------------------------------------|" + - "L0.584[482,641] 30ns |-----------------------------------------L0.584-----------------------------------------|" + - "L0.598[482,641] 31ns |-----------------------------------------L0.598-----------------------------------------|" + - "L0.612[482,641] 32ns |-----------------------------------------L0.612-----------------------------------------|" + - "L0.626[482,641] 33ns |-----------------------------------------L0.626-----------------------------------------|" + - "L0.640[482,641] 34ns |-----------------------------------------L0.640-----------------------------------------|" + - "L0.654[482,641] 35ns |-----------------------------------------L0.654-----------------------------------------|" + - "L0.668[482,641] 36ns |-----------------------------------------L0.668-----------------------------------------|" + - "L0.682[482,641] 37ns |-----------------------------------------L0.682-----------------------------------------|" + - "L0.696[482,641] 38ns |-----------------------------------------L0.696-----------------------------------------|" + - "L0.710[482,641] 39ns |-----------------------------------------L0.710-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[322,481] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.723, L0.737, L0.751, L0.765, L0.779, L0.793, L0.807, L0.821, L0.835, L0.849, L0.863, L0.877, L0.891, L0.905, L0.919, L0.933, L0.947, L0.961, L0.975, L0.2939" + - " Soft Deleting 20 files: L0.420, L0.434, L0.448, L0.462, L0.476, L0.490, L0.504, L0.518, L0.556, L0.570, L0.584, L0.598, L0.612, L0.626, L0.640, L0.654, L0.668, L0.682, L0.696, L0.710" - " Creating 1 files" - "**** Simulation run 231, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2940[482,641] 39ns |----------------------------------------L0.2940-----------------------------------------|" - "L0.724[482,641] 40ns |-----------------------------------------L0.724-----------------------------------------|" - "L0.738[482,641] 41ns |-----------------------------------------L0.738-----------------------------------------|" - "L0.752[482,641] 42ns |-----------------------------------------L0.752-----------------------------------------|" @@ -6346,408 +6327,295 @@ async fn stuck_l0_large_l0s() { - "L0.948[482,641] 56ns |-----------------------------------------L0.948-----------------------------------------|" - "L0.962[482,641] 57ns |-----------------------------------------L0.962-----------------------------------------|" - "L0.976[482,641] 58ns |-----------------------------------------L0.976-----------------------------------------|" + - "L0.990[482,641] 59ns |-----------------------------------------L0.990-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.724, L0.738, L0.752, L0.766, L0.780, L0.794, L0.808, L0.822, L0.836, L0.850, L0.864, L0.878, L0.892, L0.906, L0.920, L0.934, L0.948, L0.962, L0.976, L0.2940" + - " Soft Deleting 20 files: L0.724, L0.738, L0.752, L0.766, L0.780, L0.794, L0.808, L0.822, L0.836, L0.850, L0.864, L0.878, L0.892, L0.906, L0.920, L0.934, L0.948, L0.962, L0.976, L0.990" - " Creating 1 files" - "**** Simulation run 232, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2941[642,801] 39ns |----------------------------------------L0.2941-----------------------------------------|" - - "L0.725[642,801] 40ns |-----------------------------------------L0.725-----------------------------------------|" - - "L0.739[642,801] 41ns |-----------------------------------------L0.739-----------------------------------------|" - - "L0.753[642,801] 42ns |-----------------------------------------L0.753-----------------------------------------|" - - "L0.767[642,801] 43ns |-----------------------------------------L0.767-----------------------------------------|" - - "L0.781[642,801] 44ns |-----------------------------------------L0.781-----------------------------------------|" - - "L0.795[642,801] 45ns |-----------------------------------------L0.795-----------------------------------------|" - - "L0.809[642,801] 46ns |-----------------------------------------L0.809-----------------------------------------|" - - "L0.823[642,801] 47ns |-----------------------------------------L0.823-----------------------------------------|" - - "L0.837[642,801] 48ns |-----------------------------------------L0.837-----------------------------------------|" - - "L0.851[642,801] 49ns |-----------------------------------------L0.851-----------------------------------------|" - - "L0.865[642,801] 50ns |-----------------------------------------L0.865-----------------------------------------|" - - "L0.879[642,801] 51ns |-----------------------------------------L0.879-----------------------------------------|" - - "L0.893[642,801] 52ns |-----------------------------------------L0.893-----------------------------------------|" - - "L0.907[642,801] 53ns |-----------------------------------------L0.907-----------------------------------------|" - - "L0.921[642,801] 54ns |-----------------------------------------L0.921-----------------------------------------|" - - "L0.935[642,801] 55ns |-----------------------------------------L0.935-----------------------------------------|" - - "L0.949[642,801] 56ns |-----------------------------------------L0.949-----------------------------------------|" - - "L0.963[642,801] 57ns |-----------------------------------------L0.963-----------------------------------------|" - - "L0.977[642,801] 58ns |-----------------------------------------L0.977-----------------------------------------|" + - "L0.1004[482,641] 60ns |----------------------------------------L0.1004-----------------------------------------|" + - "L0.1018[482,641] 61ns |----------------------------------------L0.1018-----------------------------------------|" + - "L0.1032[482,641] 62ns |----------------------------------------L0.1032-----------------------------------------|" + - "L0.1046[482,641] 63ns |----------------------------------------L0.1046-----------------------------------------|" + - "L0.1060[482,641] 64ns |----------------------------------------L0.1060-----------------------------------------|" + - "L0.1074[482,641] 65ns |----------------------------------------L0.1074-----------------------------------------|" + - "L0.1088[482,641] 66ns |----------------------------------------L0.1088-----------------------------------------|" + - "L0.1102[482,641] 67ns |----------------------------------------L0.1102-----------------------------------------|" + - "L0.1116[482,641] 68ns |----------------------------------------L0.1116-----------------------------------------|" + - "L0.1130[482,641] 69ns 
|----------------------------------------L0.1130-----------------------------------------|" + - "L0.1144[482,641] 70ns |----------------------------------------L0.1144-----------------------------------------|" + - "L0.1158[482,641] 71ns |----------------------------------------L0.1158-----------------------------------------|" + - "L0.1172[482,641] 72ns |----------------------------------------L0.1172-----------------------------------------|" + - "L0.1186[482,641] 73ns |----------------------------------------L0.1186-----------------------------------------|" + - "L0.1200[482,641] 74ns |----------------------------------------L0.1200-----------------------------------------|" + - "L0.1214[482,641] 75ns |----------------------------------------L0.1214-----------------------------------------|" + - "L0.1228[482,641] 76ns |----------------------------------------L0.1228-----------------------------------------|" + - "L0.1242[482,641] 77ns |----------------------------------------L0.1242-----------------------------------------|" + - "L0.1256[482,641] 78ns |----------------------------------------L0.1256-----------------------------------------|" + - "L0.1270[482,641] 79ns |----------------------------------------L0.1270-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.725, L0.739, L0.753, L0.767, L0.781, L0.795, L0.809, L0.823, L0.837, L0.851, L0.865, L0.879, L0.893, L0.907, L0.921, L0.935, L0.949, L0.963, L0.977, L0.2941" + - " Soft Deleting 20 files: L0.1004, L0.1018, L0.1032, L0.1046, L0.1060, L0.1074, L0.1088, L0.1102, L0.1116, L0.1130, L0.1144, L0.1158, L0.1172, L0.1186, L0.1200, L0.1214, L0.1228, L0.1242, L0.1256, L0.1270" - " Creating 1 files" - "**** Simulation run 233, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2942[802,961] 39ns |----------------------------------------L0.2942-----------------------------------------|" - - "L0.726[802,961] 40ns |-----------------------------------------L0.726-----------------------------------------|" - - "L0.740[802,961] 41ns |-----------------------------------------L0.740-----------------------------------------|" - - "L0.754[802,961] 42ns |-----------------------------------------L0.754-----------------------------------------|" - - "L0.768[802,961] 43ns |-----------------------------------------L0.768-----------------------------------------|" - - "L0.782[802,961] 44ns |-----------------------------------------L0.782-----------------------------------------|" - - "L0.796[802,961] 45ns |-----------------------------------------L0.796-----------------------------------------|" - - "L0.810[802,961] 46ns |-----------------------------------------L0.810-----------------------------------------|" - - "L0.824[802,961] 47ns |-----------------------------------------L0.824-----------------------------------------|" - - "L0.838[802,961] 48ns |-----------------------------------------L0.838-----------------------------------------|" - - "L0.852[802,961] 49ns |-----------------------------------------L0.852-----------------------------------------|" - - "L0.866[802,961] 50ns |-----------------------------------------L0.866-----------------------------------------|" - - "L0.880[802,961] 51ns |-----------------------------------------L0.880-----------------------------------------|" - - "L0.894[802,961] 52ns |-----------------------------------------L0.894-----------------------------------------|" - - "L0.908[802,961] 53ns |-----------------------------------------L0.908-----------------------------------------|" - - "L0.922[802,961] 54ns |-----------------------------------------L0.922-----------------------------------------|" - - "L0.936[802,961] 55ns |-----------------------------------------L0.936-----------------------------------------|" - - "L0.950[802,961] 56ns |-----------------------------------------L0.950-----------------------------------------|" - - "L0.964[802,961] 57ns |-----------------------------------------L0.964-----------------------------------------|" - - "L0.978[802,961] 58ns |-----------------------------------------L0.978-----------------------------------------|" + - "L0.1284[482,641] 80ns |----------------------------------------L0.1284-----------------------------------------|" + - "L0.1298[482,641] 81ns |----------------------------------------L0.1298-----------------------------------------|" + - "L0.1312[482,641] 82ns |----------------------------------------L0.1312-----------------------------------------|" + - "L0.1326[482,641] 83ns |----------------------------------------L0.1326-----------------------------------------|" + - "L0.1340[482,641] 84ns |----------------------------------------L0.1340-----------------------------------------|" + - "L0.1354[482,641] 85ns |----------------------------------------L0.1354-----------------------------------------|" + - "L0.1368[482,641] 86ns |----------------------------------------L0.1368-----------------------------------------|" + - "L0.1382[482,641] 87ns |----------------------------------------L0.1382-----------------------------------------|" + - "L0.1396[482,641] 88ns |----------------------------------------L0.1396-----------------------------------------|" + - "L0.1410[482,641] 89ns 
|----------------------------------------L0.1410-----------------------------------------|" + - "L0.1424[482,641] 90ns |----------------------------------------L0.1424-----------------------------------------|" + - "L0.1438[482,641] 91ns |----------------------------------------L0.1438-----------------------------------------|" + - "L0.1452[482,641] 92ns |----------------------------------------L0.1452-----------------------------------------|" + - "L0.1466[482,641] 93ns |----------------------------------------L0.1466-----------------------------------------|" + - "L0.1480[482,641] 94ns |----------------------------------------L0.1480-----------------------------------------|" + - "L0.1494[482,641] 95ns |----------------------------------------L0.1494-----------------------------------------|" + - "L0.1508[482,641] 96ns |----------------------------------------L0.1508-----------------------------------------|" + - "L0.1522[482,641] 97ns |----------------------------------------L0.1522-----------------------------------------|" + - "L0.1536[482,641] 98ns |----------------------------------------L0.1536-----------------------------------------|" + - "L0.1550[482,641] 99ns |----------------------------------------L0.1550-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.726, L0.740, L0.754, L0.768, L0.782, L0.796, L0.810, L0.824, L0.838, L0.852, L0.866, L0.880, L0.894, L0.908, L0.922, L0.936, L0.950, L0.964, L0.978, L0.2942" + - " Soft Deleting 20 files: L0.1284, L0.1298, L0.1312, L0.1326, L0.1340, L0.1354, L0.1368, L0.1382, L0.1396, L0.1410, L0.1424, L0.1438, L0.1452, L0.1466, L0.1480, L0.1494, L0.1508, L0.1522, L0.1536, L0.1550" - " Creating 1 files" - "**** Simulation run 234, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2943[962,1121] 39ns |----------------------------------------L0.2943-----------------------------------------|" - - "L0.727[962,1121] 40ns |-----------------------------------------L0.727-----------------------------------------|" - - "L0.741[962,1121] 41ns |-----------------------------------------L0.741-----------------------------------------|" - - "L0.755[962,1121] 42ns |-----------------------------------------L0.755-----------------------------------------|" - - "L0.769[962,1121] 43ns |-----------------------------------------L0.769-----------------------------------------|" - - "L0.783[962,1121] 44ns |-----------------------------------------L0.783-----------------------------------------|" - - "L0.797[962,1121] 45ns |-----------------------------------------L0.797-----------------------------------------|" - - "L0.811[962,1121] 46ns |-----------------------------------------L0.811-----------------------------------------|" - - "L0.825[962,1121] 47ns |-----------------------------------------L0.825-----------------------------------------|" - - "L0.839[962,1121] 48ns |-----------------------------------------L0.839-----------------------------------------|" - - "L0.853[962,1121] 49ns |-----------------------------------------L0.853-----------------------------------------|" - - "L0.867[962,1121] 50ns |-----------------------------------------L0.867-----------------------------------------|" - - "L0.881[962,1121] 51ns |-----------------------------------------L0.881-----------------------------------------|" - - "L0.895[962,1121] 52ns |-----------------------------------------L0.895-----------------------------------------|" - - "L0.909[962,1121] 53ns |-----------------------------------------L0.909-----------------------------------------|" - - "L0.923[962,1121] 54ns |-----------------------------------------L0.923-----------------------------------------|" - - "L0.937[962,1121] 55ns |-----------------------------------------L0.937-----------------------------------------|" - - "L0.951[962,1121] 56ns |-----------------------------------------L0.951-----------------------------------------|" - - "L0.965[962,1121] 57ns |-----------------------------------------L0.965-----------------------------------------|" - - "L0.979[962,1121] 58ns |-----------------------------------------L0.979-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[962,1121] 58ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.727, L0.741, L0.755, L0.769, L0.783, L0.797, L0.811, L0.825, L0.839, L0.853, L0.867, L0.881, L0.895, L0.909, L0.923, L0.937, L0.951, L0.965, L0.979, L0.2943" - - " Creating 1 files" - - "**** Simulation run 235, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2944[1122,1281] 39ns |----------------------------------------L0.2944-----------------------------------------|" - - "L0.728[1122,1281] 40ns |-----------------------------------------L0.728-----------------------------------------|" - - "L0.742[1122,1281] 41ns |-----------------------------------------L0.742-----------------------------------------|" - - "L0.756[1122,1281] 42ns |-----------------------------------------L0.756-----------------------------------------|" - - "L0.770[1122,1281] 43ns |-----------------------------------------L0.770-----------------------------------------|" - - "L0.784[1122,1281] 44ns |-----------------------------------------L0.784-----------------------------------------|" - - "L0.798[1122,1281] 45ns |-----------------------------------------L0.798-----------------------------------------|" - - "L0.812[1122,1281] 46ns |-----------------------------------------L0.812-----------------------------------------|" - - "L0.826[1122,1281] 47ns |-----------------------------------------L0.826-----------------------------------------|" - - "L0.840[1122,1281] 48ns |-----------------------------------------L0.840-----------------------------------------|" - - "L0.854[1122,1281] 49ns |-----------------------------------------L0.854-----------------------------------------|" - - "L0.868[1122,1281] 50ns |-----------------------------------------L0.868-----------------------------------------|" - - "L0.882[1122,1281] 51ns |-----------------------------------------L0.882-----------------------------------------|" - - "L0.896[1122,1281] 52ns |-----------------------------------------L0.896-----------------------------------------|" - - "L0.910[1122,1281] 53ns |-----------------------------------------L0.910-----------------------------------------|" - - "L0.924[1122,1281] 54ns |-----------------------------------------L0.924-----------------------------------------|" - - "L0.938[1122,1281] 55ns |-----------------------------------------L0.938-----------------------------------------|" - - "L0.952[1122,1281] 56ns |-----------------------------------------L0.952-----------------------------------------|" - - "L0.966[1122,1281] 57ns |-----------------------------------------L0.966-----------------------------------------|" - - "L0.980[1122,1281] 58ns |-----------------------------------------L0.980-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1122,1281] 58ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.728, L0.742, L0.756, L0.770, L0.784, L0.798, L0.812, L0.826, L0.840, L0.854, L0.868, L0.882, L0.896, L0.910, L0.924, L0.938, L0.952, L0.966, L0.980, L0.2944" - - " Creating 1 files" - - "**** Simulation run 236, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2945[1282,1441] 39ns |----------------------------------------L0.2945-----------------------------------------|" - - "L0.729[1282,1441] 40ns |-----------------------------------------L0.729-----------------------------------------|" - - "L0.743[1282,1441] 41ns |-----------------------------------------L0.743-----------------------------------------|" - - "L0.757[1282,1441] 42ns |-----------------------------------------L0.757-----------------------------------------|" - - "L0.771[1282,1441] 43ns |-----------------------------------------L0.771-----------------------------------------|" - - "L0.785[1282,1441] 44ns |-----------------------------------------L0.785-----------------------------------------|" - - "L0.799[1282,1441] 45ns |-----------------------------------------L0.799-----------------------------------------|" - - "L0.813[1282,1441] 46ns |-----------------------------------------L0.813-----------------------------------------|" - - "L0.827[1282,1441] 47ns |-----------------------------------------L0.827-----------------------------------------|" - - "L0.841[1282,1441] 48ns |-----------------------------------------L0.841-----------------------------------------|" - - "L0.855[1282,1441] 49ns |-----------------------------------------L0.855-----------------------------------------|" - - "L0.869[1282,1441] 50ns |-----------------------------------------L0.869-----------------------------------------|" - - "L0.883[1282,1441] 51ns |-----------------------------------------L0.883-----------------------------------------|" - - "L0.897[1282,1441] 52ns |-----------------------------------------L0.897-----------------------------------------|" - - "L0.911[1282,1441] 53ns |-----------------------------------------L0.911-----------------------------------------|" - - "L0.925[1282,1441] 54ns |-----------------------------------------L0.925-----------------------------------------|" - - "L0.939[1282,1441] 55ns |-----------------------------------------L0.939-----------------------------------------|" - - "L0.953[1282,1441] 56ns |-----------------------------------------L0.953-----------------------------------------|" - - "L0.967[1282,1441] 57ns |-----------------------------------------L0.967-----------------------------------------|" - - "L0.981[1282,1441] 58ns |-----------------------------------------L0.981-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1282,1441] 58ns |------------------------------------------L0.?------------------------------------------|" - - "**** Simulation run 237, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2947[1602,1761] 39ns |----------------------------------------L0.2947-----------------------------------------|" - - "L0.731[1602,1761] 40ns |-----------------------------------------L0.731-----------------------------------------|" - - "L0.745[1602,1761] 41ns |-----------------------------------------L0.745-----------------------------------------|" - - "L0.759[1602,1761] 42ns |-----------------------------------------L0.759-----------------------------------------|" - - "L0.773[1602,1761] 43ns |-----------------------------------------L0.773-----------------------------------------|" - - "L0.787[1602,1761] 44ns |-----------------------------------------L0.787-----------------------------------------|" - - "L0.801[1602,1761] 45ns |-----------------------------------------L0.801-----------------------------------------|" - - "L0.815[1602,1761] 46ns |-----------------------------------------L0.815-----------------------------------------|" - - "L0.829[1602,1761] 47ns |-----------------------------------------L0.829-----------------------------------------|" - - "L0.843[1602,1761] 48ns |-----------------------------------------L0.843-----------------------------------------|" - - "L0.857[1602,1761] 49ns |-----------------------------------------L0.857-----------------------------------------|" - - "L0.871[1602,1761] 50ns |-----------------------------------------L0.871-----------------------------------------|" - - "L0.885[1602,1761] 51ns |-----------------------------------------L0.885-----------------------------------------|" - - "L0.899[1602,1761] 52ns |-----------------------------------------L0.899-----------------------------------------|" - - "L0.913[1602,1761] 53ns |-----------------------------------------L0.913-----------------------------------------|" - - "L0.927[1602,1761] 54ns |-----------------------------------------L0.927-----------------------------------------|" - - "L0.941[1602,1761] 55ns |-----------------------------------------L0.941-----------------------------------------|" - - "L0.955[1602,1761] 56ns |-----------------------------------------L0.955-----------------------------------------|" - - "L0.969[1602,1761] 57ns |-----------------------------------------L0.969-----------------------------------------|" - - "L0.983[1602,1761] 58ns |-----------------------------------------L0.983-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1602,1761] 58ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.731, L0.745, L0.759, L0.773, L0.787, L0.801, L0.815, L0.829, L0.843, L0.857, L0.871, L0.885, L0.899, L0.913, L0.927, L0.941, L0.955, L0.969, L0.983, L0.2947" - - " Creating 1 files" - - "**** Simulation run 238, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2948[1762,2000] 39ns |----------------------------------------L0.2948-----------------------------------------|" - - "L0.732[1762,2000] 40ns |-----------------------------------------L0.732-----------------------------------------|" - - "L0.746[1762,2000] 41ns |-----------------------------------------L0.746-----------------------------------------|" - - "L0.760[1762,2000] 42ns |-----------------------------------------L0.760-----------------------------------------|" - - "L0.774[1762,2000] 43ns |-----------------------------------------L0.774-----------------------------------------|" - - "L0.788[1762,2000] 44ns |-----------------------------------------L0.788-----------------------------------------|" - - "L0.802[1762,2000] 45ns |-----------------------------------------L0.802-----------------------------------------|" - - "L0.816[1762,2000] 46ns |-----------------------------------------L0.816-----------------------------------------|" - - "L0.830[1762,2000] 47ns |-----------------------------------------L0.830-----------------------------------------|" - - "L0.844[1762,2000] 48ns |-----------------------------------------L0.844-----------------------------------------|" - - "L0.858[1762,2000] 49ns |-----------------------------------------L0.858-----------------------------------------|" - - "L0.872[1762,2000] 50ns |-----------------------------------------L0.872-----------------------------------------|" - - "L0.886[1762,2000] 51ns |-----------------------------------------L0.886-----------------------------------------|" - - "L0.900[1762,2000] 52ns |-----------------------------------------L0.900-----------------------------------------|" - - "L0.914[1762,2000] 53ns |-----------------------------------------L0.914-----------------------------------------|" - - "L0.928[1762,2000] 54ns |-----------------------------------------L0.928-----------------------------------------|" - - "L0.942[1762,2000] 55ns |-----------------------------------------L0.942-----------------------------------------|" - - "L0.956[1762,2000] 56ns |-----------------------------------------L0.956-----------------------------------------|" - - "L0.970[1762,2000] 57ns |-----------------------------------------L0.970-----------------------------------------|" - - "L0.984[1762,2000] 58ns |-----------------------------------------L0.984-----------------------------------------|" + - "L0.1564[482,641] 100ns |----------------------------------------L0.1564-----------------------------------------|" + - "L0.1578[482,641] 101ns |----------------------------------------L0.1578-----------------------------------------|" + - "L0.1592[482,641] 102ns |----------------------------------------L0.1592-----------------------------------------|" + - "L0.1606[482,641] 103ns |----------------------------------------L0.1606-----------------------------------------|" + - "L0.1620[482,641] 104ns |----------------------------------------L0.1620-----------------------------------------|" + - "L0.1634[482,641] 105ns |----------------------------------------L0.1634-----------------------------------------|" + - "L0.1648[482,641] 106ns |----------------------------------------L0.1648-----------------------------------------|" + - "L0.1662[482,641] 107ns |----------------------------------------L0.1662-----------------------------------------|" + - "L0.1676[482,641] 108ns |----------------------------------------L0.1676-----------------------------------------|" + - "L0.1690[482,641] 
109ns |----------------------------------------L0.1690-----------------------------------------|" + - "L0.1704[482,641] 110ns |----------------------------------------L0.1704-----------------------------------------|" + - "L0.1718[482,641] 111ns |----------------------------------------L0.1718-----------------------------------------|" + - "L0.1732[482,641] 112ns |----------------------------------------L0.1732-----------------------------------------|" + - "L0.1746[482,641] 113ns |----------------------------------------L0.1746-----------------------------------------|" + - "L0.1760[482,641] 114ns |----------------------------------------L0.1760-----------------------------------------|" + - "L0.1774[482,641] 115ns |----------------------------------------L0.1774-----------------------------------------|" + - "L0.1788[482,641] 116ns |----------------------------------------L0.1788-----------------------------------------|" + - "L0.1802[482,641] 117ns |----------------------------------------L0.1802-----------------------------------------|" + - "L0.1816[482,641] 118ns |----------------------------------------L0.1816-----------------------------------------|" + - "L0.1830[482,641] 119ns |----------------------------------------L0.1830-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 58ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.732, L0.746, L0.760, L0.774, L0.788, L0.802, L0.816, L0.830, L0.844, L0.858, L0.872, L0.886, L0.900, L0.914, L0.928, L0.942, L0.956, L0.970, L0.984, L0.2948" + - " Soft Deleting 20 files: L0.1564, L0.1578, L0.1592, L0.1606, L0.1620, L0.1634, L0.1648, L0.1662, L0.1676, L0.1690, L0.1704, L0.1718, L0.1732, L0.1746, L0.1760, L0.1774, L0.1788, L0.1802, L0.1816, L0.1830" - " Creating 1 files" - - "**** Simulation run 239, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 235, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2949[2001,2086] 58ns |----------------------------------------L0.2949-----------------------------------------|" - - "L0.999[2001,2086] 59ns |-----------------------------------------L0.999-----------------------------------------|" - - "L0.1013[2001,2086] 60ns |----------------------------------------L0.1013-----------------------------------------|" - - "L0.1027[2001,2086] 61ns |----------------------------------------L0.1027-----------------------------------------|" - - "L0.1041[2001,2086] 62ns |----------------------------------------L0.1041-----------------------------------------|" - - "L0.1055[2001,2086] 63ns |----------------------------------------L0.1055-----------------------------------------|" - - "L0.1069[2001,2086] 64ns |----------------------------------------L0.1069-----------------------------------------|" - - "L0.1083[2001,2086] 65ns |----------------------------------------L0.1083-----------------------------------------|" - - "L0.1097[2001,2086] 66ns |----------------------------------------L0.1097-----------------------------------------|" - - "L0.1111[2001,2086] 67ns |----------------------------------------L0.1111-----------------------------------------|" - - "L0.1125[2001,2086] 68ns |----------------------------------------L0.1125-----------------------------------------|" - - "L0.1139[2001,2086] 69ns |----------------------------------------L0.1139-----------------------------------------|" - - "L0.1153[2001,2086] 70ns |----------------------------------------L0.1153-----------------------------------------|" - - "L0.1167[2001,2086] 71ns |----------------------------------------L0.1167-----------------------------------------|" - - "L0.1181[2001,2086] 72ns |----------------------------------------L0.1181-----------------------------------------|" - - "L0.1195[2001,2086] 73ns |----------------------------------------L0.1195-----------------------------------------|" - - "L0.1209[2001,2086] 74ns |----------------------------------------L0.1209-----------------------------------------|" - - "L0.1223[2001,2086] 75ns |----------------------------------------L0.1223-----------------------------------------|" - - "L0.1237[2001,2086] 76ns |----------------------------------------L0.1237-----------------------------------------|" - - "L0.1251[2001,2086] 77ns |----------------------------------------L0.1251-----------------------------------------|" + - "L0.1844[482,641] 120ns |----------------------------------------L0.1844-----------------------------------------|" + - "L0.1858[482,641] 121ns |----------------------------------------L0.1858-----------------------------------------|" + - "L0.1872[482,641] 122ns |----------------------------------------L0.1872-----------------------------------------|" + - "L0.1886[482,641] 123ns |----------------------------------------L0.1886-----------------------------------------|" + - "L0.1900[482,641] 124ns |----------------------------------------L0.1900-----------------------------------------|" + - "L0.1914[482,641] 125ns |----------------------------------------L0.1914-----------------------------------------|" + - "L0.1928[482,641] 126ns |----------------------------------------L0.1928-----------------------------------------|" + - "L0.1942[482,641] 127ns |----------------------------------------L0.1942-----------------------------------------|" + - "L0.1956[482,641] 128ns |----------------------------------------L0.1956-----------------------------------------|" + - 
"L0.1970[482,641] 129ns |----------------------------------------L0.1970-----------------------------------------|" + - "L0.1984[482,641] 130ns |----------------------------------------L0.1984-----------------------------------------|" + - "L0.1998[482,641] 131ns |----------------------------------------L0.1998-----------------------------------------|" + - "L0.2012[482,641] 132ns |----------------------------------------L0.2012-----------------------------------------|" + - "L0.2026[482,641] 133ns |----------------------------------------L0.2026-----------------------------------------|" + - "L0.2152[482,641] 134ns |----------------------------------------L0.2152-----------------------------------------|" + - "L0.2166[482,641] 135ns |----------------------------------------L0.2166-----------------------------------------|" + - "L0.2040[482,641] 136ns |----------------------------------------L0.2040-----------------------------------------|" + - "L0.2054[482,641] 137ns |----------------------------------------L0.2054-----------------------------------------|" + - "L0.2068[482,641] 138ns |----------------------------------------L0.2068-----------------------------------------|" + - "L0.2082[482,641] 139ns |----------------------------------------L0.2082-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 77ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.999, L0.1013, L0.1027, L0.1041, L0.1055, L0.1069, L0.1083, L0.1097, L0.1111, L0.1125, L0.1139, L0.1153, L0.1167, L0.1181, L0.1195, L0.1209, L0.1223, L0.1237, L0.1251, L0.2949" - - " Creating 1 files" - - "**** Simulation run 240, type=compact(ManySmallFiles). 
20 Input Files, 580b total:" - - "L0 " - - "L0.2950[2087,580000] 58ns 390b|-----------------------------L0.2950-----------------------------| " - - "L0.1000[2087,590000] 59ns 10b|-----------------------------L0.1000------------------------------| " - - "L0.1014[2087,600000] 60ns 10b|------------------------------L0.1014-------------------------------| " - - "L0.1028[2087,610000] 61ns 10b|-------------------------------L0.1028-------------------------------| " - - "L0.1042[2087,620000] 62ns 10b|-------------------------------L0.1042--------------------------------| " - - "L0.1056[2087,630000] 63ns 10b|--------------------------------L0.1056--------------------------------| " - - "L0.1070[2087,640000] 64ns 10b|--------------------------------L0.1070---------------------------------| " - - "L0.1084[2087,650000] 65ns 10b|---------------------------------L0.1084---------------------------------| " - - "L0.1098[2087,660000] 66ns 10b|----------------------------------L0.1098----------------------------------| " - - "L0.1112[2087,670000] 67ns 10b|----------------------------------L0.1112-----------------------------------| " - - "L0.1126[2087,680000] 68ns 10b|-----------------------------------L0.1126-----------------------------------| " - - "L0.1140[2087,690000] 69ns 10b|-----------------------------------L0.1140------------------------------------| " - - "L0.1154[2087,700000] 70ns 10b|------------------------------------L0.1154------------------------------------| " - - "L0.1168[2087,710000] 71ns 10b|------------------------------------L0.1168-------------------------------------| " - - "L0.1182[2087,720000] 72ns 10b|-------------------------------------L0.1182--------------------------------------| " - - "L0.1196[2087,730000] 73ns 10b|--------------------------------------L0.1196--------------------------------------| " - - "L0.1210[2087,740000] 74ns 10b|--------------------------------------L0.1210---------------------------------------| " - - "L0.1224[2087,750000] 75ns 10b|---------------------------------------L0.1224---------------------------------------| " - - "L0.1238[2087,760000] 76ns 10b|---------------------------------------L0.1238----------------------------------------| " - - "L0.1252[2087,770000] 77ns 10b|----------------------------------------L0.1252-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 580b total:" - - "L0, all files 580b " - - "L0.?[2087,770000] 77ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1000, L0.1014, L0.1028, L0.1042, L0.1056, L0.1070, L0.1084, L0.1098, L0.1112, L0.1126, L0.1140, L0.1154, L0.1168, L0.1182, L0.1196, L0.1210, L0.1224, L0.1238, L0.1252, L0.2950" - - " Creating 1 files" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.729, L0.743, L0.757, L0.771, L0.785, L0.799, L0.813, L0.827, L0.841, L0.855, L0.869, L0.883, L0.897, L0.911, L0.925, L0.939, L0.953, L0.967, L0.981, L0.2945" + - "L0.?[482,641] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1844, L0.1858, L0.1872, L0.1886, L0.1900, L0.1914, L0.1928, L0.1942, L0.1956, L0.1970, L0.1984, L0.1998, L0.2012, L0.2026, L0.2040, L0.2054, L0.2068, L0.2082, L0.2152, L0.2166" - " Creating 1 files" - - "**** Simulation run 241, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "**** Simulation run 236, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2946[1442,1601] 39ns |----------------------------------------L0.2946-----------------------------------------|" - - "L0.730[1442,1601] 40ns |-----------------------------------------L0.730-----------------------------------------|" - - "L0.744[1442,1601] 41ns |-----------------------------------------L0.744-----------------------------------------|" - - "L0.758[1442,1601] 42ns |-----------------------------------------L0.758-----------------------------------------|" - - "L0.772[1442,1601] 43ns |-----------------------------------------L0.772-----------------------------------------|" - - "L0.786[1442,1601] 44ns |-----------------------------------------L0.786-----------------------------------------|" - - "L0.800[1442,1601] 45ns |-----------------------------------------L0.800-----------------------------------------|" - - "L0.814[1442,1601] 46ns |-----------------------------------------L0.814-----------------------------------------|" - - "L0.828[1442,1601] 47ns |-----------------------------------------L0.828-----------------------------------------|" - - "L0.842[1442,1601] 48ns |-----------------------------------------L0.842-----------------------------------------|" - - "L0.856[1442,1601] 49ns |-----------------------------------------L0.856-----------------------------------------|" - - "L0.870[1442,1601] 50ns |-----------------------------------------L0.870-----------------------------------------|" - - "L0.884[1442,1601] 51ns |-----------------------------------------L0.884-----------------------------------------|" - - "L0.898[1442,1601] 52ns |-----------------------------------------L0.898-----------------------------------------|" - - "L0.912[1442,1601] 53ns |-----------------------------------------L0.912-----------------------------------------|" - - "L0.926[1442,1601] 54ns |-----------------------------------------L0.926-----------------------------------------|" - - "L0.940[1442,1601] 55ns |-----------------------------------------L0.940-----------------------------------------|" - - "L0.954[1442,1601] 56ns |-----------------------------------------L0.954-----------------------------------------|" - - "L0.968[1442,1601] 57ns |-----------------------------------------L0.968-----------------------------------------|" - - "L0.982[1442,1601] 58ns |-----------------------------------------L0.982-----------------------------------------|" + - "L0.2096[482,641] 140ns |----------------------------------------L0.2096-----------------------------------------|" + - "L0.2110[482,641] 141ns |----------------------------------------L0.2110-----------------------------------------|" + - "L0.2124[482,641] 142ns |----------------------------------------L0.2124-----------------------------------------|" + - "L0.2138[482,641] 143ns |----------------------------------------L0.2138-----------------------------------------|" + - "L0.2180[482,641] 144ns |----------------------------------------L0.2180-----------------------------------------|" + - "L0.2194[482,641] 145ns |----------------------------------------L0.2194-----------------------------------------|" + - "L0.2208[482,641] 146ns |----------------------------------------L0.2208-----------------------------------------|" + - "L0.2222[482,641] 147ns |----------------------------------------L0.2222-----------------------------------------|" + - "L0.2236[482,641] 148ns 
|----------------------------------------L0.2236-----------------------------------------|" + - "L0.2250[482,641] 149ns |----------------------------------------L0.2250-----------------------------------------|" + - "L0.2264[482,641] 150ns |----------------------------------------L0.2264-----------------------------------------|" + - "L0.2278[482,641] 151ns |----------------------------------------L0.2278-----------------------------------------|" + - "L0.2292[482,641] 152ns |----------------------------------------L0.2292-----------------------------------------|" + - "L0.2306[482,641] 153ns |----------------------------------------L0.2306-----------------------------------------|" + - "L0.2320[482,641] 154ns |----------------------------------------L0.2320-----------------------------------------|" + - "L0.2334[482,641] 155ns |----------------------------------------L0.2334-----------------------------------------|" + - "L0.2348[482,641] 156ns |----------------------------------------L0.2348-----------------------------------------|" + - "L0.2362[482,641] 157ns |----------------------------------------L0.2362-----------------------------------------|" + - "L0.2376[482,641] 158ns |----------------------------------------L0.2376-----------------------------------------|" + - "L0.2390[482,641] 159ns |----------------------------------------L0.2390-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 58ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.730, L0.744, L0.758, L0.772, L0.786, L0.800, L0.814, L0.828, L0.842, L0.856, L0.870, L0.884, L0.898, L0.912, L0.926, L0.940, L0.954, L0.968, L0.982, L0.2946" - - " Creating 1 files" - - "**** Simulation run 242, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "L0.?[482,641] 159ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 237, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2951[20,161] 58ns |----------------------------------------L0.2951-----------------------------------------|" - - "L0.987[59,161] 59ns |----------------------------L0.987-----------------------------| " - - "L0.1001[60,161] 60ns |---------------------------L0.1001----------------------------| " - - "L0.1015[61,161] 61ns |---------------------------L0.1015---------------------------| " - - "L0.1029[62,161] 62ns |---------------------------L0.1029---------------------------| " - - "L0.1043[63,161] 63ns |--------------------------L0.1043---------------------------| " - - "L0.1057[64,161] 64ns |--------------------------L0.1057--------------------------| " - - "L0.1071[65,161] 65ns |--------------------------L0.1071--------------------------| " - - "L0.1085[66,161] 66ns |-------------------------L0.1085--------------------------| " - - "L0.1099[67,161] 67ns |-------------------------L0.1099--------------------------|" - - "L0.1113[68,161] 68ns |-------------------------L0.1113-------------------------| " - - "L0.1127[69,161] 69ns |------------------------L0.1127-------------------------| " - - "L0.1141[70,161] 70ns |------------------------L0.1141-------------------------| " - - "L0.1155[71,161] 71ns |------------------------L0.1155------------------------| " - - "L0.1169[72,161] 72ns |-----------------------L0.1169------------------------| " - - "L0.1183[73,161] 73ns |-----------------------L0.1183------------------------| " - - "L0.1197[74,161] 74ns |-----------------------L0.1197-----------------------| " - - "L0.1211[75,161] 75ns |----------------------L0.1211-----------------------| " - - "L0.1225[76,161] 76ns |----------------------L0.1225-----------------------| " - - "L0.1239[77,161] 77ns |----------------------L0.1239----------------------| " + - "L0.2665[482,641] 180ns |----------------------------------------L0.2665-----------------------------------------|" + - "L0.2678[482,641] 181ns |----------------------------------------L0.2678-----------------------------------------|" + - "L0.2691[482,641] 182ns |----------------------------------------L0.2691-----------------------------------------|" + - "L0.2704[482,641] 183ns |----------------------------------------L0.2704-----------------------------------------|" + - "L0.2717[482,641] 184ns |----------------------------------------L0.2717-----------------------------------------|" + - "L0.2730[482,641] 185ns |----------------------------------------L0.2730-----------------------------------------|" + - "L0.2743[482,641] 186ns |----------------------------------------L0.2743-----------------------------------------|" + - "L0.2756[482,641] 187ns |----------------------------------------L0.2756-----------------------------------------|" + - "L0.2769[482,641] 188ns |----------------------------------------L0.2769-----------------------------------------|" + - "L0.2782[482,641] 189ns |----------------------------------------L0.2782-----------------------------------------|" + - "L0.2795[482,641] 190ns |----------------------------------------L0.2795-----------------------------------------|" + - "L0.2808[482,641] 191ns |----------------------------------------L0.2808-----------------------------------------|" + - "L0.2821[482,641] 192ns |----------------------------------------L0.2821-----------------------------------------|" + - "L0.2834[482,641] 193ns |----------------------------------------L0.2834-----------------------------------------|" + - "L0.2847[482,641] 194ns 
|----------------------------------------L0.2847-----------------------------------------|" + - "L0.2860[482,641] 195ns |----------------------------------------L0.2860-----------------------------------------|" + - "L0.2873[482,641] 196ns |----------------------------------------L0.2873-----------------------------------------|" + - "L0.2886[482,641] 197ns |----------------------------------------L0.2886-----------------------------------------|" + - "L0.2899[482,641] 198ns |----------------------------------------L0.2899-----------------------------------------|" + - "L0.2912[482,641] 199ns |----------------------------------------L0.2912-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[20,161] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.987, L0.1001, L0.1015, L0.1029, L0.1043, L0.1057, L0.1071, L0.1085, L0.1099, L0.1113, L0.1127, L0.1141, L0.1155, L0.1169, L0.1183, L0.1197, L0.1211, L0.1225, L0.1239, L0.2951" + - " Soft Deleting 20 files: L0.2665, L0.2678, L0.2691, L0.2704, L0.2717, L0.2730, L0.2743, L0.2756, L0.2769, L0.2782, L0.2795, L0.2808, L0.2821, L0.2834, L0.2847, L0.2860, L0.2873, L0.2886, L0.2899, L0.2912" - " Creating 1 files" - - "**** Simulation run 243, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2096, L0.2110, L0.2124, L0.2138, L0.2180, L0.2194, L0.2208, L0.2222, L0.2236, L0.2250, L0.2264, L0.2278, L0.2292, L0.2306, L0.2320, L0.2334, L0.2348, L0.2362, L0.2376, L0.2390" + - " Creating 1 files" + - "**** Simulation run 238, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2952[162,321] 58ns |----------------------------------------L0.2952-----------------------------------------|" - - "L0.988[162,321] 59ns |-----------------------------------------L0.988-----------------------------------------|" - - "L0.1002[162,321] 60ns |----------------------------------------L0.1002-----------------------------------------|" - - "L0.1016[162,321] 61ns |----------------------------------------L0.1016-----------------------------------------|" - - "L0.1030[162,321] 62ns |----------------------------------------L0.1030-----------------------------------------|" - - "L0.1044[162,321] 63ns |----------------------------------------L0.1044-----------------------------------------|" - - "L0.1058[162,321] 64ns |----------------------------------------L0.1058-----------------------------------------|" - - "L0.1072[162,321] 65ns |----------------------------------------L0.1072-----------------------------------------|" - - "L0.1086[162,321] 66ns |----------------------------------------L0.1086-----------------------------------------|" - - "L0.1100[162,321] 67ns |----------------------------------------L0.1100-----------------------------------------|" - - "L0.1114[162,321] 68ns |----------------------------------------L0.1114-----------------------------------------|" - - "L0.1128[162,321] 69ns |----------------------------------------L0.1128-----------------------------------------|" - - "L0.1142[162,321] 70ns |----------------------------------------L0.1142-----------------------------------------|" - - "L0.1156[162,321] 71ns |----------------------------------------L0.1156-----------------------------------------|" - - "L0.1170[162,321] 72ns |----------------------------------------L0.1170-----------------------------------------|" - - "L0.1184[162,321] 73ns |----------------------------------------L0.1184-----------------------------------------|" - - "L0.1198[162,321] 74ns |----------------------------------------L0.1198-----------------------------------------|" - - "L0.1212[162,321] 75ns |----------------------------------------L0.1212-----------------------------------------|" - - "L0.1226[162,321] 76ns |----------------------------------------L0.1226-----------------------------------------|" - - "L0.1240[162,321] 77ns |----------------------------------------L0.1240-----------------------------------------|" + - "L0.2404[482,641] 160ns |----------------------------------------L0.2404-----------------------------------------|" + - "L0.2418[482,641] 161ns |----------------------------------------L0.2418-----------------------------------------|" + - "L0.2431[482,641] 162ns |----------------------------------------L0.2431-----------------------------------------|" + - "L0.2444[482,641] 163ns |----------------------------------------L0.2444-----------------------------------------|" + - "L0.2457[482,641] 164ns |----------------------------------------L0.2457-----------------------------------------|" + - "L0.2470[482,641] 165ns |----------------------------------------L0.2470-----------------------------------------|" + - "L0.2483[482,641] 166ns |----------------------------------------L0.2483-----------------------------------------|" + - "L0.2496[482,641] 167ns |----------------------------------------L0.2496-----------------------------------------|" + - "L0.2509[482,641] 168ns |----------------------------------------L0.2509-----------------------------------------|" + - "L0.2522[482,641] 169ns 
|----------------------------------------L0.2522-----------------------------------------|" + - "L0.2535[482,641] 170ns |----------------------------------------L0.2535-----------------------------------------|" + - "L0.2548[482,641] 171ns |----------------------------------------L0.2548-----------------------------------------|" + - "L0.2561[482,641] 172ns |----------------------------------------L0.2561-----------------------------------------|" + - "L0.2574[482,641] 173ns |----------------------------------------L0.2574-----------------------------------------|" + - "L0.2587[482,641] 174ns |----------------------------------------L0.2587-----------------------------------------|" + - "L0.2600[482,641] 175ns |----------------------------------------L0.2600-----------------------------------------|" + - "L0.2613[482,641] 176ns |----------------------------------------L0.2613-----------------------------------------|" + - "L0.2626[482,641] 177ns |----------------------------------------L0.2626-----------------------------------------|" + - "L0.2639[482,641] 178ns |----------------------------------------L0.2639-----------------------------------------|" + - "L0.2652[482,641] 179ns |----------------------------------------L0.2652-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[482,641] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.988, L0.1002, L0.1016, L0.1030, L0.1044, L0.1058, L0.1072, L0.1086, L0.1100, L0.1114, L0.1128, L0.1142, L0.1156, L0.1170, L0.1184, L0.1198, L0.1212, L0.1226, L0.1240, L0.2952" + - " Soft Deleting 20 files: L0.2404, L0.2418, L0.2431, L0.2444, L0.2457, L0.2470, L0.2483, L0.2496, L0.2509, L0.2522, L0.2535, L0.2548, L0.2561, L0.2574, L0.2587, L0.2600, L0.2613, L0.2626, L0.2639, L0.2652" - " Creating 1 files" - - "**** Simulation run 244, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 239, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.205[642,801] 0ns |-----------------------------------------L0.205-----------------------------------------|" + - "L0.217[642,801] 1ns |-----------------------------------------L0.217-----------------------------------------|" + - "L0.229[642,801] 2ns |-----------------------------------------L0.229-----------------------------------------|" + - "L0.241[642,801] 3ns |-----------------------------------------L0.241-----------------------------------------|" + - "L0.253[642,801] 4ns |-----------------------------------------L0.253-----------------------------------------|" + - "L0.265[642,801] 5ns |-----------------------------------------L0.265-----------------------------------------|" + - "L0.277[642,801] 6ns |-----------------------------------------L0.277-----------------------------------------|" + - "L0.289[642,801] 7ns |-----------------------------------------L0.289-----------------------------------------|" + - "L0.301[642,801] 8ns |-----------------------------------------L0.301-----------------------------------------|" + - "L0.313[642,801] 9ns |-----------------------------------------L0.313-----------------------------------------|" + - "L0.325[642,801] 10ns |-----------------------------------------L0.325-----------------------------------------|" + - "L0.337[642,801] 11ns |-----------------------------------------L0.337-----------------------------------------|" + - "L0.349[642,801] 12ns |-----------------------------------------L0.349-----------------------------------------|" + - "L0.361[642,801] 13ns |-----------------------------------------L0.361-----------------------------------------|" + - "L0.373[642,801] 14ns |-----------------------------------------L0.373-----------------------------------------|" + - "L0.385[642,801] 15ns |-----------------------------------------L0.385-----------------------------------------|" + - "L0.397[642,801] 16ns |-----------------------------------------L0.397-----------------------------------------|" + - "L0.409[642,801] 17ns |-----------------------------------------L0.409-----------------------------------------|" + - "L0.533[642,801] 18ns |-----------------------------------------L0.533-----------------------------------------|" + - "L0.545[642,801] 19ns |-----------------------------------------L0.545-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[642,801] 19ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.205, L0.217, L0.229, L0.241, L0.253, L0.265, L0.277, L0.289, L0.301, L0.313, L0.325, L0.337, L0.349, L0.361, L0.373, L0.385, L0.397, L0.409, L0.533, L0.545" + - " Creating 1 files" + - "**** Simulation run 240, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2953[322,481] 58ns |----------------------------------------L0.2953-----------------------------------------|" - - "L0.989[322,481] 59ns |-----------------------------------------L0.989-----------------------------------------|" - - "L0.1003[322,481] 60ns |----------------------------------------L0.1003-----------------------------------------|" - - "L0.1017[322,481] 61ns |----------------------------------------L0.1017-----------------------------------------|" - - "L0.1031[322,481] 62ns |----------------------------------------L0.1031-----------------------------------------|" - - "L0.1045[322,481] 63ns |----------------------------------------L0.1045-----------------------------------------|" - - "L0.1059[322,481] 64ns |----------------------------------------L0.1059-----------------------------------------|" - - "L0.1073[322,481] 65ns |----------------------------------------L0.1073-----------------------------------------|" - - "L0.1087[322,481] 66ns |----------------------------------------L0.1087-----------------------------------------|" - - "L0.1101[322,481] 67ns |----------------------------------------L0.1101-----------------------------------------|" - - "L0.1115[322,481] 68ns |----------------------------------------L0.1115-----------------------------------------|" - - "L0.1129[322,481] 69ns |----------------------------------------L0.1129-----------------------------------------|" - - "L0.1143[322,481] 70ns |----------------------------------------L0.1143-----------------------------------------|" - - "L0.1157[322,481] 71ns |----------------------------------------L0.1157-----------------------------------------|" - - "L0.1171[322,481] 72ns |----------------------------------------L0.1171-----------------------------------------|" - - "L0.1185[322,481] 73ns |----------------------------------------L0.1185-----------------------------------------|" - - "L0.1199[322,481] 74ns |----------------------------------------L0.1199-----------------------------------------|" - - "L0.1213[322,481] 75ns |----------------------------------------L0.1213-----------------------------------------|" - - "L0.1227[322,481] 76ns |----------------------------------------L0.1227-----------------------------------------|" - - "L0.1241[322,481] 77ns |----------------------------------------L0.1241-----------------------------------------|" + - "L0.421[642,801] 20ns |-----------------------------------------L0.421-----------------------------------------|" + - "L0.435[642,801] 21ns |-----------------------------------------L0.435-----------------------------------------|" + - "L0.449[642,801] 22ns |-----------------------------------------L0.449-----------------------------------------|" + - "L0.463[642,801] 23ns |-----------------------------------------L0.463-----------------------------------------|" + - "L0.477[642,801] 24ns |-----------------------------------------L0.477-----------------------------------------|" + - "L0.491[642,801] 25ns |-----------------------------------------L0.491-----------------------------------------|" + - "L0.505[642,801] 26ns |-----------------------------------------L0.505-----------------------------------------|" + - "L0.519[642,801] 27ns |-----------------------------------------L0.519-----------------------------------------|" + - "L0.557[642,801] 28ns |-----------------------------------------L0.557-----------------------------------------|" + - "L0.571[642,801] 29ns 
|-----------------------------------------L0.571-----------------------------------------|" + - "L0.585[642,801] 30ns |-----------------------------------------L0.585-----------------------------------------|" + - "L0.599[642,801] 31ns |-----------------------------------------L0.599-----------------------------------------|" + - "L0.613[642,801] 32ns |-----------------------------------------L0.613-----------------------------------------|" + - "L0.627[642,801] 33ns |-----------------------------------------L0.627-----------------------------------------|" + - "L0.641[642,801] 34ns |-----------------------------------------L0.641-----------------------------------------|" + - "L0.655[642,801] 35ns |-----------------------------------------L0.655-----------------------------------------|" + - "L0.669[642,801] 36ns |-----------------------------------------L0.669-----------------------------------------|" + - "L0.683[642,801] 37ns |-----------------------------------------L0.683-----------------------------------------|" + - "L0.697[642,801] 38ns |-----------------------------------------L0.697-----------------------------------------|" + - "L0.711[642,801] 39ns |-----------------------------------------L0.711-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[322,481] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[642,801] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.989, L0.1003, L0.1017, L0.1031, L0.1045, L0.1059, L0.1073, L0.1087, L0.1101, L0.1115, L0.1129, L0.1143, L0.1157, L0.1171, L0.1185, L0.1199, L0.1213, L0.1227, L0.1241, L0.2953" + - " Soft Deleting 20 files: L0.421, L0.435, L0.449, L0.463, L0.477, L0.491, L0.505, L0.519, L0.557, L0.571, L0.585, L0.599, L0.613, L0.627, L0.641, L0.655, L0.669, L0.683, L0.697, L0.711" - " Creating 1 files" - - "**** Simulation run 245, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 241, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2954[482,641] 58ns |----------------------------------------L0.2954-----------------------------------------|" - - "L0.990[482,641] 59ns |-----------------------------------------L0.990-----------------------------------------|" - - "L0.1004[482,641] 60ns |----------------------------------------L0.1004-----------------------------------------|" - - "L0.1018[482,641] 61ns |----------------------------------------L0.1018-----------------------------------------|" - - "L0.1032[482,641] 62ns |----------------------------------------L0.1032-----------------------------------------|" - - "L0.1046[482,641] 63ns |----------------------------------------L0.1046-----------------------------------------|" - - "L0.1060[482,641] 64ns |----------------------------------------L0.1060-----------------------------------------|" - - "L0.1074[482,641] 65ns |----------------------------------------L0.1074-----------------------------------------|" - - "L0.1088[482,641] 66ns |----------------------------------------L0.1088-----------------------------------------|" - - "L0.1102[482,641] 67ns |----------------------------------------L0.1102-----------------------------------------|" - - "L0.1116[482,641] 68ns |----------------------------------------L0.1116-----------------------------------------|" - - "L0.1130[482,641] 69ns |----------------------------------------L0.1130-----------------------------------------|" - - "L0.1144[482,641] 70ns |----------------------------------------L0.1144-----------------------------------------|" - - "L0.1158[482,641] 71ns |----------------------------------------L0.1158-----------------------------------------|" - - "L0.1172[482,641] 72ns |----------------------------------------L0.1172-----------------------------------------|" - - "L0.1186[482,641] 73ns |----------------------------------------L0.1186-----------------------------------------|" - - "L0.1200[482,641] 74ns |----------------------------------------L0.1200-----------------------------------------|" - - "L0.1214[482,641] 75ns |----------------------------------------L0.1214-----------------------------------------|" - - "L0.1228[482,641] 76ns |----------------------------------------L0.1228-----------------------------------------|" - - "L0.1242[482,641] 77ns |----------------------------------------L0.1242-----------------------------------------|" + - "L0.725[642,801] 40ns |-----------------------------------------L0.725-----------------------------------------|" + - "L0.739[642,801] 41ns |-----------------------------------------L0.739-----------------------------------------|" + - "L0.753[642,801] 42ns |-----------------------------------------L0.753-----------------------------------------|" + - "L0.767[642,801] 43ns |-----------------------------------------L0.767-----------------------------------------|" + - "L0.781[642,801] 44ns |-----------------------------------------L0.781-----------------------------------------|" + - "L0.795[642,801] 45ns |-----------------------------------------L0.795-----------------------------------------|" + - "L0.809[642,801] 46ns |-----------------------------------------L0.809-----------------------------------------|" + - "L0.823[642,801] 47ns |-----------------------------------------L0.823-----------------------------------------|" + - "L0.837[642,801] 48ns |-----------------------------------------L0.837-----------------------------------------|" + - "L0.851[642,801] 49ns 
|-----------------------------------------L0.851-----------------------------------------|" + - "L0.865[642,801] 50ns |-----------------------------------------L0.865-----------------------------------------|" + - "L0.879[642,801] 51ns |-----------------------------------------L0.879-----------------------------------------|" + - "L0.893[642,801] 52ns |-----------------------------------------L0.893-----------------------------------------|" + - "L0.907[642,801] 53ns |-----------------------------------------L0.907-----------------------------------------|" + - "L0.921[642,801] 54ns |-----------------------------------------L0.921-----------------------------------------|" + - "L0.935[642,801] 55ns |-----------------------------------------L0.935-----------------------------------------|" + - "L0.949[642,801] 56ns |-----------------------------------------L0.949-----------------------------------------|" + - "L0.963[642,801] 57ns |-----------------------------------------L0.963-----------------------------------------|" + - "L0.977[642,801] 58ns |-----------------------------------------L0.977-----------------------------------------|" + - "L0.991[642,801] 59ns |-----------------------------------------L0.991-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[642,801] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.990, L0.1004, L0.1018, L0.1032, L0.1046, L0.1060, L0.1074, L0.1088, L0.1102, L0.1116, L0.1130, L0.1144, L0.1158, L0.1172, L0.1186, L0.1200, L0.1214, L0.1228, L0.1242, L0.2954" + - " Soft Deleting 20 files: L0.725, L0.739, L0.753, L0.767, L0.781, L0.795, L0.809, L0.823, L0.837, L0.851, L0.865, L0.879, L0.893, L0.907, L0.921, L0.935, L0.949, L0.963, L0.977, L0.991" - " Creating 1 files" - - "**** Simulation run 246, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 242, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2955[642,801] 58ns |----------------------------------------L0.2955-----------------------------------------|" - - "L0.991[642,801] 59ns |-----------------------------------------L0.991-----------------------------------------|" - "L0.1005[642,801] 60ns |----------------------------------------L0.1005-----------------------------------------|" - "L0.1019[642,801] 61ns |----------------------------------------L0.1019-----------------------------------------|" - "L0.1033[642,801] 62ns |----------------------------------------L0.1033-----------------------------------------|" @@ -6766,605 +6634,716 @@ async fn stuck_l0_large_l0s() { - "L0.1215[642,801] 75ns |----------------------------------------L0.1215-----------------------------------------|" - "L0.1229[642,801] 76ns |----------------------------------------L0.1229-----------------------------------------|" - "L0.1243[642,801] 77ns |----------------------------------------L0.1243-----------------------------------------|" + - "L0.1257[642,801] 78ns |----------------------------------------L0.1257-----------------------------------------|" + - "L0.1271[642,801] 79ns |----------------------------------------L0.1271-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[642,801] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.991, L0.1005, L0.1019, L0.1033, L0.1047, L0.1061, L0.1075, L0.1089, L0.1103, L0.1117, L0.1131, L0.1145, L0.1159, L0.1173, L0.1187, L0.1201, L0.1215, L0.1229, L0.1243, L0.2955" + - " Soft Deleting 20 files: L0.1005, L0.1019, L0.1033, L0.1047, L0.1061, L0.1075, L0.1089, L0.1103, L0.1117, L0.1131, L0.1145, L0.1159, L0.1173, L0.1187, L0.1201, L0.1215, L0.1229, L0.1243, L0.1257, L0.1271" - " Creating 1 files" - - "**** Simulation run 247, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 243, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2956[802,961] 58ns |----------------------------------------L0.2956-----------------------------------------|" - - "L0.992[802,961] 59ns |-----------------------------------------L0.992-----------------------------------------|" - - "L0.1006[802,961] 60ns |----------------------------------------L0.1006-----------------------------------------|" - - "L0.1020[802,961] 61ns |----------------------------------------L0.1020-----------------------------------------|" - - "L0.1034[802,961] 62ns |----------------------------------------L0.1034-----------------------------------------|" - - "L0.1048[802,961] 63ns |----------------------------------------L0.1048-----------------------------------------|" - - "L0.1062[802,961] 64ns |----------------------------------------L0.1062-----------------------------------------|" - - "L0.1076[802,961] 65ns |----------------------------------------L0.1076-----------------------------------------|" - - "L0.1090[802,961] 66ns |----------------------------------------L0.1090-----------------------------------------|" - - "L0.1104[802,961] 67ns |----------------------------------------L0.1104-----------------------------------------|" - - "L0.1118[802,961] 68ns |----------------------------------------L0.1118-----------------------------------------|" - - "L0.1132[802,961] 69ns |----------------------------------------L0.1132-----------------------------------------|" - - "L0.1146[802,961] 70ns |----------------------------------------L0.1146-----------------------------------------|" - - "L0.1160[802,961] 71ns |----------------------------------------L0.1160-----------------------------------------|" - - "L0.1174[802,961] 72ns |----------------------------------------L0.1174-----------------------------------------|" - - "L0.1188[802,961] 73ns |----------------------------------------L0.1188-----------------------------------------|" - - "L0.1202[802,961] 74ns |----------------------------------------L0.1202-----------------------------------------|" - - "L0.1216[802,961] 75ns |----------------------------------------L0.1216-----------------------------------------|" - - "L0.1230[802,961] 76ns |----------------------------------------L0.1230-----------------------------------------|" - - "L0.1244[802,961] 77ns |----------------------------------------L0.1244-----------------------------------------|" + - "L0.1285[642,801] 80ns |----------------------------------------L0.1285-----------------------------------------|" + - "L0.1299[642,801] 81ns |----------------------------------------L0.1299-----------------------------------------|" + - "L0.1313[642,801] 82ns |----------------------------------------L0.1313-----------------------------------------|" + - "L0.1327[642,801] 83ns |----------------------------------------L0.1327-----------------------------------------|" + - "L0.1341[642,801] 84ns |----------------------------------------L0.1341-----------------------------------------|" + - "L0.1355[642,801] 85ns |----------------------------------------L0.1355-----------------------------------------|" + - "L0.1369[642,801] 86ns |----------------------------------------L0.1369-----------------------------------------|" + - "L0.1383[642,801] 87ns |----------------------------------------L0.1383-----------------------------------------|" + - "L0.1397[642,801] 88ns |----------------------------------------L0.1397-----------------------------------------|" + - "L0.1411[642,801] 89ns 
|----------------------------------------L0.1411-----------------------------------------|" + - "L0.1425[642,801] 90ns |----------------------------------------L0.1425-----------------------------------------|" + - "L0.1439[642,801] 91ns |----------------------------------------L0.1439-----------------------------------------|" + - "L0.1453[642,801] 92ns |----------------------------------------L0.1453-----------------------------------------|" + - "L0.1467[642,801] 93ns |----------------------------------------L0.1467-----------------------------------------|" + - "L0.1481[642,801] 94ns |----------------------------------------L0.1481-----------------------------------------|" + - "L0.1495[642,801] 95ns |----------------------------------------L0.1495-----------------------------------------|" + - "L0.1509[642,801] 96ns |----------------------------------------L0.1509-----------------------------------------|" + - "L0.1523[642,801] 97ns |----------------------------------------L0.1523-----------------------------------------|" + - "L0.1537[642,801] 98ns |----------------------------------------L0.1537-----------------------------------------|" + - "L0.1551[642,801] 99ns |----------------------------------------L0.1551-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[642,801] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.992, L0.1006, L0.1020, L0.1034, L0.1048, L0.1062, L0.1076, L0.1090, L0.1104, L0.1118, L0.1132, L0.1146, L0.1160, L0.1174, L0.1188, L0.1202, L0.1216, L0.1230, L0.1244, L0.2956" + - " Soft Deleting 20 files: L0.1285, L0.1299, L0.1313, L0.1327, L0.1341, L0.1355, L0.1369, L0.1383, L0.1397, L0.1411, L0.1425, L0.1439, L0.1453, L0.1467, L0.1481, L0.1495, L0.1509, L0.1523, L0.1537, L0.1551" - " Creating 1 files" - - "**** Simulation run 248, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 244, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2957[962,1121] 58ns |----------------------------------------L0.2957-----------------------------------------|" - - "L0.993[962,1121] 59ns |-----------------------------------------L0.993-----------------------------------------|" - - "L0.1007[962,1121] 60ns |----------------------------------------L0.1007-----------------------------------------|" - - "L0.1021[962,1121] 61ns |----------------------------------------L0.1021-----------------------------------------|" - - "L0.1035[962,1121] 62ns |----------------------------------------L0.1035-----------------------------------------|" - - "L0.1049[962,1121] 63ns |----------------------------------------L0.1049-----------------------------------------|" - - "L0.1063[962,1121] 64ns |----------------------------------------L0.1063-----------------------------------------|" - - "L0.1077[962,1121] 65ns |----------------------------------------L0.1077-----------------------------------------|" - - "L0.1091[962,1121] 66ns |----------------------------------------L0.1091-----------------------------------------|" - - "L0.1105[962,1121] 67ns |----------------------------------------L0.1105-----------------------------------------|" - - "L0.1119[962,1121] 68ns |----------------------------------------L0.1119-----------------------------------------|" - - "L0.1133[962,1121] 69ns |----------------------------------------L0.1133-----------------------------------------|" - - "L0.1147[962,1121] 70ns |----------------------------------------L0.1147-----------------------------------------|" - - "L0.1161[962,1121] 71ns |----------------------------------------L0.1161-----------------------------------------|" - - "L0.1175[962,1121] 72ns |----------------------------------------L0.1175-----------------------------------------|" - - "L0.1189[962,1121] 73ns |----------------------------------------L0.1189-----------------------------------------|" - - "L0.1203[962,1121] 74ns |----------------------------------------L0.1203-----------------------------------------|" - - "L0.1217[962,1121] 75ns |----------------------------------------L0.1217-----------------------------------------|" - - "L0.1231[962,1121] 76ns |----------------------------------------L0.1231-----------------------------------------|" - - "L0.1245[962,1121] 77ns |----------------------------------------L0.1245-----------------------------------------|" + - "L0.1565[642,801] 100ns |----------------------------------------L0.1565-----------------------------------------|" + - "L0.1579[642,801] 101ns |----------------------------------------L0.1579-----------------------------------------|" + - "L0.1593[642,801] 102ns |----------------------------------------L0.1593-----------------------------------------|" + - "L0.1607[642,801] 103ns |----------------------------------------L0.1607-----------------------------------------|" + - "L0.1621[642,801] 104ns |----------------------------------------L0.1621-----------------------------------------|" + - "L0.1635[642,801] 105ns |----------------------------------------L0.1635-----------------------------------------|" + - "L0.1649[642,801] 106ns |----------------------------------------L0.1649-----------------------------------------|" + - "L0.1663[642,801] 107ns |----------------------------------------L0.1663-----------------------------------------|" + - "L0.1677[642,801] 108ns |----------------------------------------L0.1677-----------------------------------------|" + - "L0.1691[642,801] 
109ns |----------------------------------------L0.1691-----------------------------------------|" + - "L0.1705[642,801] 110ns |----------------------------------------L0.1705-----------------------------------------|" + - "L0.1719[642,801] 111ns |----------------------------------------L0.1719-----------------------------------------|" + - "L0.1733[642,801] 112ns |----------------------------------------L0.1733-----------------------------------------|" + - "L0.1747[642,801] 113ns |----------------------------------------L0.1747-----------------------------------------|" + - "L0.1761[642,801] 114ns |----------------------------------------L0.1761-----------------------------------------|" + - "L0.1775[642,801] 115ns |----------------------------------------L0.1775-----------------------------------------|" + - "L0.1789[642,801] 116ns |----------------------------------------L0.1789-----------------------------------------|" + - "L0.1803[642,801] 117ns |----------------------------------------L0.1803-----------------------------------------|" + - "L0.1817[642,801] 118ns |----------------------------------------L0.1817-----------------------------------------|" + - "L0.1831[642,801] 119ns |----------------------------------------L0.1831-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[642,801] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.993, L0.1007, L0.1021, L0.1035, L0.1049, L0.1063, L0.1077, L0.1091, L0.1105, L0.1119, L0.1133, L0.1147, L0.1161, L0.1175, L0.1189, L0.1203, L0.1217, L0.1231, L0.1245, L0.2957" + - " Soft Deleting 20 files: L0.1565, L0.1579, L0.1593, L0.1607, L0.1621, L0.1635, L0.1649, L0.1663, L0.1677, L0.1691, L0.1705, L0.1719, L0.1733, L0.1747, L0.1761, L0.1775, L0.1789, L0.1803, L0.1817, L0.1831" - " Creating 1 files" - - "**** Simulation run 249, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 245, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2958[1122,1281] 58ns |----------------------------------------L0.2958-----------------------------------------|" - - "L0.994[1122,1281] 59ns |-----------------------------------------L0.994-----------------------------------------|" - - "L0.1008[1122,1281] 60ns |----------------------------------------L0.1008-----------------------------------------|" - - "L0.1022[1122,1281] 61ns |----------------------------------------L0.1022-----------------------------------------|" - - "L0.1036[1122,1281] 62ns |----------------------------------------L0.1036-----------------------------------------|" - - "L0.1050[1122,1281] 63ns |----------------------------------------L0.1050-----------------------------------------|" - - "L0.1064[1122,1281] 64ns |----------------------------------------L0.1064-----------------------------------------|" - - "L0.1078[1122,1281] 65ns |----------------------------------------L0.1078-----------------------------------------|" - - "L0.1092[1122,1281] 66ns |----------------------------------------L0.1092-----------------------------------------|" - - "L0.1106[1122,1281] 67ns |----------------------------------------L0.1106-----------------------------------------|" - - "L0.1120[1122,1281] 68ns |----------------------------------------L0.1120-----------------------------------------|" - - "L0.1134[1122,1281] 69ns |----------------------------------------L0.1134-----------------------------------------|" - - "L0.1148[1122,1281] 70ns |----------------------------------------L0.1148-----------------------------------------|" - - "L0.1162[1122,1281] 71ns |----------------------------------------L0.1162-----------------------------------------|" - - "L0.1176[1122,1281] 72ns |----------------------------------------L0.1176-----------------------------------------|" - - "L0.1190[1122,1281] 73ns |----------------------------------------L0.1190-----------------------------------------|" - - "L0.1204[1122,1281] 74ns |----------------------------------------L0.1204-----------------------------------------|" - - "L0.1218[1122,1281] 75ns |----------------------------------------L0.1218-----------------------------------------|" - - "L0.1232[1122,1281] 76ns |----------------------------------------L0.1232-----------------------------------------|" - - "L0.1246[1122,1281] 77ns |----------------------------------------L0.1246-----------------------------------------|" + - "L0.1845[642,801] 120ns |----------------------------------------L0.1845-----------------------------------------|" + - "L0.1859[642,801] 121ns |----------------------------------------L0.1859-----------------------------------------|" + - "L0.1873[642,801] 122ns |----------------------------------------L0.1873-----------------------------------------|" + - "L0.1887[642,801] 123ns |----------------------------------------L0.1887-----------------------------------------|" + - "L0.1901[642,801] 124ns |----------------------------------------L0.1901-----------------------------------------|" + - "L0.1915[642,801] 125ns |----------------------------------------L0.1915-----------------------------------------|" + - "L0.1929[642,801] 126ns |----------------------------------------L0.1929-----------------------------------------|" + - "L0.1943[642,801] 127ns |----------------------------------------L0.1943-----------------------------------------|" + - "L0.1957[642,801] 128ns |----------------------------------------L0.1957-----------------------------------------|" + - 
"L0.1971[642,801] 129ns |----------------------------------------L0.1971-----------------------------------------|" + - "L0.1985[642,801] 130ns |----------------------------------------L0.1985-----------------------------------------|" + - "L0.1999[642,801] 131ns |----------------------------------------L0.1999-----------------------------------------|" + - "L0.2013[642,801] 132ns |----------------------------------------L0.2013-----------------------------------------|" + - "L0.2027[642,801] 133ns |----------------------------------------L0.2027-----------------------------------------|" + - "L0.2153[642,801] 134ns |----------------------------------------L0.2153-----------------------------------------|" + - "L0.2167[642,801] 135ns |----------------------------------------L0.2167-----------------------------------------|" + - "L0.2041[642,801] 136ns |----------------------------------------L0.2041-----------------------------------------|" + - "L0.2055[642,801] 137ns |----------------------------------------L0.2055-----------------------------------------|" + - "L0.2069[642,801] 138ns |----------------------------------------L0.2069-----------------------------------------|" + - "L0.2083[642,801] 139ns |----------------------------------------L0.2083-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[642,801] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1845, L0.1859, L0.1873, L0.1887, L0.1901, L0.1915, L0.1929, L0.1943, L0.1957, L0.1971, L0.1985, L0.1999, L0.2013, L0.2027, L0.2041, L0.2055, L0.2069, L0.2083, L0.2153, L0.2167" + - " Creating 1 files" + - "**** Simulation run 246, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2097[642,801] 140ns |----------------------------------------L0.2097-----------------------------------------|" + - "L0.2111[642,801] 141ns |----------------------------------------L0.2111-----------------------------------------|" + - "L0.2125[642,801] 142ns |----------------------------------------L0.2125-----------------------------------------|" + - "L0.2139[642,801] 143ns |----------------------------------------L0.2139-----------------------------------------|" + - "L0.2181[642,801] 144ns |----------------------------------------L0.2181-----------------------------------------|" + - "L0.2195[642,801] 145ns |----------------------------------------L0.2195-----------------------------------------|" + - "L0.2209[642,801] 146ns |----------------------------------------L0.2209-----------------------------------------|" + - "L0.2223[642,801] 147ns |----------------------------------------L0.2223-----------------------------------------|" + - "L0.2237[642,801] 148ns |----------------------------------------L0.2237-----------------------------------------|" + - "L0.2251[642,801] 149ns |----------------------------------------L0.2251-----------------------------------------|" + - "L0.2265[642,801] 150ns |----------------------------------------L0.2265-----------------------------------------|" + - "L0.2279[642,801] 151ns |----------------------------------------L0.2279-----------------------------------------|" + - "L0.2293[642,801] 152ns |----------------------------------------L0.2293-----------------------------------------|" + - "L0.2307[642,801] 153ns |----------------------------------------L0.2307-----------------------------------------|" + - "L0.2321[642,801] 154ns |----------------------------------------L0.2321-----------------------------------------|" + - "L0.2335[642,801] 155ns |----------------------------------------L0.2335-----------------------------------------|" + - "L0.2349[642,801] 156ns |----------------------------------------L0.2349-----------------------------------------|" + - "L0.2363[642,801] 157ns |----------------------------------------L0.2363-----------------------------------------|" + - "L0.2377[642,801] 158ns |----------------------------------------L0.2377-----------------------------------------|" + - "L0.2391[642,801] 159ns |----------------------------------------L0.2391-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[642,801] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2097, L0.2111, L0.2125, L0.2139, L0.2181, L0.2195, L0.2209, L0.2223, L0.2237, L0.2251, L0.2265, L0.2279, L0.2293, L0.2307, L0.2321, L0.2335, L0.2349, L0.2363, L0.2377, L0.2391" + - " Creating 1 files" + - "**** Simulation run 247, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2405[642,801] 160ns |----------------------------------------L0.2405-----------------------------------------|" + - "L0.2419[642,801] 161ns |----------------------------------------L0.2419-----------------------------------------|" + - "L0.2432[642,801] 162ns |----------------------------------------L0.2432-----------------------------------------|" + - "L0.2445[642,801] 163ns |----------------------------------------L0.2445-----------------------------------------|" + - "L0.2458[642,801] 164ns |----------------------------------------L0.2458-----------------------------------------|" + - "L0.2471[642,801] 165ns |----------------------------------------L0.2471-----------------------------------------|" + - "L0.2484[642,801] 166ns |----------------------------------------L0.2484-----------------------------------------|" + - "L0.2497[642,801] 167ns |----------------------------------------L0.2497-----------------------------------------|" + - "L0.2510[642,801] 168ns |----------------------------------------L0.2510-----------------------------------------|" + - "L0.2523[642,801] 169ns |----------------------------------------L0.2523-----------------------------------------|" + - "L0.2536[642,801] 170ns |----------------------------------------L0.2536-----------------------------------------|" + - "L0.2549[642,801] 171ns |----------------------------------------L0.2549-----------------------------------------|" + - "L0.2562[642,801] 172ns |----------------------------------------L0.2562-----------------------------------------|" + - "L0.2575[642,801] 173ns |----------------------------------------L0.2575-----------------------------------------|" + - "L0.2588[642,801] 174ns |----------------------------------------L0.2588-----------------------------------------|" + - "L0.2601[642,801] 175ns |----------------------------------------L0.2601-----------------------------------------|" + - "L0.2614[642,801] 176ns |----------------------------------------L0.2614-----------------------------------------|" + - "L0.2627[642,801] 177ns |----------------------------------------L0.2627-----------------------------------------|" + - "L0.2640[642,801] 178ns |----------------------------------------L0.2640-----------------------------------------|" + - "L0.2653[642,801] 179ns |----------------------------------------L0.2653-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[642,801] 179ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2405, L0.2419, L0.2432, L0.2445, L0.2458, L0.2471, L0.2484, L0.2497, L0.2510, L0.2523, L0.2536, L0.2549, L0.2562, L0.2575, L0.2588, L0.2601, L0.2614, L0.2627, L0.2640, L0.2653" + - " Creating 1 files" + - "**** Simulation run 248, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2666[642,801] 180ns |----------------------------------------L0.2666-----------------------------------------|" + - "L0.2679[642,801] 181ns |----------------------------------------L0.2679-----------------------------------------|" + - "L0.2692[642,801] 182ns |----------------------------------------L0.2692-----------------------------------------|" + - "L0.2705[642,801] 183ns |----------------------------------------L0.2705-----------------------------------------|" + - "L0.2718[642,801] 184ns |----------------------------------------L0.2718-----------------------------------------|" + - "L0.2731[642,801] 185ns |----------------------------------------L0.2731-----------------------------------------|" + - "L0.2744[642,801] 186ns |----------------------------------------L0.2744-----------------------------------------|" + - "L0.2757[642,801] 187ns |----------------------------------------L0.2757-----------------------------------------|" + - "L0.2770[642,801] 188ns |----------------------------------------L0.2770-----------------------------------------|" + - "L0.2783[642,801] 189ns |----------------------------------------L0.2783-----------------------------------------|" + - "L0.2796[642,801] 190ns |----------------------------------------L0.2796-----------------------------------------|" + - "L0.2809[642,801] 191ns |----------------------------------------L0.2809-----------------------------------------|" + - "L0.2822[642,801] 192ns |----------------------------------------L0.2822-----------------------------------------|" + - "L0.2835[642,801] 193ns |----------------------------------------L0.2835-----------------------------------------|" + - "L0.2848[642,801] 194ns |----------------------------------------L0.2848-----------------------------------------|" + - "L0.2861[642,801] 195ns |----------------------------------------L0.2861-----------------------------------------|" + - "L0.2874[642,801] 196ns |----------------------------------------L0.2874-----------------------------------------|" + - "L0.2887[642,801] 197ns |----------------------------------------L0.2887-----------------------------------------|" + - "L0.2900[642,801] 198ns |----------------------------------------L0.2900-----------------------------------------|" + - "L0.2913[642,801] 199ns |----------------------------------------L0.2913-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[642,801] 199ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2666, L0.2679, L0.2692, L0.2705, L0.2718, L0.2731, L0.2744, L0.2757, L0.2770, L0.2783, L0.2796, L0.2809, L0.2822, L0.2835, L0.2848, L0.2861, L0.2874, L0.2887, L0.2900, L0.2913" + - " Creating 1 files" + - "**** Simulation run 249, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.206[802,961] 0ns |-----------------------------------------L0.206-----------------------------------------|" + - "L0.218[802,961] 1ns |-----------------------------------------L0.218-----------------------------------------|" + - "L0.230[802,961] 2ns |-----------------------------------------L0.230-----------------------------------------|" + - "L0.242[802,961] 3ns |-----------------------------------------L0.242-----------------------------------------|" + - "L0.254[802,961] 4ns |-----------------------------------------L0.254-----------------------------------------|" + - "L0.266[802,961] 5ns |-----------------------------------------L0.266-----------------------------------------|" + - "L0.278[802,961] 6ns |-----------------------------------------L0.278-----------------------------------------|" + - "L0.290[802,961] 7ns |-----------------------------------------L0.290-----------------------------------------|" + - "L0.302[802,961] 8ns |-----------------------------------------L0.302-----------------------------------------|" + - "L0.314[802,961] 9ns |-----------------------------------------L0.314-----------------------------------------|" + - "L0.326[802,961] 10ns |-----------------------------------------L0.326-----------------------------------------|" + - "L0.338[802,961] 11ns |-----------------------------------------L0.338-----------------------------------------|" + - "L0.350[802,961] 12ns |-----------------------------------------L0.350-----------------------------------------|" + - "L0.362[802,961] 13ns |-----------------------------------------L0.362-----------------------------------------|" + - "L0.374[802,961] 14ns |-----------------------------------------L0.374-----------------------------------------|" + - "L0.386[802,961] 15ns |-----------------------------------------L0.386-----------------------------------------|" + - "L0.398[802,961] 16ns |-----------------------------------------L0.398-----------------------------------------|" + - "L0.410[802,961] 17ns |-----------------------------------------L0.410-----------------------------------------|" + - "L0.534[802,961] 18ns |-----------------------------------------L0.534-----------------------------------------|" + - "L0.546[802,961] 19ns |-----------------------------------------L0.546-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[802,961] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.994, L0.1008, L0.1022, L0.1036, L0.1050, L0.1064, L0.1078, L0.1092, L0.1106, L0.1120, L0.1134, L0.1148, L0.1162, L0.1176, L0.1190, L0.1204, L0.1218, L0.1232, L0.1246, L0.2958" + - " Soft Deleting 20 files: L0.206, L0.218, L0.230, L0.242, L0.254, L0.266, L0.278, L0.290, L0.302, L0.314, L0.326, L0.338, L0.350, L0.362, L0.374, L0.386, L0.398, L0.410, L0.534, L0.546" - " Creating 1 files" - "**** Simulation run 250, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2963[1282,1441] 58ns |----------------------------------------L0.2963-----------------------------------------|" - - "L0.995[1282,1441] 59ns |-----------------------------------------L0.995-----------------------------------------|" - - "L0.1009[1282,1441] 60ns |----------------------------------------L0.1009-----------------------------------------|" - - "L0.1023[1282,1441] 61ns |----------------------------------------L0.1023-----------------------------------------|" - - "L0.1037[1282,1441] 62ns |----------------------------------------L0.1037-----------------------------------------|" - - "L0.1051[1282,1441] 63ns |----------------------------------------L0.1051-----------------------------------------|" - - "L0.1065[1282,1441] 64ns |----------------------------------------L0.1065-----------------------------------------|" - - "L0.1079[1282,1441] 65ns |----------------------------------------L0.1079-----------------------------------------|" - - "L0.1093[1282,1441] 66ns |----------------------------------------L0.1093-----------------------------------------|" - - "L0.1107[1282,1441] 67ns |----------------------------------------L0.1107-----------------------------------------|" - - "L0.1121[1282,1441] 68ns |----------------------------------------L0.1121-----------------------------------------|" - - "L0.1135[1282,1441] 69ns |----------------------------------------L0.1135-----------------------------------------|" - - "L0.1149[1282,1441] 70ns |----------------------------------------L0.1149-----------------------------------------|" - - "L0.1163[1282,1441] 71ns |----------------------------------------L0.1163-----------------------------------------|" - - "L0.1177[1282,1441] 72ns |----------------------------------------L0.1177-----------------------------------------|" - - "L0.1191[1282,1441] 73ns |----------------------------------------L0.1191-----------------------------------------|" - - "L0.1205[1282,1441] 74ns |----------------------------------------L0.1205-----------------------------------------|" - - "L0.1219[1282,1441] 75ns |----------------------------------------L0.1219-----------------------------------------|" - - "L0.1233[1282,1441] 76ns |----------------------------------------L0.1233-----------------------------------------|" - - "L0.1247[1282,1441] 77ns |----------------------------------------L0.1247-----------------------------------------|" + - "L0.422[802,961] 20ns |-----------------------------------------L0.422-----------------------------------------|" + - "L0.436[802,961] 21ns |-----------------------------------------L0.436-----------------------------------------|" + - "L0.450[802,961] 22ns |-----------------------------------------L0.450-----------------------------------------|" + - "L0.464[802,961] 23ns |-----------------------------------------L0.464-----------------------------------------|" + - "L0.478[802,961] 24ns |-----------------------------------------L0.478-----------------------------------------|" + - "L0.492[802,961] 25ns |-----------------------------------------L0.492-----------------------------------------|" + - "L0.506[802,961] 26ns |-----------------------------------------L0.506-----------------------------------------|" + - "L0.520[802,961] 27ns |-----------------------------------------L0.520-----------------------------------------|" + - "L0.558[802,961] 28ns |-----------------------------------------L0.558-----------------------------------------|" + - "L0.572[802,961] 
29ns |-----------------------------------------L0.572-----------------------------------------|" + - "L0.586[802,961] 30ns |-----------------------------------------L0.586-----------------------------------------|" + - "L0.600[802,961] 31ns |-----------------------------------------L0.600-----------------------------------------|" + - "L0.614[802,961] 32ns |-----------------------------------------L0.614-----------------------------------------|" + - "L0.628[802,961] 33ns |-----------------------------------------L0.628-----------------------------------------|" + - "L0.642[802,961] 34ns |-----------------------------------------L0.642-----------------------------------------|" + - "L0.656[802,961] 35ns |-----------------------------------------L0.656-----------------------------------------|" + - "L0.670[802,961] 36ns |-----------------------------------------L0.670-----------------------------------------|" + - "L0.684[802,961] 37ns |-----------------------------------------L0.684-----------------------------------------|" + - "L0.698[802,961] 38ns |-----------------------------------------L0.698-----------------------------------------|" + - "L0.712[802,961] 39ns |-----------------------------------------L0.712-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.995, L0.1009, L0.1023, L0.1037, L0.1051, L0.1065, L0.1079, L0.1093, L0.1107, L0.1121, L0.1135, L0.1149, L0.1163, L0.1177, L0.1191, L0.1205, L0.1219, L0.1233, L0.1247, L0.2963" + - " Soft Deleting 20 files: L0.422, L0.436, L0.450, L0.464, L0.478, L0.492, L0.506, L0.520, L0.558, L0.572, L0.586, L0.600, L0.614, L0.628, L0.642, L0.656, L0.670, L0.684, L0.698, L0.712" - " Creating 1 files" - "**** Simulation run 251, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2964[1442,1601] 58ns |----------------------------------------L0.2964-----------------------------------------|" - - "L0.996[1442,1601] 59ns |-----------------------------------------L0.996-----------------------------------------|" - - "L0.1010[1442,1601] 60ns |----------------------------------------L0.1010-----------------------------------------|" - - "L0.1024[1442,1601] 61ns |----------------------------------------L0.1024-----------------------------------------|" - - "L0.1038[1442,1601] 62ns |----------------------------------------L0.1038-----------------------------------------|" - - "L0.1052[1442,1601] 63ns |----------------------------------------L0.1052-----------------------------------------|" - - "L0.1066[1442,1601] 64ns |----------------------------------------L0.1066-----------------------------------------|" - - "L0.1080[1442,1601] 65ns |----------------------------------------L0.1080-----------------------------------------|" - - "L0.1094[1442,1601] 66ns |----------------------------------------L0.1094-----------------------------------------|" - - "L0.1108[1442,1601] 67ns |----------------------------------------L0.1108-----------------------------------------|" - - "L0.1122[1442,1601] 68ns |----------------------------------------L0.1122-----------------------------------------|" - - "L0.1136[1442,1601] 69ns |----------------------------------------L0.1136-----------------------------------------|" - - "L0.1150[1442,1601] 70ns |----------------------------------------L0.1150-----------------------------------------|" - - "L0.1164[1442,1601] 71ns |----------------------------------------L0.1164-----------------------------------------|" - - "L0.1178[1442,1601] 72ns |----------------------------------------L0.1178-----------------------------------------|" - - "L0.1192[1442,1601] 73ns |----------------------------------------L0.1192-----------------------------------------|" - - "L0.1206[1442,1601] 74ns |----------------------------------------L0.1206-----------------------------------------|" - - "L0.1220[1442,1601] 75ns |----------------------------------------L0.1220-----------------------------------------|" - - "L0.1234[1442,1601] 76ns |----------------------------------------L0.1234-----------------------------------------|" - - "L0.1248[1442,1601] 77ns |----------------------------------------L0.1248-----------------------------------------|" + - "L0.726[802,961] 40ns |-----------------------------------------L0.726-----------------------------------------|" + - "L0.740[802,961] 41ns |-----------------------------------------L0.740-----------------------------------------|" + - "L0.754[802,961] 42ns |-----------------------------------------L0.754-----------------------------------------|" + - "L0.768[802,961] 43ns |-----------------------------------------L0.768-----------------------------------------|" + - "L0.782[802,961] 44ns |-----------------------------------------L0.782-----------------------------------------|" + - "L0.796[802,961] 45ns |-----------------------------------------L0.796-----------------------------------------|" + - "L0.810[802,961] 46ns |-----------------------------------------L0.810-----------------------------------------|" + - "L0.824[802,961] 47ns |-----------------------------------------L0.824-----------------------------------------|" + - "L0.838[802,961] 48ns |-----------------------------------------L0.838-----------------------------------------|" + - "L0.852[802,961] 
49ns |-----------------------------------------L0.852-----------------------------------------|" + - "L0.866[802,961] 50ns |-----------------------------------------L0.866-----------------------------------------|" + - "L0.880[802,961] 51ns |-----------------------------------------L0.880-----------------------------------------|" + - "L0.894[802,961] 52ns |-----------------------------------------L0.894-----------------------------------------|" + - "L0.908[802,961] 53ns |-----------------------------------------L0.908-----------------------------------------|" + - "L0.922[802,961] 54ns |-----------------------------------------L0.922-----------------------------------------|" + - "L0.936[802,961] 55ns |-----------------------------------------L0.936-----------------------------------------|" + - "L0.950[802,961] 56ns |-----------------------------------------L0.950-----------------------------------------|" + - "L0.964[802,961] 57ns |-----------------------------------------L0.964-----------------------------------------|" + - "L0.978[802,961] 58ns |-----------------------------------------L0.978-----------------------------------------|" + - "L0.992[802,961] 59ns |-----------------------------------------L0.992-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.996, L0.1010, L0.1024, L0.1038, L0.1052, L0.1066, L0.1080, L0.1094, L0.1108, L0.1122, L0.1136, L0.1150, L0.1164, L0.1178, L0.1192, L0.1206, L0.1220, L0.1234, L0.1248, L0.2964" + - " Soft Deleting 20 files: L0.726, L0.740, L0.754, L0.768, L0.782, L0.796, L0.810, L0.824, L0.838, L0.852, L0.866, L0.880, L0.894, L0.908, L0.922, L0.936, L0.950, L0.964, L0.978, L0.992" - " Creating 1 files" - "**** Simulation run 252, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2959[1602,1761] 58ns |----------------------------------------L0.2959-----------------------------------------|" - - "L0.997[1602,1761] 59ns |-----------------------------------------L0.997-----------------------------------------|" - - "L0.1011[1602,1761] 60ns |----------------------------------------L0.1011-----------------------------------------|" - - "L0.1025[1602,1761] 61ns |----------------------------------------L0.1025-----------------------------------------|" - - "L0.1039[1602,1761] 62ns |----------------------------------------L0.1039-----------------------------------------|" - - "L0.1053[1602,1761] 63ns |----------------------------------------L0.1053-----------------------------------------|" - - "L0.1067[1602,1761] 64ns |----------------------------------------L0.1067-----------------------------------------|" - - "L0.1081[1602,1761] 65ns |----------------------------------------L0.1081-----------------------------------------|" - - "L0.1095[1602,1761] 66ns |----------------------------------------L0.1095-----------------------------------------|" - - "L0.1109[1602,1761] 67ns |----------------------------------------L0.1109-----------------------------------------|" - - "L0.1123[1602,1761] 68ns |----------------------------------------L0.1123-----------------------------------------|" - - "L0.1137[1602,1761] 69ns |----------------------------------------L0.1137-----------------------------------------|" - - "L0.1151[1602,1761] 70ns |----------------------------------------L0.1151-----------------------------------------|" - - "L0.1165[1602,1761] 71ns |----------------------------------------L0.1165-----------------------------------------|" - - "L0.1179[1602,1761] 72ns |----------------------------------------L0.1179-----------------------------------------|" - - "L0.1193[1602,1761] 73ns |----------------------------------------L0.1193-----------------------------------------|" - - "L0.1207[1602,1761] 74ns |----------------------------------------L0.1207-----------------------------------------|" - - "L0.1221[1602,1761] 75ns |----------------------------------------L0.1221-----------------------------------------|" - - "L0.1235[1602,1761] 76ns |----------------------------------------L0.1235-----------------------------------------|" - - "L0.1249[1602,1761] 77ns |----------------------------------------L0.1249-----------------------------------------|" + - "L0.1006[802,961] 60ns |----------------------------------------L0.1006-----------------------------------------|" + - "L0.1020[802,961] 61ns |----------------------------------------L0.1020-----------------------------------------|" + - "L0.1034[802,961] 62ns |----------------------------------------L0.1034-----------------------------------------|" + - "L0.1048[802,961] 63ns |----------------------------------------L0.1048-----------------------------------------|" + - "L0.1062[802,961] 64ns |----------------------------------------L0.1062-----------------------------------------|" + - "L0.1076[802,961] 65ns |----------------------------------------L0.1076-----------------------------------------|" + - "L0.1090[802,961] 66ns |----------------------------------------L0.1090-----------------------------------------|" + - "L0.1104[802,961] 67ns |----------------------------------------L0.1104-----------------------------------------|" + - "L0.1118[802,961] 68ns |----------------------------------------L0.1118-----------------------------------------|" + - 
"L0.1132[802,961] 69ns |----------------------------------------L0.1132-----------------------------------------|" + - "L0.1146[802,961] 70ns |----------------------------------------L0.1146-----------------------------------------|" + - "L0.1160[802,961] 71ns |----------------------------------------L0.1160-----------------------------------------|" + - "L0.1174[802,961] 72ns |----------------------------------------L0.1174-----------------------------------------|" + - "L0.1188[802,961] 73ns |----------------------------------------L0.1188-----------------------------------------|" + - "L0.1202[802,961] 74ns |----------------------------------------L0.1202-----------------------------------------|" + - "L0.1216[802,961] 75ns |----------------------------------------L0.1216-----------------------------------------|" + - "L0.1230[802,961] 76ns |----------------------------------------L0.1230-----------------------------------------|" + - "L0.1244[802,961] 77ns |----------------------------------------L0.1244-----------------------------------------|" + - "L0.1258[802,961] 78ns |----------------------------------------L0.1258-----------------------------------------|" + - "L0.1272[802,961] 79ns |----------------------------------------L0.1272-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1602,1761] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.997, L0.1011, L0.1025, L0.1039, L0.1053, L0.1067, L0.1081, L0.1095, L0.1109, L0.1123, L0.1137, L0.1151, L0.1165, L0.1179, L0.1193, L0.1207, L0.1221, L0.1235, L0.1249, L0.2959" + - " Soft Deleting 20 files: L0.1006, L0.1020, L0.1034, L0.1048, L0.1062, L0.1076, L0.1090, L0.1104, L0.1118, L0.1132, L0.1146, L0.1160, L0.1174, L0.1188, L0.1202, L0.1216, L0.1230, L0.1244, L0.1258, L0.1272" - " Creating 1 files" - "**** Simulation run 253, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2960[1762,2000] 58ns |----------------------------------------L0.2960-----------------------------------------|" - - "L0.998[1762,2000] 59ns |-----------------------------------------L0.998-----------------------------------------|" - - "L0.1012[1762,2000] 60ns |----------------------------------------L0.1012-----------------------------------------|" - - "L0.1026[1762,2000] 61ns |----------------------------------------L0.1026-----------------------------------------|" - - "L0.1040[1762,2000] 62ns |----------------------------------------L0.1040-----------------------------------------|" - - "L0.1054[1762,2000] 63ns |----------------------------------------L0.1054-----------------------------------------|" - - "L0.1068[1762,2000] 64ns |----------------------------------------L0.1068-----------------------------------------|" - - "L0.1082[1762,2000] 65ns |----------------------------------------L0.1082-----------------------------------------|" - - "L0.1096[1762,2000] 66ns |----------------------------------------L0.1096-----------------------------------------|" - - "L0.1110[1762,2000] 67ns |----------------------------------------L0.1110-----------------------------------------|" - - "L0.1124[1762,2000] 68ns |----------------------------------------L0.1124-----------------------------------------|" - - "L0.1138[1762,2000] 69ns |----------------------------------------L0.1138-----------------------------------------|" - - "L0.1152[1762,2000] 70ns |----------------------------------------L0.1152-----------------------------------------|" - - "L0.1166[1762,2000] 71ns |----------------------------------------L0.1166-----------------------------------------|" - - "L0.1180[1762,2000] 72ns |----------------------------------------L0.1180-----------------------------------------|" - - "L0.1194[1762,2000] 73ns |----------------------------------------L0.1194-----------------------------------------|" - - "L0.1208[1762,2000] 74ns |----------------------------------------L0.1208-----------------------------------------|" - - "L0.1222[1762,2000] 75ns |----------------------------------------L0.1222-----------------------------------------|" - - "L0.1236[1762,2000] 76ns |----------------------------------------L0.1236-----------------------------------------|" - - "L0.1250[1762,2000] 77ns |----------------------------------------L0.1250-----------------------------------------|" + - "L0.1286[802,961] 80ns |----------------------------------------L0.1286-----------------------------------------|" + - "L0.1300[802,961] 81ns |----------------------------------------L0.1300-----------------------------------------|" + - "L0.1314[802,961] 82ns |----------------------------------------L0.1314-----------------------------------------|" + - "L0.1328[802,961] 83ns |----------------------------------------L0.1328-----------------------------------------|" + - "L0.1342[802,961] 84ns |----------------------------------------L0.1342-----------------------------------------|" + - "L0.1356[802,961] 85ns |----------------------------------------L0.1356-----------------------------------------|" + - "L0.1370[802,961] 86ns |----------------------------------------L0.1370-----------------------------------------|" + - "L0.1384[802,961] 87ns |----------------------------------------L0.1384-----------------------------------------|" + - "L0.1398[802,961] 88ns |----------------------------------------L0.1398-----------------------------------------|" + - 
"L0.1412[802,961] 89ns |----------------------------------------L0.1412-----------------------------------------|" + - "L0.1426[802,961] 90ns |----------------------------------------L0.1426-----------------------------------------|" + - "L0.1440[802,961] 91ns |----------------------------------------L0.1440-----------------------------------------|" + - "L0.1454[802,961] 92ns |----------------------------------------L0.1454-----------------------------------------|" + - "L0.1468[802,961] 93ns |----------------------------------------L0.1468-----------------------------------------|" + - "L0.1482[802,961] 94ns |----------------------------------------L0.1482-----------------------------------------|" + - "L0.1496[802,961] 95ns |----------------------------------------L0.1496-----------------------------------------|" + - "L0.1510[802,961] 96ns |----------------------------------------L0.1510-----------------------------------------|" + - "L0.1524[802,961] 97ns |----------------------------------------L0.1524-----------------------------------------|" + - "L0.1538[802,961] 98ns |----------------------------------------L0.1538-----------------------------------------|" + - "L0.1552[802,961] 99ns |----------------------------------------L0.1552-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 77ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.998, L0.1012, L0.1026, L0.1040, L0.1054, L0.1068, L0.1082, L0.1096, L0.1110, L0.1124, L0.1138, L0.1152, L0.1166, L0.1180, L0.1194, L0.1208, L0.1222, L0.1236, L0.1250, L0.2960" + - " Soft Deleting 20 files: L0.1286, L0.1300, L0.1314, L0.1328, L0.1342, L0.1356, L0.1370, L0.1384, L0.1398, L0.1412, L0.1426, L0.1440, L0.1454, L0.1468, L0.1482, L0.1496, L0.1510, L0.1524, L0.1538, L0.1552" - " Creating 1 files" - "**** Simulation run 254, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2961[2001,2086] 77ns |----------------------------------------L0.2961-----------------------------------------|" - - "L0.1265[2001,2086] 78ns |----------------------------------------L0.1265-----------------------------------------|" - - "L0.1279[2001,2086] 79ns |----------------------------------------L0.1279-----------------------------------------|" - - "L0.1293[2001,2086] 80ns |----------------------------------------L0.1293-----------------------------------------|" - - "L0.1307[2001,2086] 81ns |----------------------------------------L0.1307-----------------------------------------|" - - "L0.1321[2001,2086] 82ns |----------------------------------------L0.1321-----------------------------------------|" - - "L0.1335[2001,2086] 83ns |----------------------------------------L0.1335-----------------------------------------|" - - "L0.1349[2001,2086] 84ns |----------------------------------------L0.1349-----------------------------------------|" - - "L0.1363[2001,2086] 85ns |----------------------------------------L0.1363-----------------------------------------|" - - "L0.1377[2001,2086] 86ns |----------------------------------------L0.1377-----------------------------------------|" - - "L0.1391[2001,2086] 87ns |----------------------------------------L0.1391-----------------------------------------|" - - "L0.1405[2001,2086] 88ns |----------------------------------------L0.1405-----------------------------------------|" - - "L0.1419[2001,2086] 89ns |----------------------------------------L0.1419-----------------------------------------|" - - "L0.1433[2001,2086] 90ns |----------------------------------------L0.1433-----------------------------------------|" - - "L0.1447[2001,2086] 91ns |----------------------------------------L0.1447-----------------------------------------|" - - "L0.1461[2001,2086] 92ns |----------------------------------------L0.1461-----------------------------------------|" - - "L0.1475[2001,2086] 93ns |----------------------------------------L0.1475-----------------------------------------|" - - "L0.1489[2001,2086] 94ns |----------------------------------------L0.1489-----------------------------------------|" - - "L0.1503[2001,2086] 95ns |----------------------------------------L0.1503-----------------------------------------|" - - "L0.1517[2001,2086] 96ns |----------------------------------------L0.1517-----------------------------------------|" + - "L0.1566[802,961] 100ns |----------------------------------------L0.1566-----------------------------------------|" + - "L0.1580[802,961] 101ns |----------------------------------------L0.1580-----------------------------------------|" + - "L0.1594[802,961] 102ns |----------------------------------------L0.1594-----------------------------------------|" + - "L0.1608[802,961] 103ns |----------------------------------------L0.1608-----------------------------------------|" + - "L0.1622[802,961] 104ns |----------------------------------------L0.1622-----------------------------------------|" + - "L0.1636[802,961] 105ns |----------------------------------------L0.1636-----------------------------------------|" + - "L0.1650[802,961] 106ns |----------------------------------------L0.1650-----------------------------------------|" + - "L0.1664[802,961] 107ns |----------------------------------------L0.1664-----------------------------------------|" + - "L0.1678[802,961] 108ns |----------------------------------------L0.1678-----------------------------------------|" + - 
"L0.1692[802,961] 109ns |----------------------------------------L0.1692-----------------------------------------|" + - "L0.1706[802,961] 110ns |----------------------------------------L0.1706-----------------------------------------|" + - "L0.1720[802,961] 111ns |----------------------------------------L0.1720-----------------------------------------|" + - "L0.1734[802,961] 112ns |----------------------------------------L0.1734-----------------------------------------|" + - "L0.1748[802,961] 113ns |----------------------------------------L0.1748-----------------------------------------|" + - "L0.1762[802,961] 114ns |----------------------------------------L0.1762-----------------------------------------|" + - "L0.1776[802,961] 115ns |----------------------------------------L0.1776-----------------------------------------|" + - "L0.1790[802,961] 116ns |----------------------------------------L0.1790-----------------------------------------|" + - "L0.1804[802,961] 117ns |----------------------------------------L0.1804-----------------------------------------|" + - "L0.1818[802,961] 118ns |----------------------------------------L0.1818-----------------------------------------|" + - "L0.1832[802,961] 119ns |----------------------------------------L0.1832-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 96ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1265, L0.1279, L0.1293, L0.1307, L0.1321, L0.1335, L0.1349, L0.1363, L0.1377, L0.1391, L0.1405, L0.1419, L0.1433, L0.1447, L0.1461, L0.1475, L0.1489, L0.1503, L0.1517, L0.2961" - - " Creating 1 files" - - "**** Simulation run 255, type=compact(ManySmallFiles). 
20 Input Files, 770b total:" - - "L0 " - - "L0.2962[2087,770000] 77ns 580b|-------------------------------L0.2962--------------------------------| " - - "L0.1266[2087,780000] 78ns 10b|--------------------------------L0.1266--------------------------------| " - - "L0.1280[2087,790000] 79ns 10b|--------------------------------L0.1280---------------------------------| " - - "L0.1294[2087,800000] 80ns 10b|--------------------------------L0.1294---------------------------------| " - - "L0.1308[2087,810000] 81ns 10b|---------------------------------L0.1308---------------------------------| " - - "L0.1322[2087,820000] 82ns 10b|---------------------------------L0.1322----------------------------------| " - - "L0.1336[2087,830000] 83ns 10b|----------------------------------L0.1336----------------------------------| " - - "L0.1350[2087,840000] 84ns 10b|----------------------------------L0.1350-----------------------------------| " - - "L0.1364[2087,850000] 85ns 10b|-----------------------------------L0.1364-----------------------------------| " - - "L0.1378[2087,860000] 86ns 10b|-----------------------------------L0.1378------------------------------------| " - - "L0.1392[2087,870000] 87ns 10b|------------------------------------L0.1392------------------------------------| " - - "L0.1406[2087,880000] 88ns 10b|------------------------------------L0.1406-------------------------------------| " - - "L0.1420[2087,890000] 89ns 10b|-------------------------------------L0.1420-------------------------------------| " - - "L0.1434[2087,900000] 90ns 10b|-------------------------------------L0.1434--------------------------------------| " - - "L0.1448[2087,910000] 91ns 10b|--------------------------------------L0.1448--------------------------------------| " - - "L0.1462[2087,920000] 92ns 10b|--------------------------------------L0.1462---------------------------------------| " - - "L0.1476[2087,930000] 93ns 10b|---------------------------------------L0.1476---------------------------------------| " - - "L0.1490[2087,940000] 94ns 10b|---------------------------------------L0.1490----------------------------------------| " - - "L0.1504[2087,950000] 95ns 10b|----------------------------------------L0.1504----------------------------------------| " - - "L0.1518[2087,960000] 96ns 10b|----------------------------------------L0.1518-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 770b total:" - - "L0, all files 770b " - - "L0.?[2087,960000] 96ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1266, L0.1280, L0.1294, L0.1308, L0.1322, L0.1336, L0.1350, L0.1364, L0.1378, L0.1392, L0.1406, L0.1420, L0.1434, L0.1448, L0.1462, L0.1476, L0.1490, L0.1504, L0.1518, L0.2962" + - "L0.?[802,961] 119ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1566, L0.1580, L0.1594, L0.1608, L0.1622, L0.1636, L0.1650, L0.1664, L0.1678, L0.1692, L0.1706, L0.1720, L0.1734, L0.1748, L0.1762, L0.1776, L0.1790, L0.1804, L0.1818, L0.1832" - " Creating 1 files" - - "**** Simulation run 256, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 255, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2965[20,161] 77ns |----------------------------------------L0.2965-----------------------------------------|" - - "L0.1253[78,161] 78ns |---------------------L0.1253----------------------| " - - "L0.1267[79,161] 79ns |---------------------L0.1267----------------------| " - - "L0.1281[80,161] 80ns |---------------------L0.1281---------------------| " - - "L0.1295[81,161] 81ns |---------------------L0.1295---------------------| " - - "L0.1309[82,161] 82ns |--------------------L0.1309---------------------| " - - "L0.1323[83,161] 83ns |--------------------L0.1323--------------------| " - - "L0.1337[84,161] 84ns |--------------------L0.1337--------------------| " - - "L0.1351[85,161] 85ns |-------------------L0.1351--------------------| " - - "L0.1365[86,161] 86ns |-------------------L0.1365-------------------| " - - "L0.1379[87,161] 87ns |-------------------L0.1379-------------------| " - - "L0.1393[88,161] 88ns |------------------L0.1393-------------------| " - - "L0.1407[89,161] 89ns |------------------L0.1407------------------| " - - "L0.1421[90,161] 90ns |------------------L0.1421------------------| " - - "L0.1435[91,161] 91ns |-----------------L0.1435------------------| " - - "L0.1449[92,161] 92ns |-----------------L0.1449------------------| " - - "L0.1463[93,161] 93ns |-----------------L0.1463-----------------| " - - "L0.1477[94,161] 94ns |----------------L0.1477-----------------| " - - "L0.1491[95,161] 95ns |----------------L0.1491-----------------| " - - "L0.1505[96,161] 96ns |----------------L0.1505----------------| " + - "L0.1846[802,961] 120ns |----------------------------------------L0.1846-----------------------------------------|" + - "L0.1860[802,961] 121ns |----------------------------------------L0.1860-----------------------------------------|" + - "L0.1874[802,961] 122ns |----------------------------------------L0.1874-----------------------------------------|" + - "L0.1888[802,961] 123ns |----------------------------------------L0.1888-----------------------------------------|" + - "L0.1902[802,961] 124ns |----------------------------------------L0.1902-----------------------------------------|" + - "L0.1916[802,961] 125ns |----------------------------------------L0.1916-----------------------------------------|" + - "L0.1930[802,961] 126ns |----------------------------------------L0.1930-----------------------------------------|" + - "L0.1944[802,961] 127ns |----------------------------------------L0.1944-----------------------------------------|" + - "L0.1958[802,961] 128ns |----------------------------------------L0.1958-----------------------------------------|" + - "L0.1972[802,961] 129ns |----------------------------------------L0.1972-----------------------------------------|" + - "L0.1986[802,961] 130ns |----------------------------------------L0.1986-----------------------------------------|" + - "L0.2000[802,961] 131ns |----------------------------------------L0.2000-----------------------------------------|" + - "L0.2014[802,961] 132ns |----------------------------------------L0.2014-----------------------------------------|" + - "L0.2028[802,961] 133ns |----------------------------------------L0.2028-----------------------------------------|" + - "L0.2154[802,961] 134ns |----------------------------------------L0.2154-----------------------------------------|" + - "L0.2168[802,961] 135ns |----------------------------------------L0.2168-----------------------------------------|" + - "L0.2042[802,961] 136ns 
|----------------------------------------L0.2042-----------------------------------------|" + - "L0.2056[802,961] 137ns |----------------------------------------L0.2056-----------------------------------------|" + - "L0.2070[802,961] 138ns |----------------------------------------L0.2070-----------------------------------------|" + - "L0.2084[802,961] 139ns |----------------------------------------L0.2084-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[20,161] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1253, L0.1267, L0.1281, L0.1295, L0.1309, L0.1323, L0.1337, L0.1351, L0.1365, L0.1379, L0.1393, L0.1407, L0.1421, L0.1435, L0.1449, L0.1463, L0.1477, L0.1491, L0.1505, L0.2965" + - " Soft Deleting 20 files: L0.1846, L0.1860, L0.1874, L0.1888, L0.1902, L0.1916, L0.1930, L0.1944, L0.1958, L0.1972, L0.1986, L0.2000, L0.2014, L0.2028, L0.2042, L0.2056, L0.2070, L0.2084, L0.2154, L0.2168" - " Creating 1 files" - - "**** Simulation run 257, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 256, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2966[162,321] 77ns |----------------------------------------L0.2966-----------------------------------------|" - - "L0.1254[162,321] 78ns |----------------------------------------L0.1254-----------------------------------------|" - - "L0.1268[162,321] 79ns |----------------------------------------L0.1268-----------------------------------------|" - - "L0.1282[162,321] 80ns |----------------------------------------L0.1282-----------------------------------------|" - - "L0.1296[162,321] 81ns |----------------------------------------L0.1296-----------------------------------------|" - - "L0.1310[162,321] 82ns |----------------------------------------L0.1310-----------------------------------------|" - - "L0.1324[162,321] 83ns |----------------------------------------L0.1324-----------------------------------------|" - - "L0.1338[162,321] 84ns |----------------------------------------L0.1338-----------------------------------------|" - - "L0.1352[162,321] 85ns |----------------------------------------L0.1352-----------------------------------------|" - - "L0.1366[162,321] 86ns |----------------------------------------L0.1366-----------------------------------------|" - - "L0.1380[162,321] 87ns |----------------------------------------L0.1380-----------------------------------------|" - - "L0.1394[162,321] 88ns |----------------------------------------L0.1394-----------------------------------------|" - - "L0.1408[162,321] 89ns |----------------------------------------L0.1408-----------------------------------------|" - - "L0.1422[162,321] 90ns |----------------------------------------L0.1422-----------------------------------------|" - - "L0.1436[162,321] 91ns |----------------------------------------L0.1436-----------------------------------------|" - - "L0.1450[162,321] 92ns |----------------------------------------L0.1450-----------------------------------------|" - - "L0.1464[162,321] 93ns |----------------------------------------L0.1464-----------------------------------------|" - - "L0.1478[162,321] 94ns 
|----------------------------------------L0.1478-----------------------------------------|" - - "L0.1492[162,321] 95ns |----------------------------------------L0.1492-----------------------------------------|" - - "L0.1506[162,321] 96ns |----------------------------------------L0.1506-----------------------------------------|" + - "L0.2098[802,961] 140ns |----------------------------------------L0.2098-----------------------------------------|" + - "L0.2112[802,961] 141ns |----------------------------------------L0.2112-----------------------------------------|" + - "L0.2126[802,961] 142ns |----------------------------------------L0.2126-----------------------------------------|" + - "L0.2140[802,961] 143ns |----------------------------------------L0.2140-----------------------------------------|" + - "L0.2182[802,961] 144ns |----------------------------------------L0.2182-----------------------------------------|" + - "L0.2196[802,961] 145ns |----------------------------------------L0.2196-----------------------------------------|" + - "L0.2210[802,961] 146ns |----------------------------------------L0.2210-----------------------------------------|" + - "L0.2224[802,961] 147ns |----------------------------------------L0.2224-----------------------------------------|" + - "L0.2238[802,961] 148ns |----------------------------------------L0.2238-----------------------------------------|" + - "L0.2252[802,961] 149ns |----------------------------------------L0.2252-----------------------------------------|" + - "L0.2266[802,961] 150ns |----------------------------------------L0.2266-----------------------------------------|" + - "L0.2280[802,961] 151ns |----------------------------------------L0.2280-----------------------------------------|" + - "L0.2294[802,961] 152ns |----------------------------------------L0.2294-----------------------------------------|" + - "L0.2308[802,961] 153ns |----------------------------------------L0.2308-----------------------------------------|" + - "L0.2322[802,961] 154ns |----------------------------------------L0.2322-----------------------------------------|" + - "L0.2336[802,961] 155ns |----------------------------------------L0.2336-----------------------------------------|" + - "L0.2350[802,961] 156ns |----------------------------------------L0.2350-----------------------------------------|" + - "L0.2364[802,961] 157ns |----------------------------------------L0.2364-----------------------------------------|" + - "L0.2378[802,961] 158ns |----------------------------------------L0.2378-----------------------------------------|" + - "L0.2392[802,961] 159ns |----------------------------------------L0.2392-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 159ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1254, L0.1268, L0.1282, L0.1296, L0.1310, L0.1324, L0.1338, L0.1352, L0.1366, L0.1380, L0.1394, L0.1408, L0.1422, L0.1436, L0.1450, L0.1464, L0.1478, L0.1492, L0.1506, L0.2966" + - " Soft Deleting 20 files: L0.2098, L0.2112, L0.2126, L0.2140, L0.2182, L0.2196, L0.2210, L0.2224, L0.2238, L0.2252, L0.2266, L0.2280, L0.2294, L0.2308, L0.2322, L0.2336, L0.2350, L0.2364, L0.2378, L0.2392" - " Creating 1 files" - - "**** Simulation run 258, 
type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 257, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2967[322,481] 77ns |----------------------------------------L0.2967-----------------------------------------|" - - "L0.1255[322,481] 78ns |----------------------------------------L0.1255-----------------------------------------|" - - "L0.1269[322,481] 79ns |----------------------------------------L0.1269-----------------------------------------|" - - "L0.1283[322,481] 80ns |----------------------------------------L0.1283-----------------------------------------|" - - "L0.1297[322,481] 81ns |----------------------------------------L0.1297-----------------------------------------|" - - "L0.1311[322,481] 82ns |----------------------------------------L0.1311-----------------------------------------|" - - "L0.1325[322,481] 83ns |----------------------------------------L0.1325-----------------------------------------|" - - "L0.1339[322,481] 84ns |----------------------------------------L0.1339-----------------------------------------|" - - "L0.1353[322,481] 85ns |----------------------------------------L0.1353-----------------------------------------|" - - "L0.1367[322,481] 86ns |----------------------------------------L0.1367-----------------------------------------|" - - "L0.1381[322,481] 87ns |----------------------------------------L0.1381-----------------------------------------|" - - "L0.1395[322,481] 88ns |----------------------------------------L0.1395-----------------------------------------|" - - "L0.1409[322,481] 89ns |----------------------------------------L0.1409-----------------------------------------|" - - "L0.1423[322,481] 90ns |----------------------------------------L0.1423-----------------------------------------|" - - "L0.1437[322,481] 91ns |----------------------------------------L0.1437-----------------------------------------|" - - "L0.1451[322,481] 92ns |----------------------------------------L0.1451-----------------------------------------|" - - "L0.1465[322,481] 93ns |----------------------------------------L0.1465-----------------------------------------|" - - "L0.1479[322,481] 94ns |----------------------------------------L0.1479-----------------------------------------|" - - "L0.1493[322,481] 95ns |----------------------------------------L0.1493-----------------------------------------|" - - "L0.1507[322,481] 96ns |----------------------------------------L0.1507-----------------------------------------|" + - "L0.2406[802,961] 160ns |----------------------------------------L0.2406-----------------------------------------|" + - "L0.2420[802,961] 161ns |----------------------------------------L0.2420-----------------------------------------|" + - "L0.2433[802,961] 162ns |----------------------------------------L0.2433-----------------------------------------|" + - "L0.2446[802,961] 163ns |----------------------------------------L0.2446-----------------------------------------|" + - "L0.2459[802,961] 164ns |----------------------------------------L0.2459-----------------------------------------|" + - "L0.2472[802,961] 165ns |----------------------------------------L0.2472-----------------------------------------|" + - "L0.2485[802,961] 166ns |----------------------------------------L0.2485-----------------------------------------|" + - "L0.2498[802,961] 167ns |----------------------------------------L0.2498-----------------------------------------|" + - "L0.2511[802,961] 168ns 
|----------------------------------------L0.2511-----------------------------------------|" + - "L0.2524[802,961] 169ns |----------------------------------------L0.2524-----------------------------------------|" + - "L0.2537[802,961] 170ns |----------------------------------------L0.2537-----------------------------------------|" + - "L0.2550[802,961] 171ns |----------------------------------------L0.2550-----------------------------------------|" + - "L0.2563[802,961] 172ns |----------------------------------------L0.2563-----------------------------------------|" + - "L0.2576[802,961] 173ns |----------------------------------------L0.2576-----------------------------------------|" + - "L0.2589[802,961] 174ns |----------------------------------------L0.2589-----------------------------------------|" + - "L0.2602[802,961] 175ns |----------------------------------------L0.2602-----------------------------------------|" + - "L0.2615[802,961] 176ns |----------------------------------------L0.2615-----------------------------------------|" + - "L0.2628[802,961] 177ns |----------------------------------------L0.2628-----------------------------------------|" + - "L0.2641[802,961] 178ns |----------------------------------------L0.2641-----------------------------------------|" + - "L0.2654[802,961] 179ns |----------------------------------------L0.2654-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[322,481] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1255, L0.1269, L0.1283, L0.1297, L0.1311, L0.1325, L0.1339, L0.1353, L0.1367, L0.1381, L0.1395, L0.1409, L0.1423, L0.1437, L0.1451, L0.1465, L0.1479, L0.1493, L0.1507, L0.2967" + - " Soft Deleting 20 files: L0.2406, L0.2420, L0.2433, L0.2446, L0.2459, L0.2472, L0.2485, L0.2498, L0.2511, L0.2524, L0.2537, L0.2550, L0.2563, L0.2576, L0.2589, L0.2602, L0.2615, L0.2628, L0.2641, L0.2654" - " Creating 1 files" - - "**** Simulation run 259, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 258, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2968[482,641] 77ns |----------------------------------------L0.2968-----------------------------------------|" - - "L0.1256[482,641] 78ns |----------------------------------------L0.1256-----------------------------------------|" - - "L0.1270[482,641] 79ns |----------------------------------------L0.1270-----------------------------------------|" - - "L0.1284[482,641] 80ns |----------------------------------------L0.1284-----------------------------------------|" - - "L0.1298[482,641] 81ns |----------------------------------------L0.1298-----------------------------------------|" - - "L0.1312[482,641] 82ns |----------------------------------------L0.1312-----------------------------------------|" - - "L0.1326[482,641] 83ns |----------------------------------------L0.1326-----------------------------------------|" - - "L0.1340[482,641] 84ns |----------------------------------------L0.1340-----------------------------------------|" - - "L0.1354[482,641] 85ns |----------------------------------------L0.1354-----------------------------------------|" - - "L0.1368[482,641] 86ns |----------------------------------------L0.1368-----------------------------------------|" - - "L0.1382[482,641] 87ns |----------------------------------------L0.1382-----------------------------------------|" - - "L0.1396[482,641] 88ns |----------------------------------------L0.1396-----------------------------------------|" - - "L0.1410[482,641] 89ns |----------------------------------------L0.1410-----------------------------------------|" - - "L0.1424[482,641] 90ns |----------------------------------------L0.1424-----------------------------------------|" - - "L0.1438[482,641] 91ns |----------------------------------------L0.1438-----------------------------------------|" - - "L0.1452[482,641] 92ns |----------------------------------------L0.1452-----------------------------------------|" - - "L0.1466[482,641] 93ns |----------------------------------------L0.1466-----------------------------------------|" - - "L0.1480[482,641] 94ns |----------------------------------------L0.1480-----------------------------------------|" - - "L0.1494[482,641] 95ns |----------------------------------------L0.1494-----------------------------------------|" - - "L0.1508[482,641] 96ns |----------------------------------------L0.1508-----------------------------------------|" + - "L0.2667[802,961] 180ns |----------------------------------------L0.2667-----------------------------------------|" + - "L0.2680[802,961] 181ns |----------------------------------------L0.2680-----------------------------------------|" + - "L0.2693[802,961] 182ns |----------------------------------------L0.2693-----------------------------------------|" + - "L0.2706[802,961] 183ns |----------------------------------------L0.2706-----------------------------------------|" + - "L0.2719[802,961] 184ns |----------------------------------------L0.2719-----------------------------------------|" + - "L0.2732[802,961] 185ns |----------------------------------------L0.2732-----------------------------------------|" + - "L0.2745[802,961] 186ns |----------------------------------------L0.2745-----------------------------------------|" + - "L0.2758[802,961] 187ns |----------------------------------------L0.2758-----------------------------------------|" + - "L0.2771[802,961] 188ns |----------------------------------------L0.2771-----------------------------------------|" + - "L0.2784[802,961] 189ns 
|----------------------------------------L0.2784-----------------------------------------|" + - "L0.2797[802,961] 190ns |----------------------------------------L0.2797-----------------------------------------|" + - "L0.2810[802,961] 191ns |----------------------------------------L0.2810-----------------------------------------|" + - "L0.2823[802,961] 192ns |----------------------------------------L0.2823-----------------------------------------|" + - "L0.2836[802,961] 193ns |----------------------------------------L0.2836-----------------------------------------|" + - "L0.2849[802,961] 194ns |----------------------------------------L0.2849-----------------------------------------|" + - "L0.2862[802,961] 195ns |----------------------------------------L0.2862-----------------------------------------|" + - "L0.2875[802,961] 196ns |----------------------------------------L0.2875-----------------------------------------|" + - "L0.2888[802,961] 197ns |----------------------------------------L0.2888-----------------------------------------|" + - "L0.2901[802,961] 198ns |----------------------------------------L0.2901-----------------------------------------|" + - "L0.2914[802,961] 199ns |----------------------------------------L0.2914-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[802,961] 199ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2667, L0.2680, L0.2693, L0.2706, L0.2719, L0.2732, L0.2745, L0.2758, L0.2771, L0.2784, L0.2797, L0.2810, L0.2823, L0.2836, L0.2849, L0.2862, L0.2875, L0.2888, L0.2901, L0.2914" + - " Creating 1 files" + - "**** Simulation run 259, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.207[962,1121] 0ns |-----------------------------------------L0.207-----------------------------------------|" + - "L0.219[962,1121] 1ns |-----------------------------------------L0.219-----------------------------------------|" + - "L0.231[962,1121] 2ns |-----------------------------------------L0.231-----------------------------------------|" + - "L0.243[962,1121] 3ns |-----------------------------------------L0.243-----------------------------------------|" + - "L0.255[962,1121] 4ns |-----------------------------------------L0.255-----------------------------------------|" + - "L0.267[962,1121] 5ns |-----------------------------------------L0.267-----------------------------------------|" + - "L0.279[962,1121] 6ns |-----------------------------------------L0.279-----------------------------------------|" + - "L0.291[962,1121] 7ns |-----------------------------------------L0.291-----------------------------------------|" + - "L0.303[962,1121] 8ns |-----------------------------------------L0.303-----------------------------------------|" + - "L0.315[962,1121] 9ns |-----------------------------------------L0.315-----------------------------------------|" + - "L0.327[962,1121] 10ns |-----------------------------------------L0.327-----------------------------------------|" + - "L0.339[962,1121] 11ns |-----------------------------------------L0.339-----------------------------------------|" + - "L0.351[962,1121] 12ns |-----------------------------------------L0.351-----------------------------------------|" + - "L0.363[962,1121] 13ns |-----------------------------------------L0.363-----------------------------------------|" + - "L0.375[962,1121] 14ns |-----------------------------------------L0.375-----------------------------------------|" + - "L0.387[962,1121] 15ns |-----------------------------------------L0.387-----------------------------------------|" + - "L0.399[962,1121] 16ns |-----------------------------------------L0.399-----------------------------------------|" + - "L0.411[962,1121] 17ns |-----------------------------------------L0.411-----------------------------------------|" + - "L0.535[962,1121] 18ns |-----------------------------------------L0.535-----------------------------------------|" + - "L0.547[962,1121] 19ns |-----------------------------------------L0.547-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[962,1121] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1256, L0.1270, L0.1284, L0.1298, L0.1312, L0.1326, L0.1340, L0.1354, L0.1368, L0.1382, L0.1396, L0.1410, L0.1424, L0.1438, L0.1452, L0.1466, L0.1480, L0.1494, L0.1508, L0.2968" + - " Soft Deleting 20 files: L0.207, L0.219, L0.231, L0.243, L0.255, L0.267, L0.279, L0.291, L0.303, L0.315, L0.327, L0.339, L0.351, L0.363, L0.375, L0.387, L0.399, L0.411, L0.535, L0.547" - " Creating 1 files" - "**** Simulation run 260, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2969[642,801] 77ns |----------------------------------------L0.2969-----------------------------------------|" - - "L0.1257[642,801] 78ns |----------------------------------------L0.1257-----------------------------------------|" - - "L0.1271[642,801] 79ns |----------------------------------------L0.1271-----------------------------------------|" - - "L0.1285[642,801] 80ns |----------------------------------------L0.1285-----------------------------------------|" - - "L0.1299[642,801] 81ns |----------------------------------------L0.1299-----------------------------------------|" - - "L0.1313[642,801] 82ns |----------------------------------------L0.1313-----------------------------------------|" - - "L0.1327[642,801] 83ns |----------------------------------------L0.1327-----------------------------------------|" - - "L0.1341[642,801] 84ns |----------------------------------------L0.1341-----------------------------------------|" - - "L0.1355[642,801] 85ns |----------------------------------------L0.1355-----------------------------------------|" - - "L0.1369[642,801] 86ns |----------------------------------------L0.1369-----------------------------------------|" - - "L0.1383[642,801] 87ns |----------------------------------------L0.1383-----------------------------------------|" - - "L0.1397[642,801] 88ns |----------------------------------------L0.1397-----------------------------------------|" - - "L0.1411[642,801] 89ns |----------------------------------------L0.1411-----------------------------------------|" - - "L0.1425[642,801] 90ns |----------------------------------------L0.1425-----------------------------------------|" - - "L0.1439[642,801] 91ns |----------------------------------------L0.1439-----------------------------------------|" - - "L0.1453[642,801] 92ns |----------------------------------------L0.1453-----------------------------------------|" - - "L0.1467[642,801] 93ns |----------------------------------------L0.1467-----------------------------------------|" - - "L0.1481[642,801] 94ns |----------------------------------------L0.1481-----------------------------------------|" - - "L0.1495[642,801] 95ns |----------------------------------------L0.1495-----------------------------------------|" - - "L0.1509[642,801] 96ns |----------------------------------------L0.1509-----------------------------------------|" + - "L0.423[962,1121] 20ns |-----------------------------------------L0.423-----------------------------------------|" + - "L0.437[962,1121] 21ns |-----------------------------------------L0.437-----------------------------------------|" + - "L0.451[962,1121] 22ns |-----------------------------------------L0.451-----------------------------------------|" + - "L0.465[962,1121] 23ns |-----------------------------------------L0.465-----------------------------------------|" + - "L0.479[962,1121] 24ns |-----------------------------------------L0.479-----------------------------------------|" + - "L0.493[962,1121] 25ns |-----------------------------------------L0.493-----------------------------------------|" + - "L0.507[962,1121] 26ns |-----------------------------------------L0.507-----------------------------------------|" + - "L0.521[962,1121] 27ns |-----------------------------------------L0.521-----------------------------------------|" + - "L0.559[962,1121] 28ns |-----------------------------------------L0.559-----------------------------------------|" + - "L0.573[962,1121] 29ns 
|-----------------------------------------L0.573-----------------------------------------|" + - "L0.587[962,1121] 30ns |-----------------------------------------L0.587-----------------------------------------|" + - "L0.601[962,1121] 31ns |-----------------------------------------L0.601-----------------------------------------|" + - "L0.615[962,1121] 32ns |-----------------------------------------L0.615-----------------------------------------|" + - "L0.629[962,1121] 33ns |-----------------------------------------L0.629-----------------------------------------|" + - "L0.643[962,1121] 34ns |-----------------------------------------L0.643-----------------------------------------|" + - "L0.657[962,1121] 35ns |-----------------------------------------L0.657-----------------------------------------|" + - "L0.671[962,1121] 36ns |-----------------------------------------L0.671-----------------------------------------|" + - "L0.685[962,1121] 37ns |-----------------------------------------L0.685-----------------------------------------|" + - "L0.699[962,1121] 38ns |-----------------------------------------L0.699-----------------------------------------|" + - "L0.713[962,1121] 39ns |-----------------------------------------L0.713-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1257, L0.1271, L0.1285, L0.1299, L0.1313, L0.1327, L0.1341, L0.1355, L0.1369, L0.1383, L0.1397, L0.1411, L0.1425, L0.1439, L0.1453, L0.1467, L0.1481, L0.1495, L0.1509, L0.2969" + - " Soft Deleting 20 files: L0.423, L0.437, L0.451, L0.465, L0.479, L0.493, L0.507, L0.521, L0.559, L0.573, L0.587, L0.601, L0.615, L0.629, L0.643, L0.657, L0.671, L0.685, L0.699, L0.713" - " Creating 1 files" - "**** Simulation run 261, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2970[802,961] 77ns |----------------------------------------L0.2970-----------------------------------------|" - - "L0.1258[802,961] 78ns |----------------------------------------L0.1258-----------------------------------------|" - - "L0.1272[802,961] 79ns |----------------------------------------L0.1272-----------------------------------------|" - - "L0.1286[802,961] 80ns |----------------------------------------L0.1286-----------------------------------------|" - - "L0.1300[802,961] 81ns |----------------------------------------L0.1300-----------------------------------------|" - - "L0.1314[802,961] 82ns |----------------------------------------L0.1314-----------------------------------------|" - - "L0.1328[802,961] 83ns |----------------------------------------L0.1328-----------------------------------------|" - - "L0.1342[802,961] 84ns |----------------------------------------L0.1342-----------------------------------------|" - - "L0.1356[802,961] 85ns |----------------------------------------L0.1356-----------------------------------------|" - - "L0.1370[802,961] 86ns |----------------------------------------L0.1370-----------------------------------------|" - - "L0.1384[802,961] 87ns |----------------------------------------L0.1384-----------------------------------------|" - - "L0.1398[802,961] 88ns |----------------------------------------L0.1398-----------------------------------------|" - - "L0.1412[802,961] 89ns |----------------------------------------L0.1412-----------------------------------------|" - - "L0.1426[802,961] 90ns |----------------------------------------L0.1426-----------------------------------------|" - - "L0.1440[802,961] 91ns |----------------------------------------L0.1440-----------------------------------------|" - - "L0.1454[802,961] 92ns |----------------------------------------L0.1454-----------------------------------------|" - - "L0.1468[802,961] 93ns |----------------------------------------L0.1468-----------------------------------------|" - - "L0.1482[802,961] 94ns |----------------------------------------L0.1482-----------------------------------------|" - - "L0.1496[802,961] 95ns |----------------------------------------L0.1496-----------------------------------------|" - - "L0.1510[802,961] 96ns |----------------------------------------L0.1510-----------------------------------------|" + - "L0.727[962,1121] 40ns |-----------------------------------------L0.727-----------------------------------------|" + - "L0.741[962,1121] 41ns |-----------------------------------------L0.741-----------------------------------------|" + - "L0.755[962,1121] 42ns |-----------------------------------------L0.755-----------------------------------------|" + - "L0.769[962,1121] 43ns |-----------------------------------------L0.769-----------------------------------------|" + - "L0.783[962,1121] 44ns |-----------------------------------------L0.783-----------------------------------------|" + - "L0.797[962,1121] 45ns |-----------------------------------------L0.797-----------------------------------------|" + - "L0.811[962,1121] 46ns |-----------------------------------------L0.811-----------------------------------------|" + - "L0.825[962,1121] 47ns |-----------------------------------------L0.825-----------------------------------------|" + - "L0.839[962,1121] 48ns |-----------------------------------------L0.839-----------------------------------------|" + - "L0.853[962,1121] 49ns 
|-----------------------------------------L0.853-----------------------------------------|" + - "L0.867[962,1121] 50ns |-----------------------------------------L0.867-----------------------------------------|" + - "L0.881[962,1121] 51ns |-----------------------------------------L0.881-----------------------------------------|" + - "L0.895[962,1121] 52ns |-----------------------------------------L0.895-----------------------------------------|" + - "L0.909[962,1121] 53ns |-----------------------------------------L0.909-----------------------------------------|" + - "L0.923[962,1121] 54ns |-----------------------------------------L0.923-----------------------------------------|" + - "L0.937[962,1121] 55ns |-----------------------------------------L0.937-----------------------------------------|" + - "L0.951[962,1121] 56ns |-----------------------------------------L0.951-----------------------------------------|" + - "L0.965[962,1121] 57ns |-----------------------------------------L0.965-----------------------------------------|" + - "L0.979[962,1121] 58ns |-----------------------------------------L0.979-----------------------------------------|" + - "L0.993[962,1121] 59ns |-----------------------------------------L0.993-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1258, L0.1272, L0.1286, L0.1300, L0.1314, L0.1328, L0.1342, L0.1356, L0.1370, L0.1384, L0.1398, L0.1412, L0.1426, L0.1440, L0.1454, L0.1468, L0.1482, L0.1496, L0.1510, L0.2970" + - " Soft Deleting 20 files: L0.727, L0.741, L0.755, L0.769, L0.783, L0.797, L0.811, L0.825, L0.839, L0.853, L0.867, L0.881, L0.895, L0.909, L0.923, L0.937, L0.951, L0.965, L0.979, L0.993" - " Creating 1 files" - "**** Simulation run 262, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2973[1282,1441] 77ns |----------------------------------------L0.2973-----------------------------------------|" - - "L0.1261[1282,1441] 78ns |----------------------------------------L0.1261-----------------------------------------|" - - "L0.1275[1282,1441] 79ns |----------------------------------------L0.1275-----------------------------------------|" - - "L0.1289[1282,1441] 80ns |----------------------------------------L0.1289-----------------------------------------|" - - "L0.1303[1282,1441] 81ns |----------------------------------------L0.1303-----------------------------------------|" - - "L0.1317[1282,1441] 82ns |----------------------------------------L0.1317-----------------------------------------|" - - "L0.1331[1282,1441] 83ns |----------------------------------------L0.1331-----------------------------------------|" - - "L0.1345[1282,1441] 84ns |----------------------------------------L0.1345-----------------------------------------|" - - "L0.1359[1282,1441] 85ns |----------------------------------------L0.1359-----------------------------------------|" - - "L0.1373[1282,1441] 86ns |----------------------------------------L0.1373-----------------------------------------|" - - "L0.1387[1282,1441] 87ns |----------------------------------------L0.1387-----------------------------------------|" - - "L0.1401[1282,1441] 88ns |----------------------------------------L0.1401-----------------------------------------|" - - "L0.1415[1282,1441] 89ns |----------------------------------------L0.1415-----------------------------------------|" - - "L0.1429[1282,1441] 90ns |----------------------------------------L0.1429-----------------------------------------|" - - "L0.1443[1282,1441] 91ns |----------------------------------------L0.1443-----------------------------------------|" - - "L0.1457[1282,1441] 92ns |----------------------------------------L0.1457-----------------------------------------|" - - "L0.1471[1282,1441] 93ns |----------------------------------------L0.1471-----------------------------------------|" - - "L0.1485[1282,1441] 94ns |----------------------------------------L0.1485-----------------------------------------|" - - "L0.1499[1282,1441] 95ns |----------------------------------------L0.1499-----------------------------------------|" - - "L0.1513[1282,1441] 96ns |----------------------------------------L0.1513-----------------------------------------|" + - "L0.1567[962,1121] 100ns |----------------------------------------L0.1567-----------------------------------------|" + - "L0.1581[962,1121] 101ns |----------------------------------------L0.1581-----------------------------------------|" + - "L0.1595[962,1121] 102ns |----------------------------------------L0.1595-----------------------------------------|" + - "L0.1609[962,1121] 103ns |----------------------------------------L0.1609-----------------------------------------|" + - "L0.1623[962,1121] 104ns |----------------------------------------L0.1623-----------------------------------------|" + - "L0.1637[962,1121] 105ns |----------------------------------------L0.1637-----------------------------------------|" + - "L0.1651[962,1121] 106ns |----------------------------------------L0.1651-----------------------------------------|" + - "L0.1665[962,1121] 107ns |----------------------------------------L0.1665-----------------------------------------|" + - "L0.1679[962,1121] 108ns 
|----------------------------------------L0.1679-----------------------------------------|" + - "L0.1693[962,1121] 109ns |----------------------------------------L0.1693-----------------------------------------|" + - "L0.1707[962,1121] 110ns |----------------------------------------L0.1707-----------------------------------------|" + - "L0.1721[962,1121] 111ns |----------------------------------------L0.1721-----------------------------------------|" + - "L0.1735[962,1121] 112ns |----------------------------------------L0.1735-----------------------------------------|" + - "L0.1749[962,1121] 113ns |----------------------------------------L0.1749-----------------------------------------|" + - "L0.1763[962,1121] 114ns |----------------------------------------L0.1763-----------------------------------------|" + - "L0.1777[962,1121] 115ns |----------------------------------------L0.1777-----------------------------------------|" + - "L0.1791[962,1121] 116ns |----------------------------------------L0.1791-----------------------------------------|" + - "L0.1805[962,1121] 117ns |----------------------------------------L0.1805-----------------------------------------|" + - "L0.1819[962,1121] 118ns |----------------------------------------L0.1819-----------------------------------------|" + - "L0.1833[962,1121] 119ns |----------------------------------------L0.1833-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1261, L0.1275, L0.1289, L0.1303, L0.1317, L0.1331, L0.1345, L0.1359, L0.1373, L0.1387, L0.1401, L0.1415, L0.1429, L0.1443, L0.1457, L0.1471, L0.1485, L0.1499, L0.1513, L0.2973" + - " Soft Deleting 20 files: L0.1567, L0.1581, L0.1595, L0.1609, L0.1623, L0.1637, L0.1651, L0.1665, L0.1679, L0.1693, L0.1707, L0.1721, L0.1735, L0.1749, L0.1763, L0.1777, L0.1791, L0.1805, L0.1819, L0.1833" - " Creating 1 files" - "**** Simulation run 263, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2974[1442,1601] 77ns |----------------------------------------L0.2974-----------------------------------------|" - - "L0.1262[1442,1601] 78ns |----------------------------------------L0.1262-----------------------------------------|" - - "L0.1276[1442,1601] 79ns |----------------------------------------L0.1276-----------------------------------------|" - - "L0.1290[1442,1601] 80ns |----------------------------------------L0.1290-----------------------------------------|" - - "L0.1304[1442,1601] 81ns |----------------------------------------L0.1304-----------------------------------------|" - - "L0.1318[1442,1601] 82ns |----------------------------------------L0.1318-----------------------------------------|" - - "L0.1332[1442,1601] 83ns |----------------------------------------L0.1332-----------------------------------------|" - - "L0.1346[1442,1601] 84ns |----------------------------------------L0.1346-----------------------------------------|" - - "L0.1360[1442,1601] 85ns |----------------------------------------L0.1360-----------------------------------------|" - - "L0.1374[1442,1601] 86ns |----------------------------------------L0.1374-----------------------------------------|" - - "L0.1388[1442,1601] 87ns |----------------------------------------L0.1388-----------------------------------------|" - - "L0.1402[1442,1601] 88ns |----------------------------------------L0.1402-----------------------------------------|" - - "L0.1416[1442,1601] 89ns |----------------------------------------L0.1416-----------------------------------------|" - - "L0.1430[1442,1601] 90ns |----------------------------------------L0.1430-----------------------------------------|" - - "L0.1444[1442,1601] 91ns |----------------------------------------L0.1444-----------------------------------------|" - - "L0.1458[1442,1601] 92ns |----------------------------------------L0.1458-----------------------------------------|" - - "L0.1472[1442,1601] 93ns |----------------------------------------L0.1472-----------------------------------------|" - - "L0.1486[1442,1601] 94ns |----------------------------------------L0.1486-----------------------------------------|" - - "L0.1500[1442,1601] 95ns |----------------------------------------L0.1500-----------------------------------------|" - - "L0.1514[1442,1601] 96ns |----------------------------------------L0.1514-----------------------------------------|" + - "L0.1847[962,1121] 120ns |----------------------------------------L0.1847-----------------------------------------|" + - "L0.1861[962,1121] 121ns |----------------------------------------L0.1861-----------------------------------------|" + - "L0.1875[962,1121] 122ns |----------------------------------------L0.1875-----------------------------------------|" + - "L0.1889[962,1121] 123ns |----------------------------------------L0.1889-----------------------------------------|" + - "L0.1903[962,1121] 124ns |----------------------------------------L0.1903-----------------------------------------|" + - "L0.1917[962,1121] 125ns |----------------------------------------L0.1917-----------------------------------------|" + - "L0.1931[962,1121] 126ns |----------------------------------------L0.1931-----------------------------------------|" + - "L0.1945[962,1121] 127ns |----------------------------------------L0.1945-----------------------------------------|" + - "L0.1959[962,1121] 128ns 
|----------------------------------------L0.1959-----------------------------------------|" + - "L0.1973[962,1121] 129ns |----------------------------------------L0.1973-----------------------------------------|" + - "L0.1987[962,1121] 130ns |----------------------------------------L0.1987-----------------------------------------|" + - "L0.2001[962,1121] 131ns |----------------------------------------L0.2001-----------------------------------------|" + - "L0.2015[962,1121] 132ns |----------------------------------------L0.2015-----------------------------------------|" + - "L0.2029[962,1121] 133ns |----------------------------------------L0.2029-----------------------------------------|" + - "L0.2155[962,1121] 134ns |----------------------------------------L0.2155-----------------------------------------|" + - "L0.2169[962,1121] 135ns |----------------------------------------L0.2169-----------------------------------------|" + - "L0.2043[962,1121] 136ns |----------------------------------------L0.2043-----------------------------------------|" + - "L0.2057[962,1121] 137ns |----------------------------------------L0.2057-----------------------------------------|" + - "L0.2071[962,1121] 138ns |----------------------------------------L0.2071-----------------------------------------|" + - "L0.2085[962,1121] 139ns |----------------------------------------L0.2085-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[962,1121] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1847, L0.1861, L0.1875, L0.1889, L0.1903, L0.1917, L0.1931, L0.1945, L0.1959, L0.1973, L0.1987, L0.2001, L0.2015, L0.2029, L0.2043, L0.2057, L0.2071, L0.2085, L0.2155, L0.2169" + - " Creating 1 files" + - "**** Simulation run 264, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2099[962,1121] 140ns |----------------------------------------L0.2099-----------------------------------------|" + - "L0.2113[962,1121] 141ns |----------------------------------------L0.2113-----------------------------------------|" + - "L0.2127[962,1121] 142ns |----------------------------------------L0.2127-----------------------------------------|" + - "L0.2141[962,1121] 143ns |----------------------------------------L0.2141-----------------------------------------|" + - "L0.2183[962,1121] 144ns |----------------------------------------L0.2183-----------------------------------------|" + - "L0.2197[962,1121] 145ns |----------------------------------------L0.2197-----------------------------------------|" + - "L0.2211[962,1121] 146ns |----------------------------------------L0.2211-----------------------------------------|" + - "L0.2225[962,1121] 147ns |----------------------------------------L0.2225-----------------------------------------|" + - "L0.2239[962,1121] 148ns |----------------------------------------L0.2239-----------------------------------------|" + - "L0.2253[962,1121] 149ns |----------------------------------------L0.2253-----------------------------------------|" + - "L0.2267[962,1121] 150ns |----------------------------------------L0.2267-----------------------------------------|" + - "L0.2281[962,1121] 151ns |----------------------------------------L0.2281-----------------------------------------|" + - "L0.2295[962,1121] 152ns |----------------------------------------L0.2295-----------------------------------------|" + - "L0.2309[962,1121] 153ns |----------------------------------------L0.2309-----------------------------------------|" + - "L0.2323[962,1121] 154ns |----------------------------------------L0.2323-----------------------------------------|" + - "L0.2337[962,1121] 155ns |----------------------------------------L0.2337-----------------------------------------|" + - "L0.2351[962,1121] 156ns |----------------------------------------L0.2351-----------------------------------------|" + - "L0.2365[962,1121] 157ns |----------------------------------------L0.2365-----------------------------------------|" + - "L0.2379[962,1121] 158ns |----------------------------------------L0.2379-----------------------------------------|" + - "L0.2393[962,1121] 159ns |----------------------------------------L0.2393-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 159ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1262, L0.1276, L0.1290, L0.1304, L0.1318, L0.1332, L0.1346, L0.1360, L0.1374, L0.1388, L0.1402, L0.1416, L0.1430, L0.1444, L0.1458, L0.1472, L0.1486, L0.1500, L0.1514, L0.2974" + - " Soft Deleting 20 files: L0.2099, L0.2113, L0.2127, L0.2141, L0.2183, L0.2197, L0.2211, L0.2225, L0.2239, L0.2253, L0.2267, L0.2281, L0.2295, L0.2309, L0.2323, L0.2337, L0.2351, L0.2365, L0.2379, L0.2393" - " Creating 1 files" - - "**** Simulation run 264, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 265, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2975[1602,1761] 77ns |----------------------------------------L0.2975-----------------------------------------|" - - "L0.1263[1602,1761] 78ns |----------------------------------------L0.1263-----------------------------------------|" - - "L0.1277[1602,1761] 79ns |----------------------------------------L0.1277-----------------------------------------|" - - "L0.1291[1602,1761] 80ns |----------------------------------------L0.1291-----------------------------------------|" - - "L0.1305[1602,1761] 81ns |----------------------------------------L0.1305-----------------------------------------|" - - "L0.1319[1602,1761] 82ns |----------------------------------------L0.1319-----------------------------------------|" - - "L0.1333[1602,1761] 83ns |----------------------------------------L0.1333-----------------------------------------|" - - "L0.1347[1602,1761] 84ns |----------------------------------------L0.1347-----------------------------------------|" - - "L0.1361[1602,1761] 85ns |----------------------------------------L0.1361-----------------------------------------|" - - "L0.1375[1602,1761] 86ns |----------------------------------------L0.1375-----------------------------------------|" - - "L0.1389[1602,1761] 87ns |----------------------------------------L0.1389-----------------------------------------|" - - "L0.1403[1602,1761] 88ns |----------------------------------------L0.1403-----------------------------------------|" - - "L0.1417[1602,1761] 89ns |----------------------------------------L0.1417-----------------------------------------|" - - "L0.1431[1602,1761] 90ns |----------------------------------------L0.1431-----------------------------------------|" - - "L0.1445[1602,1761] 91ns |----------------------------------------L0.1445-----------------------------------------|" - - "L0.1459[1602,1761] 92ns |----------------------------------------L0.1459-----------------------------------------|" - - "L0.1473[1602,1761] 93ns |----------------------------------------L0.1473-----------------------------------------|" - - "L0.1487[1602,1761] 94ns |----------------------------------------L0.1487-----------------------------------------|" - - "L0.1501[1602,1761] 95ns |----------------------------------------L0.1501-----------------------------------------|" - - "L0.1515[1602,1761] 96ns |----------------------------------------L0.1515-----------------------------------------|" + - "L0.2407[962,1121] 160ns |----------------------------------------L0.2407-----------------------------------------|" + - "L0.2421[962,1121] 161ns |----------------------------------------L0.2421-----------------------------------------|" + - "L0.2434[962,1121] 162ns |----------------------------------------L0.2434-----------------------------------------|" + - "L0.2447[962,1121] 163ns |----------------------------------------L0.2447-----------------------------------------|" + - "L0.2460[962,1121] 164ns |----------------------------------------L0.2460-----------------------------------------|" + - "L0.2473[962,1121] 165ns |----------------------------------------L0.2473-----------------------------------------|" + - "L0.2486[962,1121] 166ns |----------------------------------------L0.2486-----------------------------------------|" + - "L0.2499[962,1121] 167ns |----------------------------------------L0.2499-----------------------------------------|" + - "L0.2512[962,1121] 168ns 
|----------------------------------------L0.2512-----------------------------------------|" + - "L0.2525[962,1121] 169ns |----------------------------------------L0.2525-----------------------------------------|" + - "L0.2538[962,1121] 170ns |----------------------------------------L0.2538-----------------------------------------|" + - "L0.2551[962,1121] 171ns |----------------------------------------L0.2551-----------------------------------------|" + - "L0.2564[962,1121] 172ns |----------------------------------------L0.2564-----------------------------------------|" + - "L0.2577[962,1121] 173ns |----------------------------------------L0.2577-----------------------------------------|" + - "L0.2590[962,1121] 174ns |----------------------------------------L0.2590-----------------------------------------|" + - "L0.2603[962,1121] 175ns |----------------------------------------L0.2603-----------------------------------------|" + - "L0.2616[962,1121] 176ns |----------------------------------------L0.2616-----------------------------------------|" + - "L0.2629[962,1121] 177ns |----------------------------------------L0.2629-----------------------------------------|" + - "L0.2642[962,1121] 178ns |----------------------------------------L0.2642-----------------------------------------|" + - "L0.2655[962,1121] 179ns |----------------------------------------L0.2655-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1602,1761] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1263, L0.1277, L0.1291, L0.1305, L0.1319, L0.1333, L0.1347, L0.1361, L0.1375, L0.1389, L0.1403, L0.1417, L0.1431, L0.1445, L0.1459, L0.1473, L0.1487, L0.1501, L0.1515, L0.2975" + - " Soft Deleting 20 files: L0.2407, L0.2421, L0.2434, L0.2447, L0.2460, L0.2473, L0.2486, L0.2499, L0.2512, L0.2525, L0.2538, L0.2551, L0.2564, L0.2577, L0.2590, L0.2603, L0.2616, L0.2629, L0.2642, L0.2655" - " Creating 1 files" - - "**** Simulation run 265, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 266, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2976[1762,2000] 77ns |----------------------------------------L0.2976-----------------------------------------|" - - "L0.1264[1762,2000] 78ns |----------------------------------------L0.1264-----------------------------------------|" - - "L0.1278[1762,2000] 79ns |----------------------------------------L0.1278-----------------------------------------|" - - "L0.1292[1762,2000] 80ns |----------------------------------------L0.1292-----------------------------------------|" - - "L0.1306[1762,2000] 81ns |----------------------------------------L0.1306-----------------------------------------|" - - "L0.1320[1762,2000] 82ns |----------------------------------------L0.1320-----------------------------------------|" - - "L0.1334[1762,2000] 83ns |----------------------------------------L0.1334-----------------------------------------|" - - "L0.1348[1762,2000] 84ns |----------------------------------------L0.1348-----------------------------------------|" - - "L0.1362[1762,2000] 85ns |----------------------------------------L0.1362-----------------------------------------|" - - "L0.1376[1762,2000] 86ns |----------------------------------------L0.1376-----------------------------------------|" - - "L0.1390[1762,2000] 87ns |----------------------------------------L0.1390-----------------------------------------|" - - "L0.1404[1762,2000] 88ns |----------------------------------------L0.1404-----------------------------------------|" - - "L0.1418[1762,2000] 89ns |----------------------------------------L0.1418-----------------------------------------|" - - "L0.1432[1762,2000] 90ns |----------------------------------------L0.1432-----------------------------------------|" - - "L0.1446[1762,2000] 91ns |----------------------------------------L0.1446-----------------------------------------|" - - "L0.1460[1762,2000] 92ns |----------------------------------------L0.1460-----------------------------------------|" - - "L0.1474[1762,2000] 93ns |----------------------------------------L0.1474-----------------------------------------|" - - "L0.1488[1762,2000] 94ns |----------------------------------------L0.1488-----------------------------------------|" - - "L0.1502[1762,2000] 95ns |----------------------------------------L0.1502-----------------------------------------|" - - "L0.1516[1762,2000] 96ns |----------------------------------------L0.1516-----------------------------------------|" + - "L0.2668[962,1121] 180ns |----------------------------------------L0.2668-----------------------------------------|" + - "L0.2681[962,1121] 181ns |----------------------------------------L0.2681-----------------------------------------|" + - "L0.2694[962,1121] 182ns |----------------------------------------L0.2694-----------------------------------------|" + - "L0.2707[962,1121] 183ns |----------------------------------------L0.2707-----------------------------------------|" + - "L0.2720[962,1121] 184ns |----------------------------------------L0.2720-----------------------------------------|" + - "L0.2733[962,1121] 185ns |----------------------------------------L0.2733-----------------------------------------|" + - "L0.2746[962,1121] 186ns |----------------------------------------L0.2746-----------------------------------------|" + - "L0.2759[962,1121] 187ns |----------------------------------------L0.2759-----------------------------------------|" + - "L0.2772[962,1121] 188ns 
|----------------------------------------L0.2772-----------------------------------------|" + - "L0.2785[962,1121] 189ns |----------------------------------------L0.2785-----------------------------------------|" + - "L0.2798[962,1121] 190ns |----------------------------------------L0.2798-----------------------------------------|" + - "L0.2811[962,1121] 191ns |----------------------------------------L0.2811-----------------------------------------|" + - "L0.2824[962,1121] 192ns |----------------------------------------L0.2824-----------------------------------------|" + - "L0.2837[962,1121] 193ns |----------------------------------------L0.2837-----------------------------------------|" + - "L0.2850[962,1121] 194ns |----------------------------------------L0.2850-----------------------------------------|" + - "L0.2863[962,1121] 195ns |----------------------------------------L0.2863-----------------------------------------|" + - "L0.2876[962,1121] 196ns |----------------------------------------L0.2876-----------------------------------------|" + - "L0.2889[962,1121] 197ns |----------------------------------------L0.2889-----------------------------------------|" + - "L0.2902[962,1121] 198ns |----------------------------------------L0.2902-----------------------------------------|" + - "L0.2915[962,1121] 199ns |----------------------------------------L0.2915-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1264, L0.1278, L0.1292, L0.1306, L0.1320, L0.1334, L0.1348, L0.1362, L0.1376, L0.1390, L0.1404, L0.1418, L0.1432, L0.1446, L0.1460, L0.1474, L0.1488, L0.1502, L0.1516, L0.2976" + - " Soft Deleting 20 files: L0.2668, L0.2681, L0.2694, L0.2707, L0.2720, L0.2733, L0.2746, L0.2759, L0.2772, L0.2785, L0.2798, L0.2811, L0.2824, L0.2837, L0.2850, L0.2863, L0.2876, L0.2889, L0.2902, L0.2915" - " Creating 1 files" - - "**** Simulation run 266, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 267, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2977[2001,2086] 96ns |----------------------------------------L0.2977-----------------------------------------|" - - "L0.1531[2001,2086] 97ns |----------------------------------------L0.1531-----------------------------------------|" - - "L0.1545[2001,2086] 98ns |----------------------------------------L0.1545-----------------------------------------|" - - "L0.1559[2001,2086] 99ns |----------------------------------------L0.1559-----------------------------------------|" - - "L0.1573[2001,2086] 100ns |----------------------------------------L0.1573-----------------------------------------|" - - "L0.1587[2001,2086] 101ns |----------------------------------------L0.1587-----------------------------------------|" - - "L0.1601[2001,2086] 102ns |----------------------------------------L0.1601-----------------------------------------|" - - "L0.1615[2001,2086] 103ns |----------------------------------------L0.1615-----------------------------------------|" - - "L0.1629[2001,2086] 104ns |----------------------------------------L0.1629-----------------------------------------|" - - "L0.1643[2001,2086] 105ns |----------------------------------------L0.1643-----------------------------------------|" - - "L0.1657[2001,2086] 106ns |----------------------------------------L0.1657-----------------------------------------|" - - "L0.1671[2001,2086] 107ns |----------------------------------------L0.1671-----------------------------------------|" - - "L0.1685[2001,2086] 108ns |----------------------------------------L0.1685-----------------------------------------|" - - "L0.1699[2001,2086] 109ns |----------------------------------------L0.1699-----------------------------------------|" - - "L0.1713[2001,2086] 110ns |----------------------------------------L0.1713-----------------------------------------|" - - "L0.1727[2001,2086] 111ns |----------------------------------------L0.1727-----------------------------------------|" - - "L0.1741[2001,2086] 112ns |----------------------------------------L0.1741-----------------------------------------|" - - "L0.1755[2001,2086] 113ns |----------------------------------------L0.1755-----------------------------------------|" - - "L0.1769[2001,2086] 114ns |----------------------------------------L0.1769-----------------------------------------|" - - "L0.1783[2001,2086] 115ns |----------------------------------------L0.1783-----------------------------------------|" + - "L0.1007[962,1121] 60ns |----------------------------------------L0.1007-----------------------------------------|" + - "L0.1021[962,1121] 61ns |----------------------------------------L0.1021-----------------------------------------|" + - "L0.1035[962,1121] 62ns |----------------------------------------L0.1035-----------------------------------------|" + - "L0.1049[962,1121] 63ns |----------------------------------------L0.1049-----------------------------------------|" + - "L0.1063[962,1121] 64ns |----------------------------------------L0.1063-----------------------------------------|" + - "L0.1077[962,1121] 65ns |----------------------------------------L0.1077-----------------------------------------|" + - "L0.1091[962,1121] 66ns |----------------------------------------L0.1091-----------------------------------------|" + - "L0.1105[962,1121] 67ns |----------------------------------------L0.1105-----------------------------------------|" + - "L0.1119[962,1121] 68ns 
|----------------------------------------L0.1119-----------------------------------------|" + - "L0.1133[962,1121] 69ns |----------------------------------------L0.1133-----------------------------------------|" + - "L0.1147[962,1121] 70ns |----------------------------------------L0.1147-----------------------------------------|" + - "L0.1161[962,1121] 71ns |----------------------------------------L0.1161-----------------------------------------|" + - "L0.1175[962,1121] 72ns |----------------------------------------L0.1175-----------------------------------------|" + - "L0.1189[962,1121] 73ns |----------------------------------------L0.1189-----------------------------------------|" + - "L0.1203[962,1121] 74ns |----------------------------------------L0.1203-----------------------------------------|" + - "L0.1217[962,1121] 75ns |----------------------------------------L0.1217-----------------------------------------|" + - "L0.1231[962,1121] 76ns |----------------------------------------L0.1231-----------------------------------------|" + - "L0.1245[962,1121] 77ns |----------------------------------------L0.1245-----------------------------------------|" + - "L0.1259[962,1121] 78ns |----------------------------------------L0.1259-----------------------------------------|" + - "L0.1273[962,1121] 79ns |----------------------------------------L0.1273-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1531, L0.1545, L0.1559, L0.1573, L0.1587, L0.1601, L0.1615, L0.1629, L0.1643, L0.1657, L0.1671, L0.1685, L0.1699, L0.1713, L0.1727, L0.1741, L0.1755, L0.1769, L0.1783, L0.2977" - - " Creating 1 files" - - "**** Simulation run 267, type=compact(ManySmallFiles). 
20 Input Files, 960b total:" - - "L0 " - - "L0.2978[2087,960000] 96ns 770b|---------------------------------L0.2978---------------------------------| " - - "L0.1532[2087,970000] 97ns 10b|---------------------------------L0.1532---------------------------------| " - - "L0.1546[2087,980000] 98ns 10b|---------------------------------L0.1546----------------------------------| " - - "L0.1560[2087,990000] 99ns 10b|----------------------------------L0.1560----------------------------------| " - - "L0.1574[2087,1000000] 100ns 10b|----------------------------------L0.1574-----------------------------------| " - - "L0.1588[2087,1010000] 101ns 10b|-----------------------------------L0.1588-----------------------------------| " - - "L0.1602[2087,1020000] 102ns 10b|-----------------------------------L0.1602-----------------------------------| " - - "L0.1616[2087,1030000] 103ns 10b|-----------------------------------L0.1616------------------------------------| " - - "L0.1630[2087,1040000] 104ns 10b|------------------------------------L0.1630------------------------------------| " - - "L0.1644[2087,1050000] 105ns 10b|------------------------------------L0.1644-------------------------------------| " - - "L0.1658[2087,1060000] 106ns 10b|------------------------------------L0.1658-------------------------------------| " - - "L0.1672[2087,1070000] 107ns 10b|-------------------------------------L0.1672-------------------------------------| " - - "L0.1686[2087,1080000] 108ns 10b|-------------------------------------L0.1686--------------------------------------| " - - "L0.1700[2087,1090000] 109ns 10b|--------------------------------------L0.1700--------------------------------------| " - - "L0.1714[2087,1100000] 110ns 10b|--------------------------------------L0.1714---------------------------------------| " - - "L0.1728[2087,1110000] 111ns 10b|--------------------------------------L0.1728---------------------------------------| " - - "L0.1742[2087,1120000] 112ns 10b|---------------------------------------L0.1742---------------------------------------| " - - "L0.1756[2087,1130000] 113ns 10b|---------------------------------------L0.1756----------------------------------------| " - - "L0.1770[2087,1140000] 114ns 10b|----------------------------------------L0.1770----------------------------------------| " - - "L0.1784[2087,1150000] 115ns 10b|----------------------------------------L0.1784-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 960b total:" - - "L0, all files 960b " - - "L0.?[2087,1150000] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1532, L0.1546, L0.1560, L0.1574, L0.1588, L0.1602, L0.1616, L0.1630, L0.1644, L0.1658, L0.1672, L0.1686, L0.1700, L0.1714, L0.1728, L0.1742, L0.1756, L0.1770, L0.1784, L0.2978" + - "L0.?[962,1121] 79ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1007, L0.1021, L0.1035, L0.1049, L0.1063, L0.1077, L0.1091, L0.1105, L0.1119, L0.1133, L0.1147, L0.1161, L0.1175, L0.1189, L0.1203, L0.1217, L0.1231, L0.1245, L0.1259, L0.1273" - " Creating 1 files" - "**** Simulation run 268, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2971[962,1121] 77ns |----------------------------------------L0.2971-----------------------------------------|" - - "L0.1259[962,1121] 78ns |----------------------------------------L0.1259-----------------------------------------|" - - "L0.1273[962,1121] 79ns |----------------------------------------L0.1273-----------------------------------------|" - "L0.1287[962,1121] 80ns |----------------------------------------L0.1287-----------------------------------------|" - "L0.1301[962,1121] 81ns |----------------------------------------L0.1301-----------------------------------------|" - "L0.1315[962,1121] 82ns |----------------------------------------L0.1315-----------------------------------------|" @@ -7382,242 +7361,157 @@ async fn stuck_l0_large_l0s() { - "L0.1483[962,1121] 94ns |----------------------------------------L0.1483-----------------------------------------|" - "L0.1497[962,1121] 95ns |----------------------------------------L0.1497-----------------------------------------|" - "L0.1511[962,1121] 96ns |----------------------------------------L0.1511-----------------------------------------|" + - "L0.1525[962,1121] 97ns |----------------------------------------L0.1525-----------------------------------------|" + - "L0.1539[962,1121] 98ns |----------------------------------------L0.1539-----------------------------------------|" + - "L0.1553[962,1121] 99ns |----------------------------------------L0.1553-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 96ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[962,1121] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1259, L0.1273, L0.1287, L0.1301, L0.1315, L0.1329, L0.1343, L0.1357, L0.1371, L0.1385, L0.1399, L0.1413, L0.1427, L0.1441, L0.1455, L0.1469, L0.1483, L0.1497, L0.1511, L0.2971" + - " Soft Deleting 20 files: L0.1287, L0.1301, L0.1315, L0.1329, L0.1343, L0.1357, L0.1371, L0.1385, L0.1399, L0.1413, L0.1427, L0.1441, L0.1455, L0.1469, L0.1483, L0.1497, L0.1511, L0.1525, L0.1539, L0.1553" - " Creating 1 files" - - "**** Simulation run 269, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2972[1122,1281] 77ns |----------------------------------------L0.2972-----------------------------------------|" - - "L0.1260[1122,1281] 78ns |----------------------------------------L0.1260-----------------------------------------|" - - "L0.1274[1122,1281] 79ns |----------------------------------------L0.1274-----------------------------------------|" - - "L0.1288[1122,1281] 80ns |----------------------------------------L0.1288-----------------------------------------|" - - "L0.1302[1122,1281] 81ns |----------------------------------------L0.1302-----------------------------------------|" - - "L0.1316[1122,1281] 82ns |----------------------------------------L0.1316-----------------------------------------|" - - "L0.1330[1122,1281] 83ns |----------------------------------------L0.1330-----------------------------------------|" - - "L0.1344[1122,1281] 84ns |----------------------------------------L0.1344-----------------------------------------|" - - "L0.1358[1122,1281] 85ns |----------------------------------------L0.1358-----------------------------------------|" - - "L0.1372[1122,1281] 86ns |----------------------------------------L0.1372-----------------------------------------|" - - "L0.1386[1122,1281] 87ns |----------------------------------------L0.1386-----------------------------------------|" - - "L0.1400[1122,1281] 88ns |----------------------------------------L0.1400-----------------------------------------|" - - "L0.1414[1122,1281] 89ns |----------------------------------------L0.1414-----------------------------------------|" - - "L0.1428[1122,1281] 90ns |----------------------------------------L0.1428-----------------------------------------|" - - "L0.1442[1122,1281] 91ns |----------------------------------------L0.1442-----------------------------------------|" - - "L0.1456[1122,1281] 92ns |----------------------------------------L0.1456-----------------------------------------|" - - "L0.1470[1122,1281] 93ns |----------------------------------------L0.1470-----------------------------------------|" - - "L0.1484[1122,1281] 94ns |----------------------------------------L0.1484-----------------------------------------|" - - "L0.1498[1122,1281] 95ns |----------------------------------------L0.1498-----------------------------------------|" - - "L0.1512[1122,1281] 96ns |----------------------------------------L0.1512-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1122,1281] 96ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 269, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.208[1122,1281] 0ns |-----------------------------------------L0.208-----------------------------------------|" + - "L0.220[1122,1281] 1ns |-----------------------------------------L0.220-----------------------------------------|" + - "L0.232[1122,1281] 2ns |-----------------------------------------L0.232-----------------------------------------|" + - "L0.244[1122,1281] 3ns |-----------------------------------------L0.244-----------------------------------------|" + - "L0.256[1122,1281] 4ns |-----------------------------------------L0.256-----------------------------------------|" + - "L0.268[1122,1281] 5ns |-----------------------------------------L0.268-----------------------------------------|" + - "L0.280[1122,1281] 6ns |-----------------------------------------L0.280-----------------------------------------|" + - "L0.292[1122,1281] 7ns |-----------------------------------------L0.292-----------------------------------------|" + - "L0.304[1122,1281] 8ns |-----------------------------------------L0.304-----------------------------------------|" + - "L0.316[1122,1281] 9ns |-----------------------------------------L0.316-----------------------------------------|" + - "L0.328[1122,1281] 10ns |-----------------------------------------L0.328-----------------------------------------|" + - "L0.340[1122,1281] 11ns |-----------------------------------------L0.340-----------------------------------------|" + - "L0.352[1122,1281] 12ns |-----------------------------------------L0.352-----------------------------------------|" + - "L0.364[1122,1281] 13ns |-----------------------------------------L0.364-----------------------------------------|" + - "L0.376[1122,1281] 14ns |-----------------------------------------L0.376-----------------------------------------|" + - "L0.388[1122,1281] 15ns |-----------------------------------------L0.388-----------------------------------------|" + - "L0.400[1122,1281] 16ns |-----------------------------------------L0.400-----------------------------------------|" + - "L0.412[1122,1281] 17ns |-----------------------------------------L0.412-----------------------------------------|" + - "L0.536[1122,1281] 18ns |-----------------------------------------L0.536-----------------------------------------|" + - "L0.548[1122,1281] 19ns |-----------------------------------------L0.548-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[1122,1281] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1260, L0.1274, L0.1288, L0.1302, L0.1316, L0.1330, L0.1344, L0.1358, L0.1372, L0.1386, L0.1400, L0.1414, L0.1428, L0.1442, L0.1456, L0.1470, L0.1484, L0.1498, L0.1512, L0.2972" + - " Soft Deleting 20 files: L0.208, L0.220, L0.232, L0.244, L0.256, L0.268, L0.280, L0.292, L0.304, L0.316, L0.328, L0.340, L0.352, L0.364, L0.376, L0.388, L0.400, L0.412, L0.536, L0.548" - " Creating 1 files" - "**** Simulation run 270, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2979[20,161] 96ns |----------------------------------------L0.2979-----------------------------------------|" - - "L0.1519[97,161] 97ns |---------------L0.1519----------------| " - - "L0.1533[98,161] 98ns |---------------L0.1533----------------| " - - "L0.1547[99,161] 99ns |---------------L0.1547---------------| " - - "L0.1561[100,161] 100ns |--------------L0.1561---------------| " - - "L0.1575[101,161] 101ns |--------------L0.1575---------------| " - - "L0.1589[102,161] 102ns |--------------L0.1589--------------| " - - "L0.1603[103,161] 103ns |--------------L0.1603--------------| " - - "L0.1617[104,161] 104ns |-------------L0.1617--------------| " - - "L0.1631[105,161] 105ns |-------------L0.1631-------------| " - - "L0.1645[106,161] 106ns |-------------L0.1645-------------| " - - "L0.1659[107,161] 107ns |------------L0.1659-------------| " - - "L0.1673[108,161] 108ns |------------L0.1673------------| " - - "L0.1687[109,161] 109ns |------------L0.1687------------| " - - "L0.1701[110,161] 110ns |-----------L0.1701------------| " - - "L0.1715[111,161] 111ns |-----------L0.1715-----------| " - - "L0.1729[112,161] 112ns |-----------L0.1729-----------| " - - "L0.1743[113,161] 113ns |----------L0.1743-----------| " - - "L0.1757[114,161] 114ns |----------L0.1757-----------|" - - "L0.1771[115,161] 115ns |----------L0.1771----------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[20,161] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1519, L0.1533, L0.1547, L0.1561, L0.1575, L0.1589, L0.1603, L0.1617, L0.1631, L0.1645, L0.1659, L0.1673, L0.1687, L0.1701, L0.1715, L0.1729, L0.1743, L0.1757, L0.1771, L0.2979" - - " Creating 1 files" - - "**** Simulation run 271, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2980[162,321] 96ns |----------------------------------------L0.2980-----------------------------------------|" - - "L0.1520[162,321] 97ns |----------------------------------------L0.1520-----------------------------------------|" - - "L0.1534[162,321] 98ns |----------------------------------------L0.1534-----------------------------------------|" - - "L0.1548[162,321] 99ns |----------------------------------------L0.1548-----------------------------------------|" - - "L0.1562[162,321] 100ns |----------------------------------------L0.1562-----------------------------------------|" - - "L0.1576[162,321] 101ns |----------------------------------------L0.1576-----------------------------------------|" - - "L0.1590[162,321] 102ns |----------------------------------------L0.1590-----------------------------------------|" - - "L0.1604[162,321] 103ns |----------------------------------------L0.1604-----------------------------------------|" - - "L0.1618[162,321] 104ns |----------------------------------------L0.1618-----------------------------------------|" - - "L0.1632[162,321] 105ns |----------------------------------------L0.1632-----------------------------------------|" - - "L0.1646[162,321] 106ns |----------------------------------------L0.1646-----------------------------------------|" - - "L0.1660[162,321] 107ns |----------------------------------------L0.1660-----------------------------------------|" - - "L0.1674[162,321] 108ns |----------------------------------------L0.1674-----------------------------------------|" - - "L0.1688[162,321] 109ns |----------------------------------------L0.1688-----------------------------------------|" - - "L0.1702[162,321] 110ns |----------------------------------------L0.1702-----------------------------------------|" - - "L0.1716[162,321] 111ns |----------------------------------------L0.1716-----------------------------------------|" - - "L0.1730[162,321] 112ns |----------------------------------------L0.1730-----------------------------------------|" - - "L0.1744[162,321] 113ns |----------------------------------------L0.1744-----------------------------------------|" - - "L0.1758[162,321] 114ns |----------------------------------------L0.1758-----------------------------------------|" - - "L0.1772[162,321] 115ns |----------------------------------------L0.1772-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[162,321] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1520, L0.1534, L0.1548, L0.1562, L0.1576, L0.1590, L0.1604, L0.1618, L0.1632, L0.1646, L0.1660, L0.1674, L0.1688, L0.1702, L0.1716, L0.1730, L0.1744, L0.1758, L0.1772, L0.2980" - - " Creating 1 files" - - "**** Simulation run 272, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2981[322,481] 96ns |----------------------------------------L0.2981-----------------------------------------|" - - "L0.1521[322,481] 97ns |----------------------------------------L0.1521-----------------------------------------|" - - "L0.1535[322,481] 98ns |----------------------------------------L0.1535-----------------------------------------|" - - "L0.1549[322,481] 99ns |----------------------------------------L0.1549-----------------------------------------|" - - "L0.1563[322,481] 100ns |----------------------------------------L0.1563-----------------------------------------|" - - "L0.1577[322,481] 101ns |----------------------------------------L0.1577-----------------------------------------|" - - "L0.1591[322,481] 102ns |----------------------------------------L0.1591-----------------------------------------|" - - "L0.1605[322,481] 103ns |----------------------------------------L0.1605-----------------------------------------|" - - "L0.1619[322,481] 104ns |----------------------------------------L0.1619-----------------------------------------|" - - "L0.1633[322,481] 105ns |----------------------------------------L0.1633-----------------------------------------|" - - "L0.1647[322,481] 106ns |----------------------------------------L0.1647-----------------------------------------|" - - "L0.1661[322,481] 107ns |----------------------------------------L0.1661-----------------------------------------|" - - "L0.1675[322,481] 108ns |----------------------------------------L0.1675-----------------------------------------|" - - "L0.1689[322,481] 109ns |----------------------------------------L0.1689-----------------------------------------|" - - "L0.1703[322,481] 110ns |----------------------------------------L0.1703-----------------------------------------|" - - "L0.1717[322,481] 111ns |----------------------------------------L0.1717-----------------------------------------|" - - "L0.1731[322,481] 112ns |----------------------------------------L0.1731-----------------------------------------|" - - "L0.1745[322,481] 113ns |----------------------------------------L0.1745-----------------------------------------|" - - "L0.1759[322,481] 114ns |----------------------------------------L0.1759-----------------------------------------|" - - "L0.1773[322,481] 115ns |----------------------------------------L0.1773-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[322,481] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1521, L0.1535, L0.1549, L0.1563, L0.1577, L0.1591, L0.1605, L0.1619, L0.1633, L0.1647, L0.1661, L0.1675, L0.1689, L0.1703, L0.1717, L0.1731, L0.1745, L0.1759, L0.1773, L0.2981" - - " Creating 1 files" - - "**** Simulation run 273, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2982[482,641] 96ns |----------------------------------------L0.2982-----------------------------------------|" - - "L0.1522[482,641] 97ns |----------------------------------------L0.1522-----------------------------------------|" - - "L0.1536[482,641] 98ns |----------------------------------------L0.1536-----------------------------------------|" - - "L0.1550[482,641] 99ns |----------------------------------------L0.1550-----------------------------------------|" - - "L0.1564[482,641] 100ns |----------------------------------------L0.1564-----------------------------------------|" - - "L0.1578[482,641] 101ns |----------------------------------------L0.1578-----------------------------------------|" - - "L0.1592[482,641] 102ns |----------------------------------------L0.1592-----------------------------------------|" - - "L0.1606[482,641] 103ns |----------------------------------------L0.1606-----------------------------------------|" - - "L0.1620[482,641] 104ns |----------------------------------------L0.1620-----------------------------------------|" - - "L0.1634[482,641] 105ns |----------------------------------------L0.1634-----------------------------------------|" - - "L0.1648[482,641] 106ns |----------------------------------------L0.1648-----------------------------------------|" - - "L0.1662[482,641] 107ns |----------------------------------------L0.1662-----------------------------------------|" - - "L0.1676[482,641] 108ns |----------------------------------------L0.1676-----------------------------------------|" - - "L0.1690[482,641] 109ns |----------------------------------------L0.1690-----------------------------------------|" - - "L0.1704[482,641] 110ns |----------------------------------------L0.1704-----------------------------------------|" - - "L0.1718[482,641] 111ns |----------------------------------------L0.1718-----------------------------------------|" - - "L0.1732[482,641] 112ns |----------------------------------------L0.1732-----------------------------------------|" - - "L0.1746[482,641] 113ns |----------------------------------------L0.1746-----------------------------------------|" - - "L0.1760[482,641] 114ns |----------------------------------------L0.1760-----------------------------------------|" - - "L0.1774[482,641] 115ns |----------------------------------------L0.1774-----------------------------------------|" + - "L0.424[1122,1281] 20ns |-----------------------------------------L0.424-----------------------------------------|" + - "L0.438[1122,1281] 21ns |-----------------------------------------L0.438-----------------------------------------|" + - "L0.452[1122,1281] 22ns |-----------------------------------------L0.452-----------------------------------------|" + - "L0.466[1122,1281] 23ns |-----------------------------------------L0.466-----------------------------------------|" + - "L0.480[1122,1281] 24ns |-----------------------------------------L0.480-----------------------------------------|" + - "L0.494[1122,1281] 25ns |-----------------------------------------L0.494-----------------------------------------|" + - "L0.508[1122,1281] 26ns |-----------------------------------------L0.508-----------------------------------------|" + - "L0.522[1122,1281] 27ns |-----------------------------------------L0.522-----------------------------------------|" + - "L0.560[1122,1281] 28ns |-----------------------------------------L0.560-----------------------------------------|" + - "L0.574[1122,1281] 29ns 
|-----------------------------------------L0.574-----------------------------------------|" + - "L0.588[1122,1281] 30ns |-----------------------------------------L0.588-----------------------------------------|" + - "L0.602[1122,1281] 31ns |-----------------------------------------L0.602-----------------------------------------|" + - "L0.616[1122,1281] 32ns |-----------------------------------------L0.616-----------------------------------------|" + - "L0.630[1122,1281] 33ns |-----------------------------------------L0.630-----------------------------------------|" + - "L0.644[1122,1281] 34ns |-----------------------------------------L0.644-----------------------------------------|" + - "L0.658[1122,1281] 35ns |-----------------------------------------L0.658-----------------------------------------|" + - "L0.672[1122,1281] 36ns |-----------------------------------------L0.672-----------------------------------------|" + - "L0.686[1122,1281] 37ns |-----------------------------------------L0.686-----------------------------------------|" + - "L0.700[1122,1281] 38ns |-----------------------------------------L0.700-----------------------------------------|" + - "L0.714[1122,1281] 39ns |-----------------------------------------L0.714-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 115ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1522, L0.1536, L0.1550, L0.1564, L0.1578, L0.1592, L0.1606, L0.1620, L0.1634, L0.1648, L0.1662, L0.1676, L0.1690, L0.1704, L0.1718, L0.1732, L0.1746, L0.1760, L0.1774, L0.2982" + - " Soft Deleting 20 files: L0.424, L0.438, L0.452, L0.466, L0.480, L0.494, L0.508, L0.522, L0.560, L0.574, L0.588, L0.602, L0.616, L0.630, L0.644, L0.658, L0.672, L0.686, L0.700, L0.714" - " Creating 1 files" - - "**** Simulation run 274, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 271, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2983[642,801] 96ns |----------------------------------------L0.2983-----------------------------------------|" - - "L0.1523[642,801] 97ns |----------------------------------------L0.1523-----------------------------------------|" - - "L0.1537[642,801] 98ns |----------------------------------------L0.1537-----------------------------------------|" - - "L0.1551[642,801] 99ns |----------------------------------------L0.1551-----------------------------------------|" - - "L0.1565[642,801] 100ns |----------------------------------------L0.1565-----------------------------------------|" - - "L0.1579[642,801] 101ns |----------------------------------------L0.1579-----------------------------------------|" - - "L0.1593[642,801] 102ns |----------------------------------------L0.1593-----------------------------------------|" - - "L0.1607[642,801] 103ns |----------------------------------------L0.1607-----------------------------------------|" - - "L0.1621[642,801] 104ns |----------------------------------------L0.1621-----------------------------------------|" - - "L0.1635[642,801] 105ns |----------------------------------------L0.1635-----------------------------------------|" - - "L0.1649[642,801] 106ns |----------------------------------------L0.1649-----------------------------------------|" - - "L0.1663[642,801] 107ns |----------------------------------------L0.1663-----------------------------------------|" - - "L0.1677[642,801] 108ns |----------------------------------------L0.1677-----------------------------------------|" - - "L0.1691[642,801] 109ns |----------------------------------------L0.1691-----------------------------------------|" - - "L0.1705[642,801] 110ns |----------------------------------------L0.1705-----------------------------------------|" - - "L0.1719[642,801] 111ns |----------------------------------------L0.1719-----------------------------------------|" - - "L0.1733[642,801] 112ns |----------------------------------------L0.1733-----------------------------------------|" - - "L0.1747[642,801] 113ns |----------------------------------------L0.1747-----------------------------------------|" - - "L0.1761[642,801] 114ns |----------------------------------------L0.1761-----------------------------------------|" - - "L0.1775[642,801] 115ns |----------------------------------------L0.1775-----------------------------------------|" + - "L0.728[1122,1281] 40ns |-----------------------------------------L0.728-----------------------------------------|" + - "L0.742[1122,1281] 41ns |-----------------------------------------L0.742-----------------------------------------|" + - "L0.756[1122,1281] 42ns |-----------------------------------------L0.756-----------------------------------------|" + - "L0.770[1122,1281] 43ns |-----------------------------------------L0.770-----------------------------------------|" + - "L0.784[1122,1281] 44ns |-----------------------------------------L0.784-----------------------------------------|" + - "L0.798[1122,1281] 45ns |-----------------------------------------L0.798-----------------------------------------|" + - "L0.812[1122,1281] 46ns |-----------------------------------------L0.812-----------------------------------------|" + - "L0.826[1122,1281] 47ns |-----------------------------------------L0.826-----------------------------------------|" + - "L0.840[1122,1281] 48ns |-----------------------------------------L0.840-----------------------------------------|" + - "L0.854[1122,1281] 49ns 
|-----------------------------------------L0.854-----------------------------------------|" + - "L0.868[1122,1281] 50ns |-----------------------------------------L0.868-----------------------------------------|" + - "L0.882[1122,1281] 51ns |-----------------------------------------L0.882-----------------------------------------|" + - "L0.896[1122,1281] 52ns |-----------------------------------------L0.896-----------------------------------------|" + - "L0.910[1122,1281] 53ns |-----------------------------------------L0.910-----------------------------------------|" + - "L0.924[1122,1281] 54ns |-----------------------------------------L0.924-----------------------------------------|" + - "L0.938[1122,1281] 55ns |-----------------------------------------L0.938-----------------------------------------|" + - "L0.952[1122,1281] 56ns |-----------------------------------------L0.952-----------------------------------------|" + - "L0.966[1122,1281] 57ns |-----------------------------------------L0.966-----------------------------------------|" + - "L0.980[1122,1281] 58ns |-----------------------------------------L0.980-----------------------------------------|" + - "L0.994[1122,1281] 59ns |-----------------------------------------L0.994-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 115ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1523, L0.1537, L0.1551, L0.1565, L0.1579, L0.1593, L0.1607, L0.1621, L0.1635, L0.1649, L0.1663, L0.1677, L0.1691, L0.1705, L0.1719, L0.1733, L0.1747, L0.1761, L0.1775, L0.2983" + - " Soft Deleting 20 files: L0.728, L0.742, L0.756, L0.770, L0.784, L0.798, L0.812, L0.826, L0.840, L0.854, L0.868, L0.882, L0.896, L0.910, L0.924, L0.938, L0.952, L0.966, L0.980, L0.994" - " Creating 1 files" - - "**** Simulation run 275, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 272, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2984[802,961] 96ns |----------------------------------------L0.2984-----------------------------------------|" - - "L0.1524[802,961] 97ns |----------------------------------------L0.1524-----------------------------------------|" - - "L0.1538[802,961] 98ns |----------------------------------------L0.1538-----------------------------------------|" - - "L0.1552[802,961] 99ns |----------------------------------------L0.1552-----------------------------------------|" - - "L0.1566[802,961] 100ns |----------------------------------------L0.1566-----------------------------------------|" - - "L0.1580[802,961] 101ns |----------------------------------------L0.1580-----------------------------------------|" - - "L0.1594[802,961] 102ns |----------------------------------------L0.1594-----------------------------------------|" - - "L0.1608[802,961] 103ns |----------------------------------------L0.1608-----------------------------------------|" - - "L0.1622[802,961] 104ns |----------------------------------------L0.1622-----------------------------------------|" - - "L0.1636[802,961] 105ns |----------------------------------------L0.1636-----------------------------------------|" - - "L0.1650[802,961] 106ns |----------------------------------------L0.1650-----------------------------------------|" - - "L0.1664[802,961] 107ns |----------------------------------------L0.1664-----------------------------------------|" - - "L0.1678[802,961] 108ns |----------------------------------------L0.1678-----------------------------------------|" - - "L0.1692[802,961] 109ns |----------------------------------------L0.1692-----------------------------------------|" - - "L0.1706[802,961] 110ns |----------------------------------------L0.1706-----------------------------------------|" - - "L0.1720[802,961] 111ns |----------------------------------------L0.1720-----------------------------------------|" - - "L0.1734[802,961] 112ns |----------------------------------------L0.1734-----------------------------------------|" - - "L0.1748[802,961] 113ns |----------------------------------------L0.1748-----------------------------------------|" - - "L0.1762[802,961] 114ns |----------------------------------------L0.1762-----------------------------------------|" - - "L0.1776[802,961] 115ns |----------------------------------------L0.1776-----------------------------------------|" + - "L0.1008[1122,1281] 60ns |----------------------------------------L0.1008-----------------------------------------|" + - "L0.1022[1122,1281] 61ns |----------------------------------------L0.1022-----------------------------------------|" + - "L0.1036[1122,1281] 62ns |----------------------------------------L0.1036-----------------------------------------|" + - "L0.1050[1122,1281] 63ns |----------------------------------------L0.1050-----------------------------------------|" + - "L0.1064[1122,1281] 64ns |----------------------------------------L0.1064-----------------------------------------|" + - "L0.1078[1122,1281] 65ns |----------------------------------------L0.1078-----------------------------------------|" + - "L0.1092[1122,1281] 66ns |----------------------------------------L0.1092-----------------------------------------|" + - "L0.1106[1122,1281] 67ns |----------------------------------------L0.1106-----------------------------------------|" + - "L0.1120[1122,1281] 68ns |----------------------------------------L0.1120-----------------------------------------|" + - 
"L0.1134[1122,1281] 69ns |----------------------------------------L0.1134-----------------------------------------|" + - "L0.1148[1122,1281] 70ns |----------------------------------------L0.1148-----------------------------------------|" + - "L0.1162[1122,1281] 71ns |----------------------------------------L0.1162-----------------------------------------|" + - "L0.1176[1122,1281] 72ns |----------------------------------------L0.1176-----------------------------------------|" + - "L0.1190[1122,1281] 73ns |----------------------------------------L0.1190-----------------------------------------|" + - "L0.1204[1122,1281] 74ns |----------------------------------------L0.1204-----------------------------------------|" + - "L0.1218[1122,1281] 75ns |----------------------------------------L0.1218-----------------------------------------|" + - "L0.1232[1122,1281] 76ns |----------------------------------------L0.1232-----------------------------------------|" + - "L0.1246[1122,1281] 77ns |----------------------------------------L0.1246-----------------------------------------|" + - "L0.1260[1122,1281] 78ns |----------------------------------------L0.1260-----------------------------------------|" + - "L0.1274[1122,1281] 79ns |----------------------------------------L0.1274-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 115ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1524, L0.1538, L0.1552, L0.1566, L0.1580, L0.1594, L0.1608, L0.1622, L0.1636, L0.1650, L0.1664, L0.1678, L0.1692, L0.1706, L0.1720, L0.1734, L0.1748, L0.1762, L0.1776, L0.2984" + - " Soft Deleting 20 files: L0.1008, L0.1022, L0.1036, L0.1050, L0.1064, L0.1078, L0.1092, L0.1106, L0.1120, L0.1134, L0.1148, L0.1162, L0.1176, L0.1190, L0.1204, L0.1218, L0.1232, L0.1246, L0.1260, L0.1274" - " Creating 1 files" - - "**** Simulation run 276, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 273, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2991[962,1121] 96ns |----------------------------------------L0.2991-----------------------------------------|" - - "L0.1525[962,1121] 97ns |----------------------------------------L0.1525-----------------------------------------|" - - "L0.1539[962,1121] 98ns |----------------------------------------L0.1539-----------------------------------------|" - - "L0.1553[962,1121] 99ns |----------------------------------------L0.1553-----------------------------------------|" - - "L0.1567[962,1121] 100ns |----------------------------------------L0.1567-----------------------------------------|" - - "L0.1581[962,1121] 101ns |----------------------------------------L0.1581-----------------------------------------|" - - "L0.1595[962,1121] 102ns |----------------------------------------L0.1595-----------------------------------------|" - - "L0.1609[962,1121] 103ns |----------------------------------------L0.1609-----------------------------------------|" - - "L0.1623[962,1121] 104ns |----------------------------------------L0.1623-----------------------------------------|" - - "L0.1637[962,1121] 105ns |----------------------------------------L0.1637-----------------------------------------|" - - "L0.1651[962,1121] 106ns |----------------------------------------L0.1651-----------------------------------------|" - - "L0.1665[962,1121] 107ns |----------------------------------------L0.1665-----------------------------------------|" - - "L0.1679[962,1121] 108ns |----------------------------------------L0.1679-----------------------------------------|" - - "L0.1693[962,1121] 109ns |----------------------------------------L0.1693-----------------------------------------|" - - "L0.1707[962,1121] 110ns |----------------------------------------L0.1707-----------------------------------------|" - - "L0.1721[962,1121] 111ns |----------------------------------------L0.1721-----------------------------------------|" - - "L0.1735[962,1121] 112ns |----------------------------------------L0.1735-----------------------------------------|" - - "L0.1749[962,1121] 113ns |----------------------------------------L0.1749-----------------------------------------|" - - "L0.1763[962,1121] 114ns |----------------------------------------L0.1763-----------------------------------------|" - - "L0.1777[962,1121] 115ns |----------------------------------------L0.1777-----------------------------------------|" + - "L0.1288[1122,1281] 80ns |----------------------------------------L0.1288-----------------------------------------|" + - "L0.1302[1122,1281] 81ns |----------------------------------------L0.1302-----------------------------------------|" + - "L0.1316[1122,1281] 82ns |----------------------------------------L0.1316-----------------------------------------|" + - "L0.1330[1122,1281] 83ns |----------------------------------------L0.1330-----------------------------------------|" + - "L0.1344[1122,1281] 84ns |----------------------------------------L0.1344-----------------------------------------|" + - "L0.1358[1122,1281] 85ns |----------------------------------------L0.1358-----------------------------------------|" + - "L0.1372[1122,1281] 86ns |----------------------------------------L0.1372-----------------------------------------|" + - "L0.1386[1122,1281] 87ns |----------------------------------------L0.1386-----------------------------------------|" + - "L0.1400[1122,1281] 88ns |----------------------------------------L0.1400-----------------------------------------|" 
+ - "L0.1414[1122,1281] 89ns |----------------------------------------L0.1414-----------------------------------------|" + - "L0.1428[1122,1281] 90ns |----------------------------------------L0.1428-----------------------------------------|" + - "L0.1442[1122,1281] 91ns |----------------------------------------L0.1442-----------------------------------------|" + - "L0.1456[1122,1281] 92ns |----------------------------------------L0.1456-----------------------------------------|" + - "L0.1470[1122,1281] 93ns |----------------------------------------L0.1470-----------------------------------------|" + - "L0.1484[1122,1281] 94ns |----------------------------------------L0.1484-----------------------------------------|" + - "L0.1498[1122,1281] 95ns |----------------------------------------L0.1498-----------------------------------------|" + - "L0.1512[1122,1281] 96ns |----------------------------------------L0.1512-----------------------------------------|" + - "L0.1526[1122,1281] 97ns |----------------------------------------L0.1526-----------------------------------------|" + - "L0.1540[1122,1281] 98ns |----------------------------------------L0.1540-----------------------------------------|" + - "L0.1554[1122,1281] 99ns |----------------------------------------L0.1554-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 115ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1525, L0.1539, L0.1553, L0.1567, L0.1581, L0.1595, L0.1609, L0.1623, L0.1637, L0.1651, L0.1665, L0.1679, L0.1693, L0.1707, L0.1721, L0.1735, L0.1749, L0.1763, L0.1777, L0.2991" + - " Soft Deleting 20 files: L0.1288, L0.1302, L0.1316, L0.1330, L0.1344, L0.1358, L0.1372, L0.1386, L0.1400, L0.1414, L0.1428, L0.1442, L0.1456, L0.1470, L0.1484, L0.1498, L0.1512, L0.1526, L0.1540, L0.1554" - " Creating 1 files" - - "**** Simulation run 277, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 274, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2992[1122,1281] 96ns |----------------------------------------L0.2992-----------------------------------------|" - - "L0.1526[1122,1281] 97ns |----------------------------------------L0.1526-----------------------------------------|" - - "L0.1540[1122,1281] 98ns |----------------------------------------L0.1540-----------------------------------------|" - - "L0.1554[1122,1281] 99ns |----------------------------------------L0.1554-----------------------------------------|" - "L0.1568[1122,1281] 100ns |----------------------------------------L0.1568-----------------------------------------|" - "L0.1582[1122,1281] 101ns |----------------------------------------L0.1582-----------------------------------------|" - "L0.1596[1122,1281] 102ns |----------------------------------------L0.1596-----------------------------------------|" @@ -7634,383 +7528,298 @@ async fn stuck_l0_large_l0s() { - "L0.1750[1122,1281] 113ns |----------------------------------------L0.1750-----------------------------------------|" - "L0.1764[1122,1281] 114ns |----------------------------------------L0.1764-----------------------------------------|" - "L0.1778[1122,1281] 115ns |----------------------------------------L0.1778-----------------------------------------|" + - "L0.1792[1122,1281] 116ns |----------------------------------------L0.1792-----------------------------------------|" + - "L0.1806[1122,1281] 117ns |----------------------------------------L0.1806-----------------------------------------|" + - "L0.1820[1122,1281] 118ns |----------------------------------------L0.1820-----------------------------------------|" + - "L0.1834[1122,1281] 119ns |----------------------------------------L0.1834-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1526, L0.1540, L0.1554, L0.1568, L0.1582, L0.1596, L0.1610, L0.1624, L0.1638, L0.1652, L0.1666, L0.1680, L0.1694, L0.1708, L0.1722, L0.1736, L0.1750, L0.1764, L0.1778, L0.2992" - - " Creating 1 files" - - "**** Simulation run 278, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2985[1282,1441] 96ns |----------------------------------------L0.2985-----------------------------------------|" - - "L0.1527[1282,1441] 97ns |----------------------------------------L0.1527-----------------------------------------|" - - "L0.1541[1282,1441] 98ns |----------------------------------------L0.1541-----------------------------------------|" - - "L0.1555[1282,1441] 99ns |----------------------------------------L0.1555-----------------------------------------|" - - "L0.1569[1282,1441] 100ns |----------------------------------------L0.1569-----------------------------------------|" - - "L0.1583[1282,1441] 101ns |----------------------------------------L0.1583-----------------------------------------|" - - "L0.1597[1282,1441] 102ns |----------------------------------------L0.1597-----------------------------------------|" - - "L0.1611[1282,1441] 103ns |----------------------------------------L0.1611-----------------------------------------|" - - "L0.1625[1282,1441] 104ns |----------------------------------------L0.1625-----------------------------------------|" - - "L0.1639[1282,1441] 105ns |----------------------------------------L0.1639-----------------------------------------|" - - "L0.1653[1282,1441] 106ns |----------------------------------------L0.1653-----------------------------------------|" - - "L0.1667[1282,1441] 107ns |----------------------------------------L0.1667-----------------------------------------|" - - "L0.1681[1282,1441] 108ns |----------------------------------------L0.1681-----------------------------------------|" - - "L0.1695[1282,1441] 109ns |----------------------------------------L0.1695-----------------------------------------|" - - "L0.1709[1282,1441] 110ns |----------------------------------------L0.1709-----------------------------------------|" - - "L0.1723[1282,1441] 111ns |----------------------------------------L0.1723-----------------------------------------|" - - "L0.1737[1282,1441] 112ns |----------------------------------------L0.1737-----------------------------------------|" - - "L0.1751[1282,1441] 113ns |----------------------------------------L0.1751-----------------------------------------|" - - "L0.1765[1282,1441] 114ns |----------------------------------------L0.1765-----------------------------------------|" - - "L0.1779[1282,1441] 115ns |----------------------------------------L0.1779-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1282,1441] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1527, L0.1541, L0.1555, L0.1569, L0.1583, L0.1597, L0.1611, L0.1625, L0.1639, L0.1653, L0.1667, L0.1681, L0.1695, L0.1709, L0.1723, L0.1737, L0.1751, L0.1765, L0.1779, L0.2985" - - " Creating 1 files" - - "**** Simulation run 279, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2986[1442,1601] 96ns |----------------------------------------L0.2986-----------------------------------------|" - - "L0.1528[1442,1601] 97ns |----------------------------------------L0.1528-----------------------------------------|" - - "L0.1542[1442,1601] 98ns |----------------------------------------L0.1542-----------------------------------------|" - - "L0.1556[1442,1601] 99ns |----------------------------------------L0.1556-----------------------------------------|" - - "L0.1570[1442,1601] 100ns |----------------------------------------L0.1570-----------------------------------------|" - - "L0.1584[1442,1601] 101ns |----------------------------------------L0.1584-----------------------------------------|" - - "L0.1598[1442,1601] 102ns |----------------------------------------L0.1598-----------------------------------------|" - - "L0.1612[1442,1601] 103ns |----------------------------------------L0.1612-----------------------------------------|" - - "L0.1626[1442,1601] 104ns |----------------------------------------L0.1626-----------------------------------------|" - - "L0.1640[1442,1601] 105ns |----------------------------------------L0.1640-----------------------------------------|" - - "L0.1654[1442,1601] 106ns |----------------------------------------L0.1654-----------------------------------------|" - - "L0.1668[1442,1601] 107ns |----------------------------------------L0.1668-----------------------------------------|" - - "L0.1682[1442,1601] 108ns |----------------------------------------L0.1682-----------------------------------------|" - - "L0.1696[1442,1601] 109ns |----------------------------------------L0.1696-----------------------------------------|" - - "L0.1710[1442,1601] 110ns |----------------------------------------L0.1710-----------------------------------------|" - - "L0.1724[1442,1601] 111ns |----------------------------------------L0.1724-----------------------------------------|" - - "L0.1738[1442,1601] 112ns |----------------------------------------L0.1738-----------------------------------------|" - - "L0.1752[1442,1601] 113ns |----------------------------------------L0.1752-----------------------------------------|" - - "L0.1766[1442,1601] 114ns |----------------------------------------L0.1766-----------------------------------------|" - - "L0.1780[1442,1601] 115ns |----------------------------------------L0.1780-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1442,1601] 115ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1528, L0.1542, L0.1556, L0.1570, L0.1584, L0.1598, L0.1612, L0.1626, L0.1640, L0.1654, L0.1668, L0.1682, L0.1696, L0.1710, L0.1724, L0.1738, L0.1752, L0.1766, L0.1780, L0.2986" - - " Creating 1 files" - - "**** Simulation run 280, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2987[1602,1761] 96ns |----------------------------------------L0.2987-----------------------------------------|" - - "L0.1529[1602,1761] 97ns |----------------------------------------L0.1529-----------------------------------------|" - - "L0.1543[1602,1761] 98ns |----------------------------------------L0.1543-----------------------------------------|" - - "L0.1557[1602,1761] 99ns |----------------------------------------L0.1557-----------------------------------------|" - - "L0.1571[1602,1761] 100ns |----------------------------------------L0.1571-----------------------------------------|" - - "L0.1585[1602,1761] 101ns |----------------------------------------L0.1585-----------------------------------------|" - - "L0.1599[1602,1761] 102ns |----------------------------------------L0.1599-----------------------------------------|" - - "L0.1613[1602,1761] 103ns |----------------------------------------L0.1613-----------------------------------------|" - - "L0.1627[1602,1761] 104ns |----------------------------------------L0.1627-----------------------------------------|" - - "L0.1641[1602,1761] 105ns |----------------------------------------L0.1641-----------------------------------------|" - - "L0.1655[1602,1761] 106ns |----------------------------------------L0.1655-----------------------------------------|" - - "L0.1669[1602,1761] 107ns |----------------------------------------L0.1669-----------------------------------------|" - - "L0.1683[1602,1761] 108ns |----------------------------------------L0.1683-----------------------------------------|" - - "L0.1697[1602,1761] 109ns |----------------------------------------L0.1697-----------------------------------------|" - - "L0.1711[1602,1761] 110ns |----------------------------------------L0.1711-----------------------------------------|" - - "L0.1725[1602,1761] 111ns |----------------------------------------L0.1725-----------------------------------------|" - - "L0.1739[1602,1761] 112ns |----------------------------------------L0.1739-----------------------------------------|" - - "L0.1753[1602,1761] 113ns |----------------------------------------L0.1753-----------------------------------------|" - - "L0.1767[1602,1761] 114ns |----------------------------------------L0.1767-----------------------------------------|" - - "L0.1781[1602,1761] 115ns |----------------------------------------L0.1781-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1602,1761] 115ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1529, L0.1543, L0.1557, L0.1571, L0.1585, L0.1599, L0.1613, L0.1627, L0.1641, L0.1655, L0.1669, L0.1683, L0.1697, L0.1711, L0.1725, L0.1739, L0.1753, L0.1767, L0.1781, L0.2987" + - " Soft Deleting 20 files: L0.1568, L0.1582, L0.1596, L0.1610, L0.1624, L0.1638, L0.1652, L0.1666, L0.1680, L0.1694, L0.1708, L0.1722, L0.1736, L0.1750, L0.1764, L0.1778, L0.1792, L0.1806, L0.1820, L0.1834" - " Creating 1 files" - - "**** Simulation run 281, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 275, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2988[1762,2000] 96ns |----------------------------------------L0.2988-----------------------------------------|" - - "L0.1530[1762,2000] 97ns |----------------------------------------L0.1530-----------------------------------------|" - - "L0.1544[1762,2000] 98ns |----------------------------------------L0.1544-----------------------------------------|" - - "L0.1558[1762,2000] 99ns |----------------------------------------L0.1558-----------------------------------------|" - - "L0.1572[1762,2000] 100ns |----------------------------------------L0.1572-----------------------------------------|" - - "L0.1586[1762,2000] 101ns |----------------------------------------L0.1586-----------------------------------------|" - - "L0.1600[1762,2000] 102ns |----------------------------------------L0.1600-----------------------------------------|" - - "L0.1614[1762,2000] 103ns |----------------------------------------L0.1614-----------------------------------------|" - - "L0.1628[1762,2000] 104ns |----------------------------------------L0.1628-----------------------------------------|" - - "L0.1642[1762,2000] 105ns |----------------------------------------L0.1642-----------------------------------------|" - - "L0.1656[1762,2000] 106ns |----------------------------------------L0.1656-----------------------------------------|" - - "L0.1670[1762,2000] 107ns |----------------------------------------L0.1670-----------------------------------------|" - - "L0.1684[1762,2000] 108ns |----------------------------------------L0.1684-----------------------------------------|" - - "L0.1698[1762,2000] 109ns |----------------------------------------L0.1698-----------------------------------------|" - - "L0.1712[1762,2000] 110ns |----------------------------------------L0.1712-----------------------------------------|" - - "L0.1726[1762,2000] 111ns |----------------------------------------L0.1726-----------------------------------------|" - - "L0.1740[1762,2000] 112ns |----------------------------------------L0.1740-----------------------------------------|" - - "L0.1754[1762,2000] 113ns |----------------------------------------L0.1754-----------------------------------------|" - - "L0.1768[1762,2000] 114ns |----------------------------------------L0.1768-----------------------------------------|" - - "L0.1782[1762,2000] 115ns |----------------------------------------L0.1782-----------------------------------------|" + - "L0.1848[1122,1281] 120ns |----------------------------------------L0.1848-----------------------------------------|" + - "L0.1862[1122,1281] 121ns |----------------------------------------L0.1862-----------------------------------------|" + - "L0.1876[1122,1281] 122ns |----------------------------------------L0.1876-----------------------------------------|" + - "L0.1890[1122,1281] 123ns |----------------------------------------L0.1890-----------------------------------------|" + - "L0.1904[1122,1281] 124ns |----------------------------------------L0.1904-----------------------------------------|" + - "L0.1918[1122,1281] 125ns |----------------------------------------L0.1918-----------------------------------------|" + - "L0.1932[1122,1281] 126ns |----------------------------------------L0.1932-----------------------------------------|" + - "L0.1946[1122,1281] 127ns |----------------------------------------L0.1946-----------------------------------------|" + - "L0.1960[1122,1281] 128ns 
|----------------------------------------L0.1960-----------------------------------------|" + - "L0.1974[1122,1281] 129ns |----------------------------------------L0.1974-----------------------------------------|" + - "L0.1988[1122,1281] 130ns |----------------------------------------L0.1988-----------------------------------------|" + - "L0.2002[1122,1281] 131ns |----------------------------------------L0.2002-----------------------------------------|" + - "L0.2016[1122,1281] 132ns |----------------------------------------L0.2016-----------------------------------------|" + - "L0.2030[1122,1281] 133ns |----------------------------------------L0.2030-----------------------------------------|" + - "L0.2156[1122,1281] 134ns |----------------------------------------L0.2156-----------------------------------------|" + - "L0.2170[1122,1281] 135ns |----------------------------------------L0.2170-----------------------------------------|" + - "L0.2044[1122,1281] 136ns |----------------------------------------L0.2044-----------------------------------------|" + - "L0.2058[1122,1281] 137ns |----------------------------------------L0.2058-----------------------------------------|" + - "L0.2072[1122,1281] 138ns |----------------------------------------L0.2072-----------------------------------------|" + - "L0.2086[1122,1281] 139ns |----------------------------------------L0.2086-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 115ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1530, L0.1544, L0.1558, L0.1572, L0.1586, L0.1600, L0.1614, L0.1628, L0.1642, L0.1656, L0.1670, L0.1684, L0.1698, L0.1712, L0.1726, L0.1740, L0.1754, L0.1768, L0.1782, L0.2988" + - " Soft Deleting 20 files: L0.1848, L0.1862, L0.1876, L0.1890, L0.1904, L0.1918, L0.1932, L0.1946, L0.1960, L0.1974, L0.1988, L0.2002, L0.2016, L0.2030, L0.2044, L0.2058, L0.2072, L0.2086, L0.2156, L0.2170" - " Creating 1 files" - - "**** Simulation run 282, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 276, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2989[2001,2086] 115ns |----------------------------------------L0.2989-----------------------------------------|" - - "L0.1797[2001,2086] 116ns |----------------------------------------L0.1797-----------------------------------------|" - - "L0.1811[2001,2086] 117ns |----------------------------------------L0.1811-----------------------------------------|" - - "L0.1825[2001,2086] 118ns |----------------------------------------L0.1825-----------------------------------------|" - - "L0.1839[2001,2086] 119ns |----------------------------------------L0.1839-----------------------------------------|" - - "L0.1853[2001,2086] 120ns |----------------------------------------L0.1853-----------------------------------------|" - - "L0.1867[2001,2086] 121ns |----------------------------------------L0.1867-----------------------------------------|" - - "L0.1881[2001,2086] 122ns |----------------------------------------L0.1881-----------------------------------------|" - - "L0.1895[2001,2086] 123ns |----------------------------------------L0.1895-----------------------------------------|" - - "L0.1909[2001,2086] 124ns |----------------------------------------L0.1909-----------------------------------------|" - - "L0.1923[2001,2086] 125ns |----------------------------------------L0.1923-----------------------------------------|" - - "L0.1937[2001,2086] 126ns |----------------------------------------L0.1937-----------------------------------------|" - - "L0.1951[2001,2086] 127ns |----------------------------------------L0.1951-----------------------------------------|" - - "L0.1965[2001,2086] 128ns |----------------------------------------L0.1965-----------------------------------------|" - - "L0.1979[2001,2086] 129ns |----------------------------------------L0.1979-----------------------------------------|" - - "L0.1993[2001,2086] 130ns |----------------------------------------L0.1993-----------------------------------------|" - - "L0.2007[2001,2086] 131ns |----------------------------------------L0.2007-----------------------------------------|" - - "L0.2021[2001,2086] 132ns |----------------------------------------L0.2021-----------------------------------------|" - - "L0.2035[2001,2086] 133ns |----------------------------------------L0.2035-----------------------------------------|" - - "L0.2161[2001,2086] 134ns |----------------------------------------L0.2161-----------------------------------------|" + - "L0.2100[1122,1281] 140ns |----------------------------------------L0.2100-----------------------------------------|" + - "L0.2114[1122,1281] 141ns |----------------------------------------L0.2114-----------------------------------------|" + - "L0.2128[1122,1281] 142ns |----------------------------------------L0.2128-----------------------------------------|" + - "L0.2142[1122,1281] 143ns |----------------------------------------L0.2142-----------------------------------------|" + - "L0.2184[1122,1281] 144ns |----------------------------------------L0.2184-----------------------------------------|" + - "L0.2198[1122,1281] 145ns |----------------------------------------L0.2198-----------------------------------------|" + - "L0.2212[1122,1281] 146ns |----------------------------------------L0.2212-----------------------------------------|" + - "L0.2226[1122,1281] 147ns |----------------------------------------L0.2226-----------------------------------------|" + - "L0.2240[1122,1281] 148ns 
|----------------------------------------L0.2240-----------------------------------------|" + - "L0.2254[1122,1281] 149ns |----------------------------------------L0.2254-----------------------------------------|" + - "L0.2268[1122,1281] 150ns |----------------------------------------L0.2268-----------------------------------------|" + - "L0.2282[1122,1281] 151ns |----------------------------------------L0.2282-----------------------------------------|" + - "L0.2296[1122,1281] 152ns |----------------------------------------L0.2296-----------------------------------------|" + - "L0.2310[1122,1281] 153ns |----------------------------------------L0.2310-----------------------------------------|" + - "L0.2324[1122,1281] 154ns |----------------------------------------L0.2324-----------------------------------------|" + - "L0.2338[1122,1281] 155ns |----------------------------------------L0.2338-----------------------------------------|" + - "L0.2352[1122,1281] 156ns |----------------------------------------L0.2352-----------------------------------------|" + - "L0.2366[1122,1281] 157ns |----------------------------------------L0.2366-----------------------------------------|" + - "L0.2380[1122,1281] 158ns |----------------------------------------L0.2380-----------------------------------------|" + - "L0.2394[1122,1281] 159ns |----------------------------------------L0.2394-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1797, L0.1811, L0.1825, L0.1839, L0.1853, L0.1867, L0.1881, L0.1895, L0.1909, L0.1923, L0.1937, L0.1951, L0.1965, L0.1979, L0.1993, L0.2007, L0.2021, L0.2035, L0.2161, L0.2989" - - " Creating 1 files" - - "**** Simulation run 283, type=compact(ManySmallFiles). 
20 Input Files, 1kb total:" - - "L0 " - - "L0.2990[2087,1150000] 115ns 960b|----------------------------------L0.2990----------------------------------| " - - "L0.1798[2087,1160000] 116ns 10b|----------------------------------L0.1798----------------------------------| " - - "L0.1812[2087,1170000] 117ns 10b|----------------------------------L0.1812-----------------------------------| " - - "L0.1826[2087,1180000] 118ns 10b|-----------------------------------L0.1826-----------------------------------| " - - "L0.1840[2087,1190000] 119ns 10b|-----------------------------------L0.1840-----------------------------------| " - - "L0.1854[2087,1200000] 120ns 10b|-----------------------------------L0.1854------------------------------------| " - - "L0.1868[2087,1210000] 121ns 10b|------------------------------------L0.1868------------------------------------| " - - "L0.1882[2087,1220000] 122ns 10b|------------------------------------L0.1882------------------------------------| " - - "L0.1896[2087,1230000] 123ns 10b|------------------------------------L0.1896-------------------------------------| " - - "L0.1910[2087,1240000] 124ns 10b|-------------------------------------L0.1910-------------------------------------| " - - "L0.1924[2087,1250000] 125ns 10b|-------------------------------------L0.1924-------------------------------------| " - - "L0.1938[2087,1260000] 126ns 10b|-------------------------------------L0.1938--------------------------------------| " - - "L0.1952[2087,1270000] 127ns 10b|--------------------------------------L0.1952--------------------------------------| " - - "L0.1966[2087,1280000] 128ns 10b|--------------------------------------L0.1966--------------------------------------| " - - "L0.1980[2087,1290000] 129ns 10b|--------------------------------------L0.1980---------------------------------------| " - - "L0.1994[2087,1300000] 130ns 10b|---------------------------------------L0.1994---------------------------------------| " - - "L0.2008[2087,1310000] 131ns 10b|---------------------------------------L0.2008---------------------------------------| " - - "L0.2022[2087,1320000] 132ns 10b|---------------------------------------L0.2022----------------------------------------| " - - "L0.2036[2087,1330000] 133ns 10b|----------------------------------------L0.2036----------------------------------------| " - - "L0.2162[2087,1340000] 134ns 10b|----------------------------------------L0.2162-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 1kb total:" - - "L0, all files 1kb " - - "L0.?[2087,1340000] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1798, L0.1812, L0.1826, L0.1840, L0.1854, L0.1868, L0.1882, L0.1896, L0.1910, L0.1924, L0.1938, L0.1952, L0.1966, L0.1980, L0.1994, L0.2008, L0.2022, L0.2036, L0.2162, L0.2990" + - "L0.?[1122,1281] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2100, L0.2114, L0.2128, L0.2142, L0.2184, L0.2198, L0.2212, L0.2226, L0.2240, L0.2254, L0.2268, L0.2282, L0.2296, L0.2310, L0.2324, L0.2338, L0.2352, L0.2366, L0.2380, L0.2394" - " Creating 1 files" - - "**** Simulation run 284, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 277, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2993[20,161] 115ns |----------------------------------------L0.2993-----------------------------------------|" - - "L0.1785[116,161] 116ns |---------L0.1785----------| " - - "L0.1799[117,161] 117ns |---------L0.1799----------| " - - "L0.1813[118,161] 118ns |---------L0.1813---------| " - - "L0.1827[119,161] 119ns |--------L0.1827---------| " - - "L0.1841[120,161] 120ns |--------L0.1841---------| " - - "L0.1855[121,161] 121ns |--------L0.1855--------| " - - "L0.1869[122,161] 122ns |-------L0.1869--------| " - - "L0.1883[123,161] 123ns |-------L0.1883--------| " - - "L0.1897[124,161] 124ns |-------L0.1897-------| " - - "L0.1911[125,161] 125ns |------L0.1911-------| " - - "L0.1925[126,161] 126ns |------L0.1925-------| " - - "L0.1939[127,161] 127ns |------L0.1939------| " - - "L0.1953[128,161] 128ns |------L0.1953------| " - - "L0.1967[129,161] 129ns |-----L0.1967------| " - - "L0.1981[130,161] 130ns |-----L0.1981-----| " - - "L0.1995[131,161] 131ns |-----L0.1995-----| " - - "L0.2009[132,161] 132ns |----L0.2009-----| " - - "L0.2023[133,161] 133ns |----L0.2023----| " - - "L0.2149[134,161] 134ns |----L0.2149----| " + - "L0.2408[1122,1281] 160ns |----------------------------------------L0.2408-----------------------------------------|" + - "L0.2422[1122,1281] 161ns |----------------------------------------L0.2422-----------------------------------------|" + - "L0.2435[1122,1281] 162ns |----------------------------------------L0.2435-----------------------------------------|" + - "L0.2448[1122,1281] 163ns |----------------------------------------L0.2448-----------------------------------------|" + - "L0.2461[1122,1281] 164ns |----------------------------------------L0.2461-----------------------------------------|" + - "L0.2474[1122,1281] 165ns |----------------------------------------L0.2474-----------------------------------------|" + - "L0.2487[1122,1281] 166ns |----------------------------------------L0.2487-----------------------------------------|" + - "L0.2500[1122,1281] 167ns |----------------------------------------L0.2500-----------------------------------------|" + - "L0.2513[1122,1281] 168ns |----------------------------------------L0.2513-----------------------------------------|" + - "L0.2526[1122,1281] 169ns |----------------------------------------L0.2526-----------------------------------------|" + - "L0.2539[1122,1281] 170ns |----------------------------------------L0.2539-----------------------------------------|" + - "L0.2552[1122,1281] 171ns |----------------------------------------L0.2552-----------------------------------------|" + - "L0.2565[1122,1281] 172ns |----------------------------------------L0.2565-----------------------------------------|" + - "L0.2578[1122,1281] 173ns |----------------------------------------L0.2578-----------------------------------------|" + - "L0.2591[1122,1281] 174ns |----------------------------------------L0.2591-----------------------------------------|" + - "L0.2604[1122,1281] 175ns |----------------------------------------L0.2604-----------------------------------------|" + - "L0.2617[1122,1281] 176ns |----------------------------------------L0.2617-----------------------------------------|" + - "L0.2630[1122,1281] 177ns |----------------------------------------L0.2630-----------------------------------------|" + - "L0.2643[1122,1281] 178ns |----------------------------------------L0.2643-----------------------------------------|" + - "L0.2656[1122,1281] 179ns 
|----------------------------------------L0.2656-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[20,161] 134ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1785, L0.1799, L0.1813, L0.1827, L0.1841, L0.1855, L0.1869, L0.1883, L0.1897, L0.1911, L0.1925, L0.1939, L0.1953, L0.1967, L0.1981, L0.1995, L0.2009, L0.2023, L0.2149, L0.2993" + - " Soft Deleting 20 files: L0.2408, L0.2422, L0.2435, L0.2448, L0.2461, L0.2474, L0.2487, L0.2500, L0.2513, L0.2526, L0.2539, L0.2552, L0.2565, L0.2578, L0.2591, L0.2604, L0.2617, L0.2630, L0.2643, L0.2656" - " Creating 1 files" - - "**** Simulation run 285, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 278, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2994[162,321] 115ns |----------------------------------------L0.2994-----------------------------------------|" - - "L0.1786[162,321] 116ns |----------------------------------------L0.1786-----------------------------------------|" - - "L0.1800[162,321] 117ns |----------------------------------------L0.1800-----------------------------------------|" - - "L0.1814[162,321] 118ns |----------------------------------------L0.1814-----------------------------------------|" - - "L0.1828[162,321] 119ns |----------------------------------------L0.1828-----------------------------------------|" - - "L0.1842[162,321] 120ns |----------------------------------------L0.1842-----------------------------------------|" - - "L0.1856[162,321] 121ns |----------------------------------------L0.1856-----------------------------------------|" - - "L0.1870[162,321] 122ns |----------------------------------------L0.1870-----------------------------------------|" - - "L0.1884[162,321] 123ns |----------------------------------------L0.1884-----------------------------------------|" - - "L0.1898[162,321] 124ns |----------------------------------------L0.1898-----------------------------------------|" - - "L0.1912[162,321] 125ns |----------------------------------------L0.1912-----------------------------------------|" - - "L0.1926[162,321] 126ns |----------------------------------------L0.1926-----------------------------------------|" - - "L0.1940[162,321] 127ns |----------------------------------------L0.1940-----------------------------------------|" - - "L0.1954[162,321] 128ns |----------------------------------------L0.1954-----------------------------------------|" - - "L0.1968[162,321] 129ns |----------------------------------------L0.1968-----------------------------------------|" - - "L0.1982[162,321] 130ns |----------------------------------------L0.1982-----------------------------------------|" - - "L0.1996[162,321] 131ns |----------------------------------------L0.1996-----------------------------------------|" - - "L0.2010[162,321] 132ns |----------------------------------------L0.2010-----------------------------------------|" - - "L0.2024[162,321] 133ns |----------------------------------------L0.2024-----------------------------------------|" - - "L0.2150[162,321] 134ns |----------------------------------------L0.2150-----------------------------------------|" + - "L0.2669[1122,1281] 180ns 
|----------------------------------------L0.2669-----------------------------------------|" + - "L0.2682[1122,1281] 181ns |----------------------------------------L0.2682-----------------------------------------|" + - "L0.2695[1122,1281] 182ns |----------------------------------------L0.2695-----------------------------------------|" + - "L0.2708[1122,1281] 183ns |----------------------------------------L0.2708-----------------------------------------|" + - "L0.2721[1122,1281] 184ns |----------------------------------------L0.2721-----------------------------------------|" + - "L0.2734[1122,1281] 185ns |----------------------------------------L0.2734-----------------------------------------|" + - "L0.2747[1122,1281] 186ns |----------------------------------------L0.2747-----------------------------------------|" + - "L0.2760[1122,1281] 187ns |----------------------------------------L0.2760-----------------------------------------|" + - "L0.2773[1122,1281] 188ns |----------------------------------------L0.2773-----------------------------------------|" + - "L0.2786[1122,1281] 189ns |----------------------------------------L0.2786-----------------------------------------|" + - "L0.2799[1122,1281] 190ns |----------------------------------------L0.2799-----------------------------------------|" + - "L0.2812[1122,1281] 191ns |----------------------------------------L0.2812-----------------------------------------|" + - "L0.2825[1122,1281] 192ns |----------------------------------------L0.2825-----------------------------------------|" + - "L0.2838[1122,1281] 193ns |----------------------------------------L0.2838-----------------------------------------|" + - "L0.2851[1122,1281] 194ns |----------------------------------------L0.2851-----------------------------------------|" + - "L0.2864[1122,1281] 195ns |----------------------------------------L0.2864-----------------------------------------|" + - "L0.2877[1122,1281] 196ns |----------------------------------------L0.2877-----------------------------------------|" + - "L0.2890[1122,1281] 197ns |----------------------------------------L0.2890-----------------------------------------|" + - "L0.2903[1122,1281] 198ns |----------------------------------------L0.2903-----------------------------------------|" + - "L0.2916[1122,1281] 199ns |----------------------------------------L0.2916-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 134ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1122,1281] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1786, L0.1800, L0.1814, L0.1828, L0.1842, L0.1856, L0.1870, L0.1884, L0.1898, L0.1912, L0.1926, L0.1940, L0.1954, L0.1968, L0.1982, L0.1996, L0.2010, L0.2024, L0.2150, L0.2994" + - " Soft Deleting 20 files: L0.2669, L0.2682, L0.2695, L0.2708, L0.2721, L0.2734, L0.2747, L0.2760, L0.2773, L0.2786, L0.2799, L0.2812, L0.2825, L0.2838, L0.2851, L0.2864, L0.2877, L0.2890, L0.2903, L0.2916" - " Creating 1 files" - - "**** Simulation run 286, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2995[322,481] 115ns |----------------------------------------L0.2995-----------------------------------------|" - - "L0.1787[322,481] 116ns |----------------------------------------L0.1787-----------------------------------------|" - - "L0.1801[322,481] 117ns |----------------------------------------L0.1801-----------------------------------------|" - - "L0.1815[322,481] 118ns |----------------------------------------L0.1815-----------------------------------------|" - - "L0.1829[322,481] 119ns |----------------------------------------L0.1829-----------------------------------------|" - - "L0.1843[322,481] 120ns |----------------------------------------L0.1843-----------------------------------------|" - - "L0.1857[322,481] 121ns |----------------------------------------L0.1857-----------------------------------------|" - - "L0.1871[322,481] 122ns |----------------------------------------L0.1871-----------------------------------------|" - - "L0.1885[322,481] 123ns |----------------------------------------L0.1885-----------------------------------------|" - - "L0.1899[322,481] 124ns |----------------------------------------L0.1899-----------------------------------------|" - - "L0.1913[322,481] 125ns |----------------------------------------L0.1913-----------------------------------------|" - - "L0.1927[322,481] 126ns |----------------------------------------L0.1927-----------------------------------------|" - - "L0.1941[322,481] 127ns |----------------------------------------L0.1941-----------------------------------------|" - - "L0.1955[322,481] 128ns |----------------------------------------L0.1955-----------------------------------------|" - - "L0.1969[322,481] 129ns |----------------------------------------L0.1969-----------------------------------------|" - - "L0.1983[322,481] 130ns |----------------------------------------L0.1983-----------------------------------------|" - - "L0.1997[322,481] 131ns |----------------------------------------L0.1997-----------------------------------------|" - - "L0.2011[322,481] 132ns |----------------------------------------L0.2011-----------------------------------------|" - - "L0.2025[322,481] 133ns |----------------------------------------L0.2025-----------------------------------------|" - - "L0.2151[322,481] 134ns |----------------------------------------L0.2151-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[322,481] 134ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 279, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.209[1282,1441] 0ns |-----------------------------------------L0.209-----------------------------------------|" + - "L0.221[1282,1441] 1ns |-----------------------------------------L0.221-----------------------------------------|" + - "L0.233[1282,1441] 2ns |-----------------------------------------L0.233-----------------------------------------|" + - "L0.245[1282,1441] 3ns |-----------------------------------------L0.245-----------------------------------------|" + - "L0.257[1282,1441] 4ns |-----------------------------------------L0.257-----------------------------------------|" + - "L0.269[1282,1441] 5ns |-----------------------------------------L0.269-----------------------------------------|" + - "L0.281[1282,1441] 6ns |-----------------------------------------L0.281-----------------------------------------|" + - "L0.293[1282,1441] 7ns |-----------------------------------------L0.293-----------------------------------------|" + - "L0.305[1282,1441] 8ns |-----------------------------------------L0.305-----------------------------------------|" + - "L0.317[1282,1441] 9ns |-----------------------------------------L0.317-----------------------------------------|" + - "L0.329[1282,1441] 10ns |-----------------------------------------L0.329-----------------------------------------|" + - "L0.341[1282,1441] 11ns |-----------------------------------------L0.341-----------------------------------------|" + - "L0.353[1282,1441] 12ns |-----------------------------------------L0.353-----------------------------------------|" + - "L0.365[1282,1441] 13ns |-----------------------------------------L0.365-----------------------------------------|" + - "L0.377[1282,1441] 14ns |-----------------------------------------L0.377-----------------------------------------|" + - "L0.389[1282,1441] 15ns |-----------------------------------------L0.389-----------------------------------------|" + - "L0.401[1282,1441] 16ns |-----------------------------------------L0.401-----------------------------------------|" + - "L0.413[1282,1441] 17ns |-----------------------------------------L0.413-----------------------------------------|" + - "L0.537[1282,1441] 18ns |-----------------------------------------L0.537-----------------------------------------|" + - "L0.549[1282,1441] 19ns |-----------------------------------------L0.549-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[1282,1441] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1787, L0.1801, L0.1815, L0.1829, L0.1843, L0.1857, L0.1871, L0.1885, L0.1899, L0.1913, L0.1927, L0.1941, L0.1955, L0.1969, L0.1983, L0.1997, L0.2011, L0.2025, L0.2151, L0.2995" + - " Soft Deleting 20 files: L0.209, L0.221, L0.233, L0.245, L0.257, L0.269, L0.281, L0.293, L0.305, L0.317, L0.329, L0.341, L0.353, L0.365, L0.377, L0.389, L0.401, L0.413, L0.537, L0.549" - " Creating 1 files" - - "**** Simulation run 287, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 280, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2996[482,641] 115ns |----------------------------------------L0.2996-----------------------------------------|" - - "L0.1788[482,641] 116ns |----------------------------------------L0.1788-----------------------------------------|" - - "L0.1802[482,641] 117ns |----------------------------------------L0.1802-----------------------------------------|" - - "L0.1816[482,641] 118ns |----------------------------------------L0.1816-----------------------------------------|" - - "L0.1830[482,641] 119ns |----------------------------------------L0.1830-----------------------------------------|" - - "L0.1844[482,641] 120ns |----------------------------------------L0.1844-----------------------------------------|" - - "L0.1858[482,641] 121ns |----------------------------------------L0.1858-----------------------------------------|" - - "L0.1872[482,641] 122ns |----------------------------------------L0.1872-----------------------------------------|" - - "L0.1886[482,641] 123ns |----------------------------------------L0.1886-----------------------------------------|" - - "L0.1900[482,641] 124ns |----------------------------------------L0.1900-----------------------------------------|" - - "L0.1914[482,641] 125ns |----------------------------------------L0.1914-----------------------------------------|" - - "L0.1928[482,641] 126ns |----------------------------------------L0.1928-----------------------------------------|" - - "L0.1942[482,641] 127ns |----------------------------------------L0.1942-----------------------------------------|" - - "L0.1956[482,641] 128ns |----------------------------------------L0.1956-----------------------------------------|" - - "L0.1970[482,641] 129ns |----------------------------------------L0.1970-----------------------------------------|" - - "L0.1984[482,641] 130ns |----------------------------------------L0.1984-----------------------------------------|" - - "L0.1998[482,641] 131ns |----------------------------------------L0.1998-----------------------------------------|" - - "L0.2012[482,641] 132ns |----------------------------------------L0.2012-----------------------------------------|" - - "L0.2026[482,641] 133ns |----------------------------------------L0.2026-----------------------------------------|" - - "L0.2152[482,641] 134ns |----------------------------------------L0.2152-----------------------------------------|" + - "L0.425[1282,1441] 20ns |-----------------------------------------L0.425-----------------------------------------|" + - "L0.439[1282,1441] 21ns |-----------------------------------------L0.439-----------------------------------------|" + - "L0.453[1282,1441] 22ns |-----------------------------------------L0.453-----------------------------------------|" + - "L0.467[1282,1441] 23ns |-----------------------------------------L0.467-----------------------------------------|" + - "L0.481[1282,1441] 24ns |-----------------------------------------L0.481-----------------------------------------|" + - "L0.495[1282,1441] 25ns |-----------------------------------------L0.495-----------------------------------------|" + - "L0.509[1282,1441] 26ns |-----------------------------------------L0.509-----------------------------------------|" + - "L0.523[1282,1441] 27ns |-----------------------------------------L0.523-----------------------------------------|" + - "L0.561[1282,1441] 28ns |-----------------------------------------L0.561-----------------------------------------|" + - "L0.575[1282,1441] 
29ns |-----------------------------------------L0.575-----------------------------------------|" + - "L0.589[1282,1441] 30ns |-----------------------------------------L0.589-----------------------------------------|" + - "L0.603[1282,1441] 31ns |-----------------------------------------L0.603-----------------------------------------|" + - "L0.617[1282,1441] 32ns |-----------------------------------------L0.617-----------------------------------------|" + - "L0.631[1282,1441] 33ns |-----------------------------------------L0.631-----------------------------------------|" + - "L0.645[1282,1441] 34ns |-----------------------------------------L0.645-----------------------------------------|" + - "L0.659[1282,1441] 35ns |-----------------------------------------L0.659-----------------------------------------|" + - "L0.673[1282,1441] 36ns |-----------------------------------------L0.673-----------------------------------------|" + - "L0.687[1282,1441] 37ns |-----------------------------------------L0.687-----------------------------------------|" + - "L0.701[1282,1441] 38ns |-----------------------------------------L0.701-----------------------------------------|" + - "L0.715[1282,1441] 39ns |-----------------------------------------L0.715-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 134ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1788, L0.1802, L0.1816, L0.1830, L0.1844, L0.1858, L0.1872, L0.1886, L0.1900, L0.1914, L0.1928, L0.1942, L0.1956, L0.1970, L0.1984, L0.1998, L0.2012, L0.2026, L0.2152, L0.2996" + - " Soft Deleting 20 files: L0.425, L0.439, L0.453, L0.467, L0.481, L0.495, L0.509, L0.523, L0.561, L0.575, L0.589, L0.603, L0.617, L0.631, L0.645, L0.659, L0.673, L0.687, L0.701, L0.715" - " Creating 1 files" - - "**** Simulation run 288, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 281, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2998[802,961] 115ns |----------------------------------------L0.2998-----------------------------------------|" - - "L0.1790[802,961] 116ns |----------------------------------------L0.1790-----------------------------------------|" - - "L0.1804[802,961] 117ns |----------------------------------------L0.1804-----------------------------------------|" - - "L0.1818[802,961] 118ns |----------------------------------------L0.1818-----------------------------------------|" - - "L0.1832[802,961] 119ns |----------------------------------------L0.1832-----------------------------------------|" - - "L0.1846[802,961] 120ns |----------------------------------------L0.1846-----------------------------------------|" - - "L0.1860[802,961] 121ns |----------------------------------------L0.1860-----------------------------------------|" - - "L0.1874[802,961] 122ns |----------------------------------------L0.1874-----------------------------------------|" - - "L0.1888[802,961] 123ns |----------------------------------------L0.1888-----------------------------------------|" - - "L0.1902[802,961] 124ns |----------------------------------------L0.1902-----------------------------------------|" - - "L0.1916[802,961] 125ns |----------------------------------------L0.1916-----------------------------------------|" - - "L0.1930[802,961] 126ns |----------------------------------------L0.1930-----------------------------------------|" - - "L0.1944[802,961] 127ns |----------------------------------------L0.1944-----------------------------------------|" - - "L0.1958[802,961] 128ns |----------------------------------------L0.1958-----------------------------------------|" - - "L0.1972[802,961] 129ns |----------------------------------------L0.1972-----------------------------------------|" - - "L0.1986[802,961] 130ns |----------------------------------------L0.1986-----------------------------------------|" - - "L0.2000[802,961] 131ns |----------------------------------------L0.2000-----------------------------------------|" - - "L0.2014[802,961] 132ns |----------------------------------------L0.2014-----------------------------------------|" - - "L0.2028[802,961] 133ns |----------------------------------------L0.2028-----------------------------------------|" - - "L0.2154[802,961] 134ns |----------------------------------------L0.2154-----------------------------------------|" + - "L0.729[1282,1441] 40ns |-----------------------------------------L0.729-----------------------------------------|" + - "L0.743[1282,1441] 41ns |-----------------------------------------L0.743-----------------------------------------|" + - "L0.757[1282,1441] 42ns |-----------------------------------------L0.757-----------------------------------------|" + - "L0.771[1282,1441] 43ns |-----------------------------------------L0.771-----------------------------------------|" + - "L0.785[1282,1441] 44ns |-----------------------------------------L0.785-----------------------------------------|" + - "L0.799[1282,1441] 45ns |-----------------------------------------L0.799-----------------------------------------|" + - "L0.813[1282,1441] 46ns |-----------------------------------------L0.813-----------------------------------------|" + - "L0.827[1282,1441] 47ns |-----------------------------------------L0.827-----------------------------------------|" + - "L0.841[1282,1441] 48ns |-----------------------------------------L0.841-----------------------------------------|" + - "L0.855[1282,1441] 
49ns |-----------------------------------------L0.855-----------------------------------------|" + - "L0.869[1282,1441] 50ns |-----------------------------------------L0.869-----------------------------------------|" + - "L0.883[1282,1441] 51ns |-----------------------------------------L0.883-----------------------------------------|" + - "L0.897[1282,1441] 52ns |-----------------------------------------L0.897-----------------------------------------|" + - "L0.911[1282,1441] 53ns |-----------------------------------------L0.911-----------------------------------------|" + - "L0.925[1282,1441] 54ns |-----------------------------------------L0.925-----------------------------------------|" + - "L0.939[1282,1441] 55ns |-----------------------------------------L0.939-----------------------------------------|" + - "L0.953[1282,1441] 56ns |-----------------------------------------L0.953-----------------------------------------|" + - "L0.967[1282,1441] 57ns |-----------------------------------------L0.967-----------------------------------------|" + - "L0.981[1282,1441] 58ns |-----------------------------------------L0.981-----------------------------------------|" + - "L0.995[1282,1441] 59ns |-----------------------------------------L0.995-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 134ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1790, L0.1804, L0.1818, L0.1832, L0.1846, L0.1860, L0.1874, L0.1888, L0.1902, L0.1916, L0.1930, L0.1944, L0.1958, L0.1972, L0.1986, L0.2000, L0.2014, L0.2028, L0.2154, L0.2998" + - " Soft Deleting 20 files: L0.729, L0.743, L0.757, L0.771, L0.785, L0.799, L0.813, L0.827, L0.841, L0.855, L0.869, L0.883, L0.897, L0.911, L0.925, L0.939, L0.953, L0.967, L0.981, L0.995" - " Creating 1 files" - - "**** Simulation run 289, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 282, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2999[962,1121] 115ns |----------------------------------------L0.2999-----------------------------------------|" - - "L0.1791[962,1121] 116ns |----------------------------------------L0.1791-----------------------------------------|" - - "L0.1805[962,1121] 117ns |----------------------------------------L0.1805-----------------------------------------|" - - "L0.1819[962,1121] 118ns |----------------------------------------L0.1819-----------------------------------------|" - - "L0.1833[962,1121] 119ns |----------------------------------------L0.1833-----------------------------------------|" - - "L0.1847[962,1121] 120ns |----------------------------------------L0.1847-----------------------------------------|" - - "L0.1861[962,1121] 121ns |----------------------------------------L0.1861-----------------------------------------|" - - "L0.1875[962,1121] 122ns |----------------------------------------L0.1875-----------------------------------------|" - - "L0.1889[962,1121] 123ns |----------------------------------------L0.1889-----------------------------------------|" - - "L0.1903[962,1121] 124ns |----------------------------------------L0.1903-----------------------------------------|" - - "L0.1917[962,1121] 125ns |----------------------------------------L0.1917-----------------------------------------|" - - "L0.1931[962,1121] 126ns |----------------------------------------L0.1931-----------------------------------------|" - - "L0.1945[962,1121] 127ns |----------------------------------------L0.1945-----------------------------------------|" - - "L0.1959[962,1121] 128ns |----------------------------------------L0.1959-----------------------------------------|" - - "L0.1973[962,1121] 129ns |----------------------------------------L0.1973-----------------------------------------|" - - "L0.1987[962,1121] 130ns |----------------------------------------L0.1987-----------------------------------------|" - - "L0.2001[962,1121] 131ns |----------------------------------------L0.2001-----------------------------------------|" - - "L0.2015[962,1121] 132ns |----------------------------------------L0.2015-----------------------------------------|" - - "L0.2029[962,1121] 133ns |----------------------------------------L0.2029-----------------------------------------|" - - "L0.2155[962,1121] 134ns |----------------------------------------L0.2155-----------------------------------------|" + - "L0.1009[1282,1441] 60ns |----------------------------------------L0.1009-----------------------------------------|" + - "L0.1023[1282,1441] 61ns |----------------------------------------L0.1023-----------------------------------------|" + - "L0.1037[1282,1441] 62ns |----------------------------------------L0.1037-----------------------------------------|" + - "L0.1051[1282,1441] 63ns |----------------------------------------L0.1051-----------------------------------------|" + - "L0.1065[1282,1441] 64ns |----------------------------------------L0.1065-----------------------------------------|" + - "L0.1079[1282,1441] 65ns |----------------------------------------L0.1079-----------------------------------------|" + - "L0.1093[1282,1441] 66ns |----------------------------------------L0.1093-----------------------------------------|" + - "L0.1107[1282,1441] 67ns |----------------------------------------L0.1107-----------------------------------------|" + - "L0.1121[1282,1441] 68ns 
|----------------------------------------L0.1121-----------------------------------------|" + - "L0.1135[1282,1441] 69ns |----------------------------------------L0.1135-----------------------------------------|" + - "L0.1149[1282,1441] 70ns |----------------------------------------L0.1149-----------------------------------------|" + - "L0.1163[1282,1441] 71ns |----------------------------------------L0.1163-----------------------------------------|" + - "L0.1177[1282,1441] 72ns |----------------------------------------L0.1177-----------------------------------------|" + - "L0.1191[1282,1441] 73ns |----------------------------------------L0.1191-----------------------------------------|" + - "L0.1205[1282,1441] 74ns |----------------------------------------L0.1205-----------------------------------------|" + - "L0.1219[1282,1441] 75ns |----------------------------------------L0.1219-----------------------------------------|" + - "L0.1233[1282,1441] 76ns |----------------------------------------L0.1233-----------------------------------------|" + - "L0.1247[1282,1441] 77ns |----------------------------------------L0.1247-----------------------------------------|" + - "L0.1261[1282,1441] 78ns |----------------------------------------L0.1261-----------------------------------------|" + - "L0.1275[1282,1441] 79ns |----------------------------------------L0.1275-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 134ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1791, L0.1805, L0.1819, L0.1833, L0.1847, L0.1861, L0.1875, L0.1889, L0.1903, L0.1917, L0.1931, L0.1945, L0.1959, L0.1973, L0.1987, L0.2001, L0.2015, L0.2029, L0.2155, L0.2999" + - " Soft Deleting 20 files: L0.1009, L0.1023, L0.1037, L0.1051, L0.1065, L0.1079, L0.1093, L0.1107, L0.1121, L0.1135, L0.1149, L0.1163, L0.1177, L0.1191, L0.1205, L0.1219, L0.1233, L0.1247, L0.1261, L0.1275" - " Creating 1 files" - - "**** Simulation run 290, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 283, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3000[1122,1281] 115ns |----------------------------------------L0.3000-----------------------------------------|" - - "L0.1792[1122,1281] 116ns |----------------------------------------L0.1792-----------------------------------------|" - - "L0.1806[1122,1281] 117ns |----------------------------------------L0.1806-----------------------------------------|" - - "L0.1820[1122,1281] 118ns |----------------------------------------L0.1820-----------------------------------------|" - - "L0.1834[1122,1281] 119ns |----------------------------------------L0.1834-----------------------------------------|" - - "L0.1848[1122,1281] 120ns |----------------------------------------L0.1848-----------------------------------------|" - - "L0.1862[1122,1281] 121ns |----------------------------------------L0.1862-----------------------------------------|" - - "L0.1876[1122,1281] 122ns |----------------------------------------L0.1876-----------------------------------------|" - - "L0.1890[1122,1281] 123ns |----------------------------------------L0.1890-----------------------------------------|" - - "L0.1904[1122,1281] 124ns |----------------------------------------L0.1904-----------------------------------------|" - - "L0.1918[1122,1281] 125ns |----------------------------------------L0.1918-----------------------------------------|" - - "L0.1932[1122,1281] 126ns |----------------------------------------L0.1932-----------------------------------------|" - - "L0.1946[1122,1281] 127ns |----------------------------------------L0.1946-----------------------------------------|" - - "L0.1960[1122,1281] 128ns |----------------------------------------L0.1960-----------------------------------------|" - - "L0.1974[1122,1281] 129ns |----------------------------------------L0.1974-----------------------------------------|" - - "L0.1988[1122,1281] 130ns |----------------------------------------L0.1988-----------------------------------------|" - - "L0.2002[1122,1281] 131ns |----------------------------------------L0.2002-----------------------------------------|" - - "L0.2016[1122,1281] 132ns |----------------------------------------L0.2016-----------------------------------------|" - - "L0.2030[1122,1281] 133ns |----------------------------------------L0.2030-----------------------------------------|" - - "L0.2156[1122,1281] 134ns |----------------------------------------L0.2156-----------------------------------------|" + - "L0.1289[1282,1441] 80ns |----------------------------------------L0.1289-----------------------------------------|" + - "L0.1303[1282,1441] 81ns |----------------------------------------L0.1303-----------------------------------------|" + - "L0.1317[1282,1441] 82ns |----------------------------------------L0.1317-----------------------------------------|" + - "L0.1331[1282,1441] 83ns |----------------------------------------L0.1331-----------------------------------------|" + - "L0.1345[1282,1441] 84ns |----------------------------------------L0.1345-----------------------------------------|" + - "L0.1359[1282,1441] 85ns |----------------------------------------L0.1359-----------------------------------------|" + - "L0.1373[1282,1441] 86ns |----------------------------------------L0.1373-----------------------------------------|" + - "L0.1387[1282,1441] 87ns |----------------------------------------L0.1387-----------------------------------------|" + - "L0.1401[1282,1441] 88ns 
|----------------------------------------L0.1401-----------------------------------------|" + - "L0.1415[1282,1441] 89ns |----------------------------------------L0.1415-----------------------------------------|" + - "L0.1429[1282,1441] 90ns |----------------------------------------L0.1429-----------------------------------------|" + - "L0.1443[1282,1441] 91ns |----------------------------------------L0.1443-----------------------------------------|" + - "L0.1457[1282,1441] 92ns |----------------------------------------L0.1457-----------------------------------------|" + - "L0.1471[1282,1441] 93ns |----------------------------------------L0.1471-----------------------------------------|" + - "L0.1485[1282,1441] 94ns |----------------------------------------L0.1485-----------------------------------------|" + - "L0.1499[1282,1441] 95ns |----------------------------------------L0.1499-----------------------------------------|" + - "L0.1513[1282,1441] 96ns |----------------------------------------L0.1513-----------------------------------------|" + - "L0.1527[1282,1441] 97ns |----------------------------------------L0.1527-----------------------------------------|" + - "L0.1541[1282,1441] 98ns |----------------------------------------L0.1541-----------------------------------------|" + - "L0.1555[1282,1441] 99ns |----------------------------------------L0.1555-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 134ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1792, L0.1806, L0.1820, L0.1834, L0.1848, L0.1862, L0.1876, L0.1890, L0.1904, L0.1918, L0.1932, L0.1946, L0.1960, L0.1974, L0.1988, L0.2002, L0.2016, L0.2030, L0.2156, L0.3000" + - " Soft Deleting 20 files: L0.1289, L0.1303, L0.1317, L0.1331, L0.1345, L0.1359, L0.1373, L0.1387, L0.1401, L0.1415, L0.1429, L0.1443, L0.1457, L0.1471, L0.1485, L0.1499, L0.1513, L0.1527, L0.1541, L0.1555" - " Creating 1 files" - - "**** Simulation run 291, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 284, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3001[1282,1441] 115ns |----------------------------------------L0.3001-----------------------------------------|" + - "L0.1569[1282,1441] 100ns |----------------------------------------L0.1569-----------------------------------------|" + - "L0.1583[1282,1441] 101ns |----------------------------------------L0.1583-----------------------------------------|" + - "L0.1597[1282,1441] 102ns |----------------------------------------L0.1597-----------------------------------------|" + - "L0.1611[1282,1441] 103ns |----------------------------------------L0.1611-----------------------------------------|" + - "L0.1625[1282,1441] 104ns |----------------------------------------L0.1625-----------------------------------------|" + - "L0.1639[1282,1441] 105ns |----------------------------------------L0.1639-----------------------------------------|" + - "L0.1653[1282,1441] 106ns |----------------------------------------L0.1653-----------------------------------------|" + - "L0.1667[1282,1441] 107ns |----------------------------------------L0.1667-----------------------------------------|" + - "L0.1681[1282,1441] 108ns |----------------------------------------L0.1681-----------------------------------------|" + - "L0.1695[1282,1441] 109ns |----------------------------------------L0.1695-----------------------------------------|" + - "L0.1709[1282,1441] 110ns |----------------------------------------L0.1709-----------------------------------------|" + - "L0.1723[1282,1441] 111ns |----------------------------------------L0.1723-----------------------------------------|" + - "L0.1737[1282,1441] 112ns |----------------------------------------L0.1737-----------------------------------------|" + - "L0.1751[1282,1441] 113ns |----------------------------------------L0.1751-----------------------------------------|" + - "L0.1765[1282,1441] 114ns |----------------------------------------L0.1765-----------------------------------------|" + - "L0.1779[1282,1441] 115ns |----------------------------------------L0.1779-----------------------------------------|" - "L0.1793[1282,1441] 116ns |----------------------------------------L0.1793-----------------------------------------|" - "L0.1807[1282,1441] 117ns |----------------------------------------L0.1807-----------------------------------------|" - "L0.1821[1282,1441] 118ns |----------------------------------------L0.1821-----------------------------------------|" - "L0.1835[1282,1441] 119ns |----------------------------------------L0.1835-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1282,1441] 119ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1569, L0.1583, L0.1597, L0.1611, L0.1625, L0.1639, L0.1653, L0.1667, L0.1681, L0.1695, L0.1709, L0.1723, L0.1737, L0.1751, L0.1765, L0.1779, L0.1793, L0.1807, L0.1821, L0.1835" + - " Creating 1 files" + - "**** Simulation run 285, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " - "L0.1849[1282,1441] 120ns |----------------------------------------L0.1849-----------------------------------------|" - "L0.1863[1282,1441] 121ns |----------------------------------------L0.1863-----------------------------------------|" - "L0.1877[1282,1441] 122ns |----------------------------------------L0.1877-----------------------------------------|" @@ -8026,440 +7835,299 @@ async fn stuck_l0_large_l0s() { - "L0.2017[1282,1441] 132ns |----------------------------------------L0.2017-----------------------------------------|" - "L0.2031[1282,1441] 133ns |----------------------------------------L0.2031-----------------------------------------|" - "L0.2157[1282,1441] 134ns |----------------------------------------L0.2157-----------------------------------------|" + - "L0.2171[1282,1441] 135ns |----------------------------------------L0.2171-----------------------------------------|" + - "L0.2045[1282,1441] 136ns |----------------------------------------L0.2045-----------------------------------------|" + - "L0.2059[1282,1441] 137ns |----------------------------------------L0.2059-----------------------------------------|" + - "L0.2073[1282,1441] 138ns |----------------------------------------L0.2073-----------------------------------------|" + - "L0.2087[1282,1441] 139ns |----------------------------------------L0.2087-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1793, L0.1807, L0.1821, L0.1835, L0.1849, L0.1863, L0.1877, L0.1891, L0.1905, L0.1919, L0.1933, L0.1947, L0.1961, L0.1975, L0.1989, L0.2003, L0.2017, L0.2031, L0.2157, L0.3001" - - " Creating 1 files" - - "**** Simulation run 292, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3002[1442,1601] 115ns |----------------------------------------L0.3002-----------------------------------------|" - - "L0.1794[1442,1601] 116ns |----------------------------------------L0.1794-----------------------------------------|" - - "L0.1808[1442,1601] 117ns |----------------------------------------L0.1808-----------------------------------------|" - - "L0.1822[1442,1601] 118ns |----------------------------------------L0.1822-----------------------------------------|" - - "L0.1836[1442,1601] 119ns |----------------------------------------L0.1836-----------------------------------------|" - - "L0.1850[1442,1601] 120ns |----------------------------------------L0.1850-----------------------------------------|" - - "L0.1864[1442,1601] 121ns |----------------------------------------L0.1864-----------------------------------------|" - - "L0.1878[1442,1601] 122ns |----------------------------------------L0.1878-----------------------------------------|" - - "L0.1892[1442,1601] 123ns |----------------------------------------L0.1892-----------------------------------------|" - - "L0.1906[1442,1601] 124ns |----------------------------------------L0.1906-----------------------------------------|" - - "L0.1920[1442,1601] 125ns |----------------------------------------L0.1920-----------------------------------------|" - - "L0.1934[1442,1601] 126ns |----------------------------------------L0.1934-----------------------------------------|" - - "L0.1948[1442,1601] 127ns |----------------------------------------L0.1948-----------------------------------------|" - - "L0.1962[1442,1601] 128ns |----------------------------------------L0.1962-----------------------------------------|" - - "L0.1976[1442,1601] 129ns |----------------------------------------L0.1976-----------------------------------------|" - - "L0.1990[1442,1601] 130ns |----------------------------------------L0.1990-----------------------------------------|" - - "L0.2004[1442,1601] 131ns |----------------------------------------L0.2004-----------------------------------------|" - - "L0.2018[1442,1601] 132ns |----------------------------------------L0.2018-----------------------------------------|" - - "L0.2032[1442,1601] 133ns |----------------------------------------L0.2032-----------------------------------------|" - - "L0.2158[1442,1601] 134ns |----------------------------------------L0.2158-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1442,1601] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1794, L0.1808, L0.1822, L0.1836, L0.1850, L0.1864, L0.1878, L0.1892, L0.1906, L0.1920, L0.1934, L0.1948, L0.1962, L0.1976, L0.1990, L0.2004, L0.2018, L0.2032, L0.2158, L0.3002" - - " Creating 1 files" - - "**** Simulation run 293, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3003[1602,1761] 115ns |----------------------------------------L0.3003-----------------------------------------|" - - "L0.1795[1602,1761] 116ns |----------------------------------------L0.1795-----------------------------------------|" - - "L0.1809[1602,1761] 117ns |----------------------------------------L0.1809-----------------------------------------|" - - "L0.1823[1602,1761] 118ns |----------------------------------------L0.1823-----------------------------------------|" - - "L0.1837[1602,1761] 119ns |----------------------------------------L0.1837-----------------------------------------|" - - "L0.1851[1602,1761] 120ns |----------------------------------------L0.1851-----------------------------------------|" - - "L0.1865[1602,1761] 121ns |----------------------------------------L0.1865-----------------------------------------|" - - "L0.1879[1602,1761] 122ns |----------------------------------------L0.1879-----------------------------------------|" - - "L0.1893[1602,1761] 123ns |----------------------------------------L0.1893-----------------------------------------|" - - "L0.1907[1602,1761] 124ns |----------------------------------------L0.1907-----------------------------------------|" - - "L0.1921[1602,1761] 125ns |----------------------------------------L0.1921-----------------------------------------|" - - "L0.1935[1602,1761] 126ns |----------------------------------------L0.1935-----------------------------------------|" - - "L0.1949[1602,1761] 127ns |----------------------------------------L0.1949-----------------------------------------|" - - "L0.1963[1602,1761] 128ns |----------------------------------------L0.1963-----------------------------------------|" - - "L0.1977[1602,1761] 129ns |----------------------------------------L0.1977-----------------------------------------|" - - "L0.1991[1602,1761] 130ns |----------------------------------------L0.1991-----------------------------------------|" - - "L0.2005[1602,1761] 131ns |----------------------------------------L0.2005-----------------------------------------|" - - "L0.2019[1602,1761] 132ns |----------------------------------------L0.2019-----------------------------------------|" - - "L0.2033[1602,1761] 133ns |----------------------------------------L0.2033-----------------------------------------|" - - "L0.2159[1602,1761] 134ns |----------------------------------------L0.2159-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1602,1761] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1795, L0.1809, L0.1823, L0.1837, L0.1851, L0.1865, L0.1879, L0.1893, L0.1907, L0.1921, L0.1935, L0.1949, L0.1963, L0.1977, L0.1991, L0.2005, L0.2019, L0.2033, L0.2159, L0.3003" - - " Creating 1 files" - - "**** Simulation run 294, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3004[1762,2000] 115ns |----------------------------------------L0.3004-----------------------------------------|" - - "L0.1796[1762,2000] 116ns |----------------------------------------L0.1796-----------------------------------------|" - - "L0.1810[1762,2000] 117ns |----------------------------------------L0.1810-----------------------------------------|" - - "L0.1824[1762,2000] 118ns |----------------------------------------L0.1824-----------------------------------------|" - - "L0.1838[1762,2000] 119ns |----------------------------------------L0.1838-----------------------------------------|" - - "L0.1852[1762,2000] 120ns |----------------------------------------L0.1852-----------------------------------------|" - - "L0.1866[1762,2000] 121ns |----------------------------------------L0.1866-----------------------------------------|" - - "L0.1880[1762,2000] 122ns |----------------------------------------L0.1880-----------------------------------------|" - - "L0.1894[1762,2000] 123ns |----------------------------------------L0.1894-----------------------------------------|" - - "L0.1908[1762,2000] 124ns |----------------------------------------L0.1908-----------------------------------------|" - - "L0.1922[1762,2000] 125ns |----------------------------------------L0.1922-----------------------------------------|" - - "L0.1936[1762,2000] 126ns |----------------------------------------L0.1936-----------------------------------------|" - - "L0.1950[1762,2000] 127ns |----------------------------------------L0.1950-----------------------------------------|" - - "L0.1964[1762,2000] 128ns |----------------------------------------L0.1964-----------------------------------------|" - - "L0.1978[1762,2000] 129ns |----------------------------------------L0.1978-----------------------------------------|" - - "L0.1992[1762,2000] 130ns |----------------------------------------L0.1992-----------------------------------------|" - - "L0.2006[1762,2000] 131ns |----------------------------------------L0.2006-----------------------------------------|" - - "L0.2020[1762,2000] 132ns |----------------------------------------L0.2020-----------------------------------------|" - - "L0.2034[1762,2000] 133ns |----------------------------------------L0.2034-----------------------------------------|" - - "L0.2160[1762,2000] 134ns |----------------------------------------L0.2160-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1762,2000] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1796, L0.1810, L0.1824, L0.1838, L0.1852, L0.1866, L0.1880, L0.1894, L0.1908, L0.1922, L0.1936, L0.1950, L0.1964, L0.1978, L0.1992, L0.2006, L0.2020, L0.2034, L0.2160, L0.3004" - - " Creating 1 files" - - "**** Simulation run 295, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3005[2001,2086] 134ns |----------------------------------------L0.3005-----------------------------------------|" - - "L0.2175[2001,2086] 135ns |----------------------------------------L0.2175-----------------------------------------|" - - "L0.2049[2001,2086] 136ns |----------------------------------------L0.2049-----------------------------------------|" - - "L0.2063[2001,2086] 137ns |----------------------------------------L0.2063-----------------------------------------|" - - "L0.2077[2001,2086] 138ns |----------------------------------------L0.2077-----------------------------------------|" - - "L0.2091[2001,2086] 139ns |----------------------------------------L0.2091-----------------------------------------|" - - "L0.2105[2001,2086] 140ns |----------------------------------------L0.2105-----------------------------------------|" - - "L0.2119[2001,2086] 141ns |----------------------------------------L0.2119-----------------------------------------|" - - "L0.2133[2001,2086] 142ns |----------------------------------------L0.2133-----------------------------------------|" - - "L0.2147[2001,2086] 143ns |----------------------------------------L0.2147-----------------------------------------|" - - "L0.2189[2001,2086] 144ns |----------------------------------------L0.2189-----------------------------------------|" - - "L0.2203[2001,2086] 145ns |----------------------------------------L0.2203-----------------------------------------|" - - "L0.2217[2001,2086] 146ns |----------------------------------------L0.2217-----------------------------------------|" - - "L0.2231[2001,2086] 147ns |----------------------------------------L0.2231-----------------------------------------|" - - "L0.2245[2001,2086] 148ns |----------------------------------------L0.2245-----------------------------------------|" - - "L0.2259[2001,2086] 149ns |----------------------------------------L0.2259-----------------------------------------|" - - "L0.2273[2001,2086] 150ns |----------------------------------------L0.2273-----------------------------------------|" - - "L0.2287[2001,2086] 151ns |----------------------------------------L0.2287-----------------------------------------|" - - "L0.2301[2001,2086] 152ns |----------------------------------------L0.2301-----------------------------------------|" - - "L0.2315[2001,2086] 153ns |----------------------------------------L0.2315-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[2001,2086] 153ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2049, L0.2063, L0.2077, L0.2091, L0.2105, L0.2119, L0.2133, L0.2147, L0.2175, L0.2189, L0.2203, L0.2217, L0.2231, L0.2245, L0.2259, L0.2273, L0.2287, L0.2301, L0.2315, L0.3005" - - " Creating 1 files" - - "**** Simulation run 296, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.2997[642,801] 115ns |----------------------------------------L0.2997-----------------------------------------|" - - "L0.1789[642,801] 116ns |----------------------------------------L0.1789-----------------------------------------|" - - "L0.1803[642,801] 117ns |----------------------------------------L0.1803-----------------------------------------|" - - "L0.1817[642,801] 118ns |----------------------------------------L0.1817-----------------------------------------|" - - "L0.1831[642,801] 119ns |----------------------------------------L0.1831-----------------------------------------|" - - "L0.1845[642,801] 120ns |----------------------------------------L0.1845-----------------------------------------|" - - "L0.1859[642,801] 121ns |----------------------------------------L0.1859-----------------------------------------|" - - "L0.1873[642,801] 122ns |----------------------------------------L0.1873-----------------------------------------|" - - "L0.1887[642,801] 123ns |----------------------------------------L0.1887-----------------------------------------|" - - "L0.1901[642,801] 124ns |----------------------------------------L0.1901-----------------------------------------|" - - "L0.1915[642,801] 125ns |----------------------------------------L0.1915-----------------------------------------|" - - "L0.1929[642,801] 126ns |----------------------------------------L0.1929-----------------------------------------|" - - "L0.1943[642,801] 127ns |----------------------------------------L0.1943-----------------------------------------|" - - "L0.1957[642,801] 128ns |----------------------------------------L0.1957-----------------------------------------|" - - "L0.1971[642,801] 129ns |----------------------------------------L0.1971-----------------------------------------|" - - "L0.1985[642,801] 130ns |----------------------------------------L0.1985-----------------------------------------|" - - "L0.1999[642,801] 131ns |----------------------------------------L0.1999-----------------------------------------|" - - "L0.2013[642,801] 132ns |----------------------------------------L0.2013-----------------------------------------|" - - "L0.2027[642,801] 133ns |----------------------------------------L0.2027-----------------------------------------|" - - "L0.2153[642,801] 134ns |----------------------------------------L0.2153-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[642,801] 134ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.1789, L0.1803, L0.1817, L0.1831, L0.1845, L0.1859, L0.1873, L0.1887, L0.1901, L0.1915, L0.1929, L0.1943, L0.1957, L0.1971, L0.1985, L0.1999, L0.2013, L0.2027, L0.2153, L0.2997" - - " Creating 1 files" - - "**** Simulation run 297, type=compact(ManySmallFiles). 
20 Input Files, 1kb total:" - - "L0 " - - "L0.3006[2087,1340000] 134ns 1kb|----------------------------------L0.3006-----------------------------------| " - - "L0.2176[2087,1350000] 135ns 10b|-----------------------------------L0.2176-----------------------------------| " - - "L0.2050[2087,1360000] 136ns 10b|-----------------------------------L0.2050-----------------------------------| " - - "L0.2064[2087,1370000] 137ns 10b|-----------------------------------L0.2064------------------------------------| " - - "L0.2078[2087,1380000] 138ns 10b|------------------------------------L0.2078------------------------------------| " - - "L0.2092[2087,1390000] 139ns 10b|------------------------------------L0.2092------------------------------------| " - - "L0.2106[2087,1400000] 140ns 10b|------------------------------------L0.2106-------------------------------------| " - - "L0.2120[2087,1410000] 141ns 10b|------------------------------------L0.2120-------------------------------------| " - - "L0.2134[2087,1420000] 142ns 10b|-------------------------------------L0.2134-------------------------------------| " - - "L0.2148[2087,1430000] 143ns 10b|-------------------------------------L0.2148--------------------------------------| " - - "L0.2190[2087,1440000] 144ns 10b|-------------------------------------L0.2190--------------------------------------| " - - "L0.2204[2087,1450000] 145ns 10b|--------------------------------------L0.2204--------------------------------------| " - - "L0.2218[2087,1460000] 146ns 10b|--------------------------------------L0.2218--------------------------------------| " - - "L0.2232[2087,1470000] 147ns 10b|--------------------------------------L0.2232---------------------------------------| " - - "L0.2246[2087,1480000] 148ns 10b|---------------------------------------L0.2246---------------------------------------| " - - "L0.2260[2087,1490000] 149ns 10b|---------------------------------------L0.2260---------------------------------------| " - - "L0.2274[2087,1500000] 150ns 10b|---------------------------------------L0.2274----------------------------------------| " - - "L0.2288[2087,1510000] 151ns 10b|---------------------------------------L0.2288----------------------------------------| " - - "L0.2302[2087,1520000] 152ns 10b|----------------------------------------L0.2302----------------------------------------| " - - "L0.2316[2087,1530000] 153ns 10b|----------------------------------------L0.2316-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 1kb total:" - - "L0, all files 1kb " - - "L0.?[2087,1530000] 153ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2050, L0.2064, L0.2078, L0.2092, L0.2106, L0.2120, L0.2134, L0.2148, L0.2176, L0.2190, L0.2204, L0.2218, L0.2232, L0.2246, L0.2260, L0.2274, L0.2288, L0.2302, L0.2316, L0.3006" - - " Creating 1 files" - - "**** Simulation run 298, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3007[20,161] 134ns |----------------------------------------L0.3007-----------------------------------------|" - - "L0.2163[135,161] 135ns |---L0.2163----| " - - "L0.2037[136,161] 136ns |---L0.2037---| " - - "L0.2051[137,161] 137ns |---L0.2051---| " - - "L0.2065[138,161] 138ns |--L0.2065---| " - - "L0.2079[139,161] 139ns |--L0.2079---| " - - "L0.2093[140,161] 140ns |--L0.2093--| " - - "L0.2107[141,161] 141ns |-L0.2107--| " - - "L0.2121[142,161] 142ns |-L0.2121--| " - - "L0.2135[143,161] 143ns |-L0.2135-| " - - "L0.2177[144,161] 144ns |L0.2177-| " - - "L0.2191[145,161] 145ns |L0.2191-| " - - "L0.2205[146,161] 146ns |L0.2205| " - - "L0.2219[147,161] 147ns |L0.2219|" - - "L0.2233[148,161] 148ns |L0.2233|" - - "L0.2247[149,161] 149ns |L0.2247|" - - "L0.2261[150,161] 150ns |L0.2261|" - - "L0.2275[151,161] 151ns |L0.2275|" - - "L0.2289[152,161] 152ns |L0.2289|" - - "L0.2303[153,161] 153ns |L0.2303|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[20,161] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2037, L0.2051, L0.2065, L0.2079, L0.2093, L0.2107, L0.2121, L0.2135, L0.2163, L0.2177, L0.2191, L0.2205, L0.2219, L0.2233, L0.2247, L0.2261, L0.2275, L0.2289, L0.2303, L0.3007" + - " Soft Deleting 20 files: L0.1849, L0.1863, L0.1877, L0.1891, L0.1905, L0.1919, L0.1933, L0.1947, L0.1961, L0.1975, L0.1989, L0.2003, L0.2017, L0.2031, L0.2045, L0.2059, L0.2073, L0.2087, L0.2157, L0.2171" - " Creating 1 files" - - "**** Simulation run 299, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 286, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3008[162,321] 134ns |----------------------------------------L0.3008-----------------------------------------|" - - "L0.2164[162,321] 135ns |----------------------------------------L0.2164-----------------------------------------|" - - "L0.2038[162,321] 136ns |----------------------------------------L0.2038-----------------------------------------|" - - "L0.2052[162,321] 137ns |----------------------------------------L0.2052-----------------------------------------|" - - "L0.2066[162,321] 138ns |----------------------------------------L0.2066-----------------------------------------|" - - "L0.2080[162,321] 139ns |----------------------------------------L0.2080-----------------------------------------|" - - "L0.2094[162,321] 140ns |----------------------------------------L0.2094-----------------------------------------|" - - "L0.2108[162,321] 141ns |----------------------------------------L0.2108-----------------------------------------|" - - "L0.2122[162,321] 142ns |----------------------------------------L0.2122-----------------------------------------|" - - "L0.2136[162,321] 143ns |----------------------------------------L0.2136-----------------------------------------|" - - "L0.2178[162,321] 144ns |----------------------------------------L0.2178-----------------------------------------|" - - "L0.2192[162,321] 145ns |----------------------------------------L0.2192-----------------------------------------|" - - "L0.2206[162,321] 146ns |----------------------------------------L0.2206-----------------------------------------|" - - "L0.2220[162,321] 147ns |----------------------------------------L0.2220-----------------------------------------|" - - "L0.2234[162,321] 148ns |----------------------------------------L0.2234-----------------------------------------|" - - "L0.2248[162,321] 149ns |----------------------------------------L0.2248-----------------------------------------|" - - "L0.2262[162,321] 150ns |----------------------------------------L0.2262-----------------------------------------|" - - "L0.2276[162,321] 151ns |----------------------------------------L0.2276-----------------------------------------|" - - "L0.2290[162,321] 152ns |----------------------------------------L0.2290-----------------------------------------|" - - "L0.2304[162,321] 153ns |----------------------------------------L0.2304-----------------------------------------|" + - "L0.2101[1282,1441] 140ns |----------------------------------------L0.2101-----------------------------------------|" + - "L0.2115[1282,1441] 141ns |----------------------------------------L0.2115-----------------------------------------|" + - "L0.2129[1282,1441] 142ns |----------------------------------------L0.2129-----------------------------------------|" + - "L0.2143[1282,1441] 143ns |----------------------------------------L0.2143-----------------------------------------|" + - "L0.2185[1282,1441] 144ns |----------------------------------------L0.2185-----------------------------------------|" + - "L0.2199[1282,1441] 145ns |----------------------------------------L0.2199-----------------------------------------|" + - "L0.2213[1282,1441] 146ns |----------------------------------------L0.2213-----------------------------------------|" + - "L0.2227[1282,1441] 147ns |----------------------------------------L0.2227-----------------------------------------|" + - "L0.2241[1282,1441] 148ns |----------------------------------------L0.2241-----------------------------------------|" + - 
"L0.2255[1282,1441] 149ns |----------------------------------------L0.2255-----------------------------------------|" + - "L0.2269[1282,1441] 150ns |----------------------------------------L0.2269-----------------------------------------|" + - "L0.2283[1282,1441] 151ns |----------------------------------------L0.2283-----------------------------------------|" + - "L0.2297[1282,1441] 152ns |----------------------------------------L0.2297-----------------------------------------|" + - "L0.2311[1282,1441] 153ns |----------------------------------------L0.2311-----------------------------------------|" + - "L0.2325[1282,1441] 154ns |----------------------------------------L0.2325-----------------------------------------|" + - "L0.2339[1282,1441] 155ns |----------------------------------------L0.2339-----------------------------------------|" + - "L0.2353[1282,1441] 156ns |----------------------------------------L0.2353-----------------------------------------|" + - "L0.2367[1282,1441] 157ns |----------------------------------------L0.2367-----------------------------------------|" + - "L0.2381[1282,1441] 158ns |----------------------------------------L0.2381-----------------------------------------|" + - "L0.2395[1282,1441] 159ns |----------------------------------------L0.2395-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 159ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2038, L0.2052, L0.2066, L0.2080, L0.2094, L0.2108, L0.2122, L0.2136, L0.2164, L0.2178, L0.2192, L0.2206, L0.2220, L0.2234, L0.2248, L0.2262, L0.2276, L0.2290, L0.2304, L0.3008" + - " Soft Deleting 20 files: L0.2101, L0.2115, L0.2129, L0.2143, L0.2185, L0.2199, L0.2213, L0.2227, L0.2241, L0.2255, L0.2269, L0.2283, L0.2297, L0.2311, L0.2325, L0.2339, L0.2353, L0.2367, L0.2381, L0.2395" - " Creating 1 files" - - "**** Simulation run 300, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 287, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3009[322,481] 134ns |----------------------------------------L0.3009-----------------------------------------|" - - "L0.2165[322,481] 135ns |----------------------------------------L0.2165-----------------------------------------|" - - "L0.2039[322,481] 136ns |----------------------------------------L0.2039-----------------------------------------|" - - "L0.2053[322,481] 137ns |----------------------------------------L0.2053-----------------------------------------|" - - "L0.2067[322,481] 138ns |----------------------------------------L0.2067-----------------------------------------|" - - "L0.2081[322,481] 139ns |----------------------------------------L0.2081-----------------------------------------|" - - "L0.2095[322,481] 140ns |----------------------------------------L0.2095-----------------------------------------|" - - "L0.2109[322,481] 141ns |----------------------------------------L0.2109-----------------------------------------|" - - "L0.2123[322,481] 142ns |----------------------------------------L0.2123-----------------------------------------|" - - "L0.2137[322,481] 143ns |----------------------------------------L0.2137-----------------------------------------|" - - "L0.2179[322,481] 144ns |----------------------------------------L0.2179-----------------------------------------|" - - "L0.2193[322,481] 145ns |----------------------------------------L0.2193-----------------------------------------|" - - "L0.2207[322,481] 146ns |----------------------------------------L0.2207-----------------------------------------|" - - "L0.2221[322,481] 147ns |----------------------------------------L0.2221-----------------------------------------|" - - "L0.2235[322,481] 148ns |----------------------------------------L0.2235-----------------------------------------|" - - "L0.2249[322,481] 149ns |----------------------------------------L0.2249-----------------------------------------|" - - "L0.2263[322,481] 150ns |----------------------------------------L0.2263-----------------------------------------|" - - "L0.2277[322,481] 151ns |----------------------------------------L0.2277-----------------------------------------|" - - "L0.2291[322,481] 152ns |----------------------------------------L0.2291-----------------------------------------|" - - "L0.2305[322,481] 153ns |----------------------------------------L0.2305-----------------------------------------|" + - "L0.2409[1282,1441] 160ns |----------------------------------------L0.2409-----------------------------------------|" + - "L0.2423[1282,1441] 161ns |----------------------------------------L0.2423-----------------------------------------|" + - "L0.2436[1282,1441] 162ns |----------------------------------------L0.2436-----------------------------------------|" + - "L0.2449[1282,1441] 163ns |----------------------------------------L0.2449-----------------------------------------|" + - "L0.2462[1282,1441] 164ns |----------------------------------------L0.2462-----------------------------------------|" + - "L0.2475[1282,1441] 165ns |----------------------------------------L0.2475-----------------------------------------|" + - "L0.2488[1282,1441] 166ns |----------------------------------------L0.2488-----------------------------------------|" + - "L0.2501[1282,1441] 167ns |----------------------------------------L0.2501-----------------------------------------|" + - "L0.2514[1282,1441] 168ns |----------------------------------------L0.2514-----------------------------------------|" + - 
"L0.2527[1282,1441] 169ns |----------------------------------------L0.2527-----------------------------------------|" + - "L0.2540[1282,1441] 170ns |----------------------------------------L0.2540-----------------------------------------|" + - "L0.2553[1282,1441] 171ns |----------------------------------------L0.2553-----------------------------------------|" + - "L0.2566[1282,1441] 172ns |----------------------------------------L0.2566-----------------------------------------|" + - "L0.2579[1282,1441] 173ns |----------------------------------------L0.2579-----------------------------------------|" + - "L0.2592[1282,1441] 174ns |----------------------------------------L0.2592-----------------------------------------|" + - "L0.2605[1282,1441] 175ns |----------------------------------------L0.2605-----------------------------------------|" + - "L0.2618[1282,1441] 176ns |----------------------------------------L0.2618-----------------------------------------|" + - "L0.2631[1282,1441] 177ns |----------------------------------------L0.2631-----------------------------------------|" + - "L0.2644[1282,1441] 178ns |----------------------------------------L0.2644-----------------------------------------|" + - "L0.2657[1282,1441] 179ns |----------------------------------------L0.2657-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[322,481] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2039, L0.2053, L0.2067, L0.2081, L0.2095, L0.2109, L0.2123, L0.2137, L0.2165, L0.2179, L0.2193, L0.2207, L0.2221, L0.2235, L0.2249, L0.2263, L0.2277, L0.2291, L0.2305, L0.3009" + - " Soft Deleting 20 files: L0.2409, L0.2423, L0.2436, L0.2449, L0.2462, L0.2475, L0.2488, L0.2501, L0.2514, L0.2527, L0.2540, L0.2553, L0.2566, L0.2579, L0.2592, L0.2605, L0.2618, L0.2631, L0.2644, L0.2657" - " Creating 1 files" - - "**** Simulation run 301, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 288, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3018[482,641] 134ns |----------------------------------------L0.3018-----------------------------------------|" - - "L0.2166[482,641] 135ns |----------------------------------------L0.2166-----------------------------------------|" - - "L0.2040[482,641] 136ns |----------------------------------------L0.2040-----------------------------------------|" - - "L0.2054[482,641] 137ns |----------------------------------------L0.2054-----------------------------------------|" - - "L0.2068[482,641] 138ns |----------------------------------------L0.2068-----------------------------------------|" - - "L0.2082[482,641] 139ns |----------------------------------------L0.2082-----------------------------------------|" - - "L0.2096[482,641] 140ns |----------------------------------------L0.2096-----------------------------------------|" - - "L0.2110[482,641] 141ns |----------------------------------------L0.2110-----------------------------------------|" - - "L0.2124[482,641] 142ns |----------------------------------------L0.2124-----------------------------------------|" - - "L0.2138[482,641] 143ns |----------------------------------------L0.2138-----------------------------------------|" - - "L0.2180[482,641] 144ns |----------------------------------------L0.2180-----------------------------------------|" - - "L0.2194[482,641] 145ns |----------------------------------------L0.2194-----------------------------------------|" - - "L0.2208[482,641] 146ns |----------------------------------------L0.2208-----------------------------------------|" - - "L0.2222[482,641] 147ns |----------------------------------------L0.2222-----------------------------------------|" - - "L0.2236[482,641] 148ns |----------------------------------------L0.2236-----------------------------------------|" - - "L0.2250[482,641] 149ns |----------------------------------------L0.2250-----------------------------------------|" - - "L0.2264[482,641] 150ns |----------------------------------------L0.2264-----------------------------------------|" - - "L0.2278[482,641] 151ns |----------------------------------------L0.2278-----------------------------------------|" - - "L0.2292[482,641] 152ns |----------------------------------------L0.2292-----------------------------------------|" - - "L0.2306[482,641] 153ns |----------------------------------------L0.2306-----------------------------------------|" + - "L0.2670[1282,1441] 180ns |----------------------------------------L0.2670-----------------------------------------|" + - "L0.2683[1282,1441] 181ns |----------------------------------------L0.2683-----------------------------------------|" + - "L0.2696[1282,1441] 182ns |----------------------------------------L0.2696-----------------------------------------|" + - "L0.2709[1282,1441] 183ns |----------------------------------------L0.2709-----------------------------------------|" + - "L0.2722[1282,1441] 184ns |----------------------------------------L0.2722-----------------------------------------|" + - "L0.2735[1282,1441] 185ns |----------------------------------------L0.2735-----------------------------------------|" + - "L0.2748[1282,1441] 186ns |----------------------------------------L0.2748-----------------------------------------|" + - "L0.2761[1282,1441] 187ns |----------------------------------------L0.2761-----------------------------------------|" + - "L0.2774[1282,1441] 188ns |----------------------------------------L0.2774-----------------------------------------|" + - 
"L0.2787[1282,1441] 189ns |----------------------------------------L0.2787-----------------------------------------|" + - "L0.2800[1282,1441] 190ns |----------------------------------------L0.2800-----------------------------------------|" + - "L0.2813[1282,1441] 191ns |----------------------------------------L0.2813-----------------------------------------|" + - "L0.2826[1282,1441] 192ns |----------------------------------------L0.2826-----------------------------------------|" + - "L0.2839[1282,1441] 193ns |----------------------------------------L0.2839-----------------------------------------|" + - "L0.2852[1282,1441] 194ns |----------------------------------------L0.2852-----------------------------------------|" + - "L0.2865[1282,1441] 195ns |----------------------------------------L0.2865-----------------------------------------|" + - "L0.2878[1282,1441] 196ns |----------------------------------------L0.2878-----------------------------------------|" + - "L0.2891[1282,1441] 197ns |----------------------------------------L0.2891-----------------------------------------|" + - "L0.2904[1282,1441] 198ns |----------------------------------------L0.2904-----------------------------------------|" + - "L0.2917[1282,1441] 199ns |----------------------------------------L0.2917-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1282,1441] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2040, L0.2054, L0.2068, L0.2082, L0.2096, L0.2110, L0.2124, L0.2138, L0.2166, L0.2180, L0.2194, L0.2208, L0.2222, L0.2236, L0.2250, L0.2264, L0.2278, L0.2292, L0.2306, L0.3018" + - " Soft Deleting 20 files: L0.2670, L0.2683, L0.2696, L0.2709, L0.2722, L0.2735, L0.2748, L0.2761, L0.2774, L0.2787, L0.2800, L0.2813, L0.2826, L0.2839, L0.2852, L0.2865, L0.2878, L0.2891, L0.2904, L0.2917" - " Creating 1 files" - - "**** Simulation run 302, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 289, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.210[1442,1601] 0ns |-----------------------------------------L0.210-----------------------------------------|" + - "L0.222[1442,1601] 1ns |-----------------------------------------L0.222-----------------------------------------|" + - "L0.234[1442,1601] 2ns |-----------------------------------------L0.234-----------------------------------------|" + - "L0.246[1442,1601] 3ns |-----------------------------------------L0.246-----------------------------------------|" + - "L0.258[1442,1601] 4ns |-----------------------------------------L0.258-----------------------------------------|" + - "L0.270[1442,1601] 5ns |-----------------------------------------L0.270-----------------------------------------|" + - "L0.282[1442,1601] 6ns |-----------------------------------------L0.282-----------------------------------------|" + - "L0.294[1442,1601] 7ns |-----------------------------------------L0.294-----------------------------------------|" + - "L0.306[1442,1601] 8ns |-----------------------------------------L0.306-----------------------------------------|" + - "L0.318[1442,1601] 9ns |-----------------------------------------L0.318-----------------------------------------|" + - "L0.330[1442,1601] 10ns |-----------------------------------------L0.330-----------------------------------------|" + - "L0.342[1442,1601] 11ns |-----------------------------------------L0.342-----------------------------------------|" + - "L0.354[1442,1601] 12ns |-----------------------------------------L0.354-----------------------------------------|" + - "L0.366[1442,1601] 13ns |-----------------------------------------L0.366-----------------------------------------|" + - "L0.378[1442,1601] 14ns |-----------------------------------------L0.378-----------------------------------------|" + - "L0.390[1442,1601] 15ns |-----------------------------------------L0.390-----------------------------------------|" + - "L0.402[1442,1601] 16ns |-----------------------------------------L0.402-----------------------------------------|" + - "L0.414[1442,1601] 17ns |-----------------------------------------L0.414-----------------------------------------|" + - "L0.538[1442,1601] 18ns |-----------------------------------------L0.538-----------------------------------------|" + - "L0.550[1442,1601] 19ns |-----------------------------------------L0.550-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[1442,1601] 19ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.210, L0.222, L0.234, L0.246, L0.258, L0.270, L0.282, L0.294, L0.306, L0.318, L0.330, L0.342, L0.354, L0.366, L0.378, L0.390, L0.402, L0.414, L0.538, L0.550" + - " Creating 1 files" + - "**** Simulation run 290, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3019[642,801] 134ns |----------------------------------------L0.3019-----------------------------------------|" - - "L0.2167[642,801] 135ns |----------------------------------------L0.2167-----------------------------------------|" - - "L0.2041[642,801] 136ns |----------------------------------------L0.2041-----------------------------------------|" - - "L0.2055[642,801] 137ns |----------------------------------------L0.2055-----------------------------------------|" - - "L0.2069[642,801] 138ns |----------------------------------------L0.2069-----------------------------------------|" - - "L0.2083[642,801] 139ns |----------------------------------------L0.2083-----------------------------------------|" - - "L0.2097[642,801] 140ns |----------------------------------------L0.2097-----------------------------------------|" - - "L0.2111[642,801] 141ns |----------------------------------------L0.2111-----------------------------------------|" - - "L0.2125[642,801] 142ns |----------------------------------------L0.2125-----------------------------------------|" - - "L0.2139[642,801] 143ns |----------------------------------------L0.2139-----------------------------------------|" - - "L0.2181[642,801] 144ns |----------------------------------------L0.2181-----------------------------------------|" - - "L0.2195[642,801] 145ns |----------------------------------------L0.2195-----------------------------------------|" - - "L0.2209[642,801] 146ns |----------------------------------------L0.2209-----------------------------------------|" - - "L0.2223[642,801] 147ns |----------------------------------------L0.2223-----------------------------------------|" - - "L0.2237[642,801] 148ns |----------------------------------------L0.2237-----------------------------------------|" - - "L0.2251[642,801] 149ns |----------------------------------------L0.2251-----------------------------------------|" - - "L0.2265[642,801] 150ns |----------------------------------------L0.2265-----------------------------------------|" - - "L0.2279[642,801] 151ns |----------------------------------------L0.2279-----------------------------------------|" - - "L0.2293[642,801] 152ns |----------------------------------------L0.2293-----------------------------------------|" - - "L0.2307[642,801] 153ns |----------------------------------------L0.2307-----------------------------------------|" + - "L0.426[1442,1601] 20ns |-----------------------------------------L0.426-----------------------------------------|" + - "L0.440[1442,1601] 21ns |-----------------------------------------L0.440-----------------------------------------|" + - "L0.454[1442,1601] 22ns |-----------------------------------------L0.454-----------------------------------------|" + - "L0.468[1442,1601] 23ns |-----------------------------------------L0.468-----------------------------------------|" + - "L0.482[1442,1601] 24ns |-----------------------------------------L0.482-----------------------------------------|" + - "L0.496[1442,1601] 25ns |-----------------------------------------L0.496-----------------------------------------|" + - "L0.510[1442,1601] 26ns |-----------------------------------------L0.510-----------------------------------------|" + - "L0.524[1442,1601] 27ns |-----------------------------------------L0.524-----------------------------------------|" + - "L0.562[1442,1601] 28ns |-----------------------------------------L0.562-----------------------------------------|" + - "L0.576[1442,1601] 
29ns |-----------------------------------------L0.576-----------------------------------------|" + - "L0.590[1442,1601] 30ns |-----------------------------------------L0.590-----------------------------------------|" + - "L0.604[1442,1601] 31ns |-----------------------------------------L0.604-----------------------------------------|" + - "L0.618[1442,1601] 32ns |-----------------------------------------L0.618-----------------------------------------|" + - "L0.632[1442,1601] 33ns |-----------------------------------------L0.632-----------------------------------------|" + - "L0.646[1442,1601] 34ns |-----------------------------------------L0.646-----------------------------------------|" + - "L0.660[1442,1601] 35ns |-----------------------------------------L0.660-----------------------------------------|" + - "L0.674[1442,1601] 36ns |-----------------------------------------L0.674-----------------------------------------|" + - "L0.688[1442,1601] 37ns |-----------------------------------------L0.688-----------------------------------------|" + - "L0.702[1442,1601] 38ns |-----------------------------------------L0.702-----------------------------------------|" + - "L0.716[1442,1601] 39ns |-----------------------------------------L0.716-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2041, L0.2055, L0.2069, L0.2083, L0.2097, L0.2111, L0.2125, L0.2139, L0.2167, L0.2181, L0.2195, L0.2209, L0.2223, L0.2237, L0.2251, L0.2265, L0.2279, L0.2293, L0.2307, L0.3019" + - " Soft Deleting 20 files: L0.426, L0.440, L0.454, L0.468, L0.482, L0.496, L0.510, L0.524, L0.562, L0.576, L0.590, L0.604, L0.618, L0.632, L0.646, L0.660, L0.674, L0.688, L0.702, L0.716" - " Creating 1 files" - - "**** Simulation run 303, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 291, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3010[802,961] 134ns |----------------------------------------L0.3010-----------------------------------------|" - - "L0.2168[802,961] 135ns |----------------------------------------L0.2168-----------------------------------------|" - - "L0.2042[802,961] 136ns |----------------------------------------L0.2042-----------------------------------------|" - - "L0.2056[802,961] 137ns |----------------------------------------L0.2056-----------------------------------------|" - - "L0.2070[802,961] 138ns |----------------------------------------L0.2070-----------------------------------------|" - - "L0.2084[802,961] 139ns |----------------------------------------L0.2084-----------------------------------------|" - - "L0.2098[802,961] 140ns |----------------------------------------L0.2098-----------------------------------------|" - - "L0.2112[802,961] 141ns |----------------------------------------L0.2112-----------------------------------------|" - - "L0.2126[802,961] 142ns |----------------------------------------L0.2126-----------------------------------------|" - - "L0.2140[802,961] 143ns |----------------------------------------L0.2140-----------------------------------------|" - - "L0.2182[802,961] 144ns |----------------------------------------L0.2182-----------------------------------------|" - - "L0.2196[802,961] 145ns |----------------------------------------L0.2196-----------------------------------------|" - - "L0.2210[802,961] 146ns |----------------------------------------L0.2210-----------------------------------------|" - - "L0.2224[802,961] 147ns |----------------------------------------L0.2224-----------------------------------------|" - - "L0.2238[802,961] 148ns |----------------------------------------L0.2238-----------------------------------------|" - - "L0.2252[802,961] 149ns |----------------------------------------L0.2252-----------------------------------------|" - - "L0.2266[802,961] 150ns |----------------------------------------L0.2266-----------------------------------------|" - - "L0.2280[802,961] 151ns |----------------------------------------L0.2280-----------------------------------------|" - - "L0.2294[802,961] 152ns |----------------------------------------L0.2294-----------------------------------------|" - - "L0.2308[802,961] 153ns |----------------------------------------L0.2308-----------------------------------------|" + - "L0.730[1442,1601] 40ns |-----------------------------------------L0.730-----------------------------------------|" + - "L0.744[1442,1601] 41ns |-----------------------------------------L0.744-----------------------------------------|" + - "L0.758[1442,1601] 42ns |-----------------------------------------L0.758-----------------------------------------|" + - "L0.772[1442,1601] 43ns |-----------------------------------------L0.772-----------------------------------------|" + - "L0.786[1442,1601] 44ns |-----------------------------------------L0.786-----------------------------------------|" + - "L0.800[1442,1601] 45ns |-----------------------------------------L0.800-----------------------------------------|" + - "L0.814[1442,1601] 46ns |-----------------------------------------L0.814-----------------------------------------|" + - "L0.828[1442,1601] 47ns |-----------------------------------------L0.828-----------------------------------------|" + - "L0.842[1442,1601] 48ns |-----------------------------------------L0.842-----------------------------------------|" + - "L0.856[1442,1601] 
49ns |-----------------------------------------L0.856-----------------------------------------|" + - "L0.870[1442,1601] 50ns |-----------------------------------------L0.870-----------------------------------------|" + - "L0.884[1442,1601] 51ns |-----------------------------------------L0.884-----------------------------------------|" + - "L0.898[1442,1601] 52ns |-----------------------------------------L0.898-----------------------------------------|" + - "L0.912[1442,1601] 53ns |-----------------------------------------L0.912-----------------------------------------|" + - "L0.926[1442,1601] 54ns |-----------------------------------------L0.926-----------------------------------------|" + - "L0.940[1442,1601] 55ns |-----------------------------------------L0.940-----------------------------------------|" + - "L0.954[1442,1601] 56ns |-----------------------------------------L0.954-----------------------------------------|" + - "L0.968[1442,1601] 57ns |-----------------------------------------L0.968-----------------------------------------|" + - "L0.982[1442,1601] 58ns |-----------------------------------------L0.982-----------------------------------------|" + - "L0.996[1442,1601] 59ns |-----------------------------------------L0.996-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2042, L0.2056, L0.2070, L0.2084, L0.2098, L0.2112, L0.2126, L0.2140, L0.2168, L0.2182, L0.2196, L0.2210, L0.2224, L0.2238, L0.2252, L0.2266, L0.2280, L0.2294, L0.2308, L0.3010" + - " Soft Deleting 20 files: L0.730, L0.744, L0.758, L0.772, L0.786, L0.800, L0.814, L0.828, L0.842, L0.856, L0.870, L0.884, L0.898, L0.912, L0.926, L0.940, L0.954, L0.968, L0.982, L0.996" - " Creating 1 files" - - "**** Simulation run 304, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 292, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3011[962,1121] 134ns |----------------------------------------L0.3011-----------------------------------------|" - - "L0.2169[962,1121] 135ns |----------------------------------------L0.2169-----------------------------------------|" - - "L0.2043[962,1121] 136ns |----------------------------------------L0.2043-----------------------------------------|" - - "L0.2057[962,1121] 137ns |----------------------------------------L0.2057-----------------------------------------|" - - "L0.2071[962,1121] 138ns |----------------------------------------L0.2071-----------------------------------------|" - - "L0.2085[962,1121] 139ns |----------------------------------------L0.2085-----------------------------------------|" - - "L0.2099[962,1121] 140ns |----------------------------------------L0.2099-----------------------------------------|" - - "L0.2113[962,1121] 141ns |----------------------------------------L0.2113-----------------------------------------|" - - "L0.2127[962,1121] 142ns |----------------------------------------L0.2127-----------------------------------------|" - - "L0.2141[962,1121] 143ns |----------------------------------------L0.2141-----------------------------------------|" - - "L0.2183[962,1121] 144ns |----------------------------------------L0.2183-----------------------------------------|" - - "L0.2197[962,1121] 145ns |----------------------------------------L0.2197-----------------------------------------|" - - "L0.2211[962,1121] 146ns |----------------------------------------L0.2211-----------------------------------------|" - - "L0.2225[962,1121] 147ns |----------------------------------------L0.2225-----------------------------------------|" - - "L0.2239[962,1121] 148ns |----------------------------------------L0.2239-----------------------------------------|" - - "L0.2253[962,1121] 149ns |----------------------------------------L0.2253-----------------------------------------|" - - "L0.2267[962,1121] 150ns |----------------------------------------L0.2267-----------------------------------------|" - - "L0.2281[962,1121] 151ns |----------------------------------------L0.2281-----------------------------------------|" - - "L0.2295[962,1121] 152ns |----------------------------------------L0.2295-----------------------------------------|" - - "L0.2309[962,1121] 153ns |----------------------------------------L0.2309-----------------------------------------|" + - "L0.1010[1442,1601] 60ns |----------------------------------------L0.1010-----------------------------------------|" + - "L0.1024[1442,1601] 61ns |----------------------------------------L0.1024-----------------------------------------|" + - "L0.1038[1442,1601] 62ns |----------------------------------------L0.1038-----------------------------------------|" + - "L0.1052[1442,1601] 63ns |----------------------------------------L0.1052-----------------------------------------|" + - "L0.1066[1442,1601] 64ns |----------------------------------------L0.1066-----------------------------------------|" + - "L0.1080[1442,1601] 65ns |----------------------------------------L0.1080-----------------------------------------|" + - "L0.1094[1442,1601] 66ns |----------------------------------------L0.1094-----------------------------------------|" + - "L0.1108[1442,1601] 67ns |----------------------------------------L0.1108-----------------------------------------|" + - "L0.1122[1442,1601] 68ns 
|----------------------------------------L0.1122-----------------------------------------|" + - "L0.1136[1442,1601] 69ns |----------------------------------------L0.1136-----------------------------------------|" + - "L0.1150[1442,1601] 70ns |----------------------------------------L0.1150-----------------------------------------|" + - "L0.1164[1442,1601] 71ns |----------------------------------------L0.1164-----------------------------------------|" + - "L0.1178[1442,1601] 72ns |----------------------------------------L0.1178-----------------------------------------|" + - "L0.1192[1442,1601] 73ns |----------------------------------------L0.1192-----------------------------------------|" + - "L0.1206[1442,1601] 74ns |----------------------------------------L0.1206-----------------------------------------|" + - "L0.1220[1442,1601] 75ns |----------------------------------------L0.1220-----------------------------------------|" + - "L0.1234[1442,1601] 76ns |----------------------------------------L0.1234-----------------------------------------|" + - "L0.1248[1442,1601] 77ns |----------------------------------------L0.1248-----------------------------------------|" + - "L0.1262[1442,1601] 78ns |----------------------------------------L0.1262-----------------------------------------|" + - "L0.1276[1442,1601] 79ns |----------------------------------------L0.1276-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2043, L0.2057, L0.2071, L0.2085, L0.2099, L0.2113, L0.2127, L0.2141, L0.2169, L0.2183, L0.2197, L0.2211, L0.2225, L0.2239, L0.2253, L0.2267, L0.2281, L0.2295, L0.2309, L0.3011" + - " Soft Deleting 20 files: L0.1010, L0.1024, L0.1038, L0.1052, L0.1066, L0.1080, L0.1094, L0.1108, L0.1122, L0.1136, L0.1150, L0.1164, L0.1178, L0.1192, L0.1206, L0.1220, L0.1234, L0.1248, L0.1262, L0.1276" - " Creating 1 files" - - "**** Simulation run 305, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 293, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3012[1122,1281] 134ns |----------------------------------------L0.3012-----------------------------------------|" - - "L0.2170[1122,1281] 135ns |----------------------------------------L0.2170-----------------------------------------|" - - "L0.2044[1122,1281] 136ns |----------------------------------------L0.2044-----------------------------------------|" - - "L0.2058[1122,1281] 137ns |----------------------------------------L0.2058-----------------------------------------|" - - "L0.2072[1122,1281] 138ns |----------------------------------------L0.2072-----------------------------------------|" - - "L0.2086[1122,1281] 139ns |----------------------------------------L0.2086-----------------------------------------|" - - "L0.2100[1122,1281] 140ns |----------------------------------------L0.2100-----------------------------------------|" - - "L0.2114[1122,1281] 141ns |----------------------------------------L0.2114-----------------------------------------|" - - "L0.2128[1122,1281] 142ns |----------------------------------------L0.2128-----------------------------------------|" - - "L0.2142[1122,1281] 143ns |----------------------------------------L0.2142-----------------------------------------|" - - "L0.2184[1122,1281] 144ns |----------------------------------------L0.2184-----------------------------------------|" - - "L0.2198[1122,1281] 145ns |----------------------------------------L0.2198-----------------------------------------|" - - "L0.2212[1122,1281] 146ns |----------------------------------------L0.2212-----------------------------------------|" - - "L0.2226[1122,1281] 147ns |----------------------------------------L0.2226-----------------------------------------|" - - "L0.2240[1122,1281] 148ns |----------------------------------------L0.2240-----------------------------------------|" - - "L0.2254[1122,1281] 149ns |----------------------------------------L0.2254-----------------------------------------|" - - "L0.2268[1122,1281] 150ns |----------------------------------------L0.2268-----------------------------------------|" - - "L0.2282[1122,1281] 151ns |----------------------------------------L0.2282-----------------------------------------|" - - "L0.2296[1122,1281] 152ns |----------------------------------------L0.2296-----------------------------------------|" - - "L0.2310[1122,1281] 153ns |----------------------------------------L0.2310-----------------------------------------|" + - "L0.1290[1442,1601] 80ns |----------------------------------------L0.1290-----------------------------------------|" + - "L0.1304[1442,1601] 81ns |----------------------------------------L0.1304-----------------------------------------|" + - "L0.1318[1442,1601] 82ns |----------------------------------------L0.1318-----------------------------------------|" + - "L0.1332[1442,1601] 83ns |----------------------------------------L0.1332-----------------------------------------|" + - "L0.1346[1442,1601] 84ns |----------------------------------------L0.1346-----------------------------------------|" + - "L0.1360[1442,1601] 85ns |----------------------------------------L0.1360-----------------------------------------|" + - "L0.1374[1442,1601] 86ns |----------------------------------------L0.1374-----------------------------------------|" + - "L0.1388[1442,1601] 87ns |----------------------------------------L0.1388-----------------------------------------|" + - "L0.1402[1442,1601] 88ns 
|----------------------------------------L0.1402-----------------------------------------|" + - "L0.1416[1442,1601] 89ns |----------------------------------------L0.1416-----------------------------------------|" + - "L0.1430[1442,1601] 90ns |----------------------------------------L0.1430-----------------------------------------|" + - "L0.1444[1442,1601] 91ns |----------------------------------------L0.1444-----------------------------------------|" + - "L0.1458[1442,1601] 92ns |----------------------------------------L0.1458-----------------------------------------|" + - "L0.1472[1442,1601] 93ns |----------------------------------------L0.1472-----------------------------------------|" + - "L0.1486[1442,1601] 94ns |----------------------------------------L0.1486-----------------------------------------|" + - "L0.1500[1442,1601] 95ns |----------------------------------------L0.1500-----------------------------------------|" + - "L0.1514[1442,1601] 96ns |----------------------------------------L0.1514-----------------------------------------|" + - "L0.1528[1442,1601] 97ns |----------------------------------------L0.1528-----------------------------------------|" + - "L0.1542[1442,1601] 98ns |----------------------------------------L0.1542-----------------------------------------|" + - "L0.1556[1442,1601] 99ns |----------------------------------------L0.1556-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2044, L0.2058, L0.2072, L0.2086, L0.2100, L0.2114, L0.2128, L0.2142, L0.2170, L0.2184, L0.2198, L0.2212, L0.2226, L0.2240, L0.2254, L0.2268, L0.2282, L0.2296, L0.2310, L0.3012" + - " Soft Deleting 20 files: L0.1290, L0.1304, L0.1318, L0.1332, L0.1346, L0.1360, L0.1374, L0.1388, L0.1402, L0.1416, L0.1430, L0.1444, L0.1458, L0.1472, L0.1486, L0.1500, L0.1514, L0.1528, L0.1542, L0.1556" - " Creating 1 files" - - "**** Simulation run 306, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 294, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3013[1282,1441] 134ns |----------------------------------------L0.3013-----------------------------------------|" - - "L0.2171[1282,1441] 135ns |----------------------------------------L0.2171-----------------------------------------|" - - "L0.2045[1282,1441] 136ns |----------------------------------------L0.2045-----------------------------------------|" - - "L0.2059[1282,1441] 137ns |----------------------------------------L0.2059-----------------------------------------|" - - "L0.2073[1282,1441] 138ns |----------------------------------------L0.2073-----------------------------------------|" - - "L0.2087[1282,1441] 139ns |----------------------------------------L0.2087-----------------------------------------|" - - "L0.2101[1282,1441] 140ns |----------------------------------------L0.2101-----------------------------------------|" - - "L0.2115[1282,1441] 141ns |----------------------------------------L0.2115-----------------------------------------|" - - "L0.2129[1282,1441] 142ns |----------------------------------------L0.2129-----------------------------------------|" - - "L0.2143[1282,1441] 143ns |----------------------------------------L0.2143-----------------------------------------|" - - "L0.2185[1282,1441] 144ns |----------------------------------------L0.2185-----------------------------------------|" - - "L0.2199[1282,1441] 145ns |----------------------------------------L0.2199-----------------------------------------|" - - "L0.2213[1282,1441] 146ns |----------------------------------------L0.2213-----------------------------------------|" - - "L0.2227[1282,1441] 147ns |----------------------------------------L0.2227-----------------------------------------|" - - "L0.2241[1282,1441] 148ns |----------------------------------------L0.2241-----------------------------------------|" - - "L0.2255[1282,1441] 149ns |----------------------------------------L0.2255-----------------------------------------|" - - "L0.2269[1282,1441] 150ns |----------------------------------------L0.2269-----------------------------------------|" - - "L0.2283[1282,1441] 151ns |----------------------------------------L0.2283-----------------------------------------|" - - "L0.2297[1282,1441] 152ns |----------------------------------------L0.2297-----------------------------------------|" - - "L0.2311[1282,1441] 153ns |----------------------------------------L0.2311-----------------------------------------|" + - "L0.1570[1442,1601] 100ns |----------------------------------------L0.1570-----------------------------------------|" + - "L0.1584[1442,1601] 101ns |----------------------------------------L0.1584-----------------------------------------|" + - "L0.1598[1442,1601] 102ns |----------------------------------------L0.1598-----------------------------------------|" + - "L0.1612[1442,1601] 103ns |----------------------------------------L0.1612-----------------------------------------|" + - "L0.1626[1442,1601] 104ns |----------------------------------------L0.1626-----------------------------------------|" + - "L0.1640[1442,1601] 105ns |----------------------------------------L0.1640-----------------------------------------|" + - "L0.1654[1442,1601] 106ns |----------------------------------------L0.1654-----------------------------------------|" + - "L0.1668[1442,1601] 107ns |----------------------------------------L0.1668-----------------------------------------|" + - "L0.1682[1442,1601] 108ns 
|----------------------------------------L0.1682-----------------------------------------|" + - "L0.1696[1442,1601] 109ns |----------------------------------------L0.1696-----------------------------------------|" + - "L0.1710[1442,1601] 110ns |----------------------------------------L0.1710-----------------------------------------|" + - "L0.1724[1442,1601] 111ns |----------------------------------------L0.1724-----------------------------------------|" + - "L0.1738[1442,1601] 112ns |----------------------------------------L0.1738-----------------------------------------|" + - "L0.1752[1442,1601] 113ns |----------------------------------------L0.1752-----------------------------------------|" + - "L0.1766[1442,1601] 114ns |----------------------------------------L0.1766-----------------------------------------|" + - "L0.1780[1442,1601] 115ns |----------------------------------------L0.1780-----------------------------------------|" + - "L0.1794[1442,1601] 116ns |----------------------------------------L0.1794-----------------------------------------|" + - "L0.1808[1442,1601] 117ns |----------------------------------------L0.1808-----------------------------------------|" + - "L0.1822[1442,1601] 118ns |----------------------------------------L0.1822-----------------------------------------|" + - "L0.1836[1442,1601] 119ns |----------------------------------------L0.1836-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2045, L0.2059, L0.2073, L0.2087, L0.2101, L0.2115, L0.2129, L0.2143, L0.2171, L0.2185, L0.2199, L0.2213, L0.2227, L0.2241, L0.2255, L0.2269, L0.2283, L0.2297, L0.2311, L0.3013" + - " Soft Deleting 20 files: L0.1570, L0.1584, L0.1598, L0.1612, L0.1626, L0.1640, L0.1654, L0.1668, L0.1682, L0.1696, L0.1710, L0.1724, L0.1738, L0.1752, L0.1766, L0.1780, L0.1794, L0.1808, L0.1822, L0.1836" - " Creating 1 files" - - "**** Simulation run 307, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 295, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3014[1442,1601] 134ns |----------------------------------------L0.3014-----------------------------------------|" + - "L0.1850[1442,1601] 120ns |----------------------------------------L0.1850-----------------------------------------|" + - "L0.1864[1442,1601] 121ns |----------------------------------------L0.1864-----------------------------------------|" + - "L0.1878[1442,1601] 122ns |----------------------------------------L0.1878-----------------------------------------|" + - "L0.1892[1442,1601] 123ns |----------------------------------------L0.1892-----------------------------------------|" + - "L0.1906[1442,1601] 124ns |----------------------------------------L0.1906-----------------------------------------|" + - "L0.1920[1442,1601] 125ns |----------------------------------------L0.1920-----------------------------------------|" + - "L0.1934[1442,1601] 126ns |----------------------------------------L0.1934-----------------------------------------|" + - "L0.1948[1442,1601] 127ns |----------------------------------------L0.1948-----------------------------------------|" + - "L0.1962[1442,1601] 128ns |----------------------------------------L0.1962-----------------------------------------|" + - "L0.1976[1442,1601] 129ns |----------------------------------------L0.1976-----------------------------------------|" + - "L0.1990[1442,1601] 130ns |----------------------------------------L0.1990-----------------------------------------|" + - "L0.2004[1442,1601] 131ns |----------------------------------------L0.2004-----------------------------------------|" + - "L0.2018[1442,1601] 132ns |----------------------------------------L0.2018-----------------------------------------|" + - "L0.2032[1442,1601] 133ns |----------------------------------------L0.2032-----------------------------------------|" + - "L0.2158[1442,1601] 134ns |----------------------------------------L0.2158-----------------------------------------|" - "L0.2172[1442,1601] 135ns |----------------------------------------L0.2172-----------------------------------------|" - "L0.2046[1442,1601] 136ns |----------------------------------------L0.2046-----------------------------------------|" - "L0.2060[1442,1601] 137ns |----------------------------------------L0.2060-----------------------------------------|" - "L0.2074[1442,1601] 138ns |----------------------------------------L0.2074-----------------------------------------|" - "L0.2088[1442,1601] 139ns |----------------------------------------L0.2088-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1442,1601] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1850, L0.1864, L0.1878, L0.1892, L0.1906, L0.1920, L0.1934, L0.1948, L0.1962, L0.1976, L0.1990, L0.2004, L0.2018, L0.2032, L0.2046, L0.2060, L0.2074, L0.2088, L0.2158, L0.2172" + - " Creating 1 files" + - "**** Simulation run 296, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " - "L0.2102[1442,1601] 140ns |----------------------------------------L0.2102-----------------------------------------|" - "L0.2116[1442,1601] 141ns |----------------------------------------L0.2116-----------------------------------------|" - "L0.2130[1442,1601] 142ns |----------------------------------------L0.2130-----------------------------------------|" @@ -8474,373 +8142,300 @@ async fn stuck_l0_large_l0s() { - "L0.2284[1442,1601] 151ns |----------------------------------------L0.2284-----------------------------------------|" - "L0.2298[1442,1601] 152ns |----------------------------------------L0.2298-----------------------------------------|" - "L0.2312[1442,1601] 153ns |----------------------------------------L0.2312-----------------------------------------|" + - "L0.2326[1442,1601] 154ns |----------------------------------------L0.2326-----------------------------------------|" + - "L0.2340[1442,1601] 155ns |----------------------------------------L0.2340-----------------------------------------|" + - "L0.2354[1442,1601] 156ns |----------------------------------------L0.2354-----------------------------------------|" + - "L0.2368[1442,1601] 157ns |----------------------------------------L0.2368-----------------------------------------|" + - "L0.2382[1442,1601] 158ns |----------------------------------------L0.2382-----------------------------------------|" + - "L0.2396[1442,1601] 159ns |----------------------------------------L0.2396-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 159ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2046, L0.2060, L0.2074, L0.2088, L0.2102, L0.2116, L0.2130, L0.2144, L0.2172, L0.2186, L0.2200, L0.2214, L0.2228, L0.2242, L0.2256, L0.2270, L0.2284, L0.2298, L0.2312, L0.3014" + - " Soft Deleting 20 files: L0.2102, L0.2116, L0.2130, L0.2144, L0.2186, L0.2200, L0.2214, L0.2228, L0.2242, L0.2256, L0.2270, L0.2284, L0.2298, L0.2312, L0.2326, L0.2340, L0.2354, L0.2368, L0.2382, L0.2396" - " Creating 1 files" - - "**** Simulation run 308, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 297, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3015[1602,1761] 134ns |----------------------------------------L0.3015-----------------------------------------|" - - "L0.2173[1602,1761] 135ns |----------------------------------------L0.2173-----------------------------------------|" - - "L0.2047[1602,1761] 136ns |----------------------------------------L0.2047-----------------------------------------|" - - "L0.2061[1602,1761] 137ns |----------------------------------------L0.2061-----------------------------------------|" - - "L0.2075[1602,1761] 138ns |----------------------------------------L0.2075-----------------------------------------|" - - "L0.2089[1602,1761] 139ns |----------------------------------------L0.2089-----------------------------------------|" - - "L0.2103[1602,1761] 140ns |----------------------------------------L0.2103-----------------------------------------|" - - "L0.2117[1602,1761] 141ns |----------------------------------------L0.2117-----------------------------------------|" - - "L0.2131[1602,1761] 142ns |----------------------------------------L0.2131-----------------------------------------|" - - "L0.2145[1602,1761] 143ns |----------------------------------------L0.2145-----------------------------------------|" - - "L0.2187[1602,1761] 144ns |----------------------------------------L0.2187-----------------------------------------|" - - "L0.2201[1602,1761] 145ns |----------------------------------------L0.2201-----------------------------------------|" - - "L0.2215[1602,1761] 146ns |----------------------------------------L0.2215-----------------------------------------|" - - "L0.2229[1602,1761] 147ns |----------------------------------------L0.2229-----------------------------------------|" - - "L0.2243[1602,1761] 148ns |----------------------------------------L0.2243-----------------------------------------|" - - "L0.2257[1602,1761] 149ns |----------------------------------------L0.2257-----------------------------------------|" - - "L0.2271[1602,1761] 150ns |----------------------------------------L0.2271-----------------------------------------|" - - "L0.2285[1602,1761] 151ns |----------------------------------------L0.2285-----------------------------------------|" - - "L0.2299[1602,1761] 152ns |----------------------------------------L0.2299-----------------------------------------|" - - "L0.2313[1602,1761] 153ns |----------------------------------------L0.2313-----------------------------------------|" + - "L0.2410[1442,1601] 160ns |----------------------------------------L0.2410-----------------------------------------|" + - "L0.2424[1442,1601] 161ns |----------------------------------------L0.2424-----------------------------------------|" + - "L0.2437[1442,1601] 162ns |----------------------------------------L0.2437-----------------------------------------|" + - "L0.2450[1442,1601] 163ns |----------------------------------------L0.2450-----------------------------------------|" + - "L0.2463[1442,1601] 164ns |----------------------------------------L0.2463-----------------------------------------|" + - "L0.2476[1442,1601] 165ns |----------------------------------------L0.2476-----------------------------------------|" + - "L0.2489[1442,1601] 166ns |----------------------------------------L0.2489-----------------------------------------|" + - "L0.2502[1442,1601] 167ns |----------------------------------------L0.2502-----------------------------------------|" + - "L0.2515[1442,1601] 168ns 
|----------------------------------------L0.2515-----------------------------------------|" + - "L0.2528[1442,1601] 169ns |----------------------------------------L0.2528-----------------------------------------|" + - "L0.2541[1442,1601] 170ns |----------------------------------------L0.2541-----------------------------------------|" + - "L0.2554[1442,1601] 171ns |----------------------------------------L0.2554-----------------------------------------|" + - "L0.2567[1442,1601] 172ns |----------------------------------------L0.2567-----------------------------------------|" + - "L0.2580[1442,1601] 173ns |----------------------------------------L0.2580-----------------------------------------|" + - "L0.2593[1442,1601] 174ns |----------------------------------------L0.2593-----------------------------------------|" + - "L0.2606[1442,1601] 175ns |----------------------------------------L0.2606-----------------------------------------|" + - "L0.2619[1442,1601] 176ns |----------------------------------------L0.2619-----------------------------------------|" + - "L0.2632[1442,1601] 177ns |----------------------------------------L0.2632-----------------------------------------|" + - "L0.2645[1442,1601] 178ns |----------------------------------------L0.2645-----------------------------------------|" + - "L0.2658[1442,1601] 179ns |----------------------------------------L0.2658-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1602,1761] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2047, L0.2061, L0.2075, L0.2089, L0.2103, L0.2117, L0.2131, L0.2145, L0.2173, L0.2187, L0.2201, L0.2215, L0.2229, L0.2243, L0.2257, L0.2271, L0.2285, L0.2299, L0.2313, L0.3015" + - " Soft Deleting 20 files: L0.2410, L0.2424, L0.2437, L0.2450, L0.2463, L0.2476, L0.2489, L0.2502, L0.2515, L0.2528, L0.2541, L0.2554, L0.2567, L0.2580, L0.2593, L0.2606, L0.2619, L0.2632, L0.2645, L0.2658" - " Creating 1 files" - - "**** Simulation run 309, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 298, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3016[1762,2000] 134ns |----------------------------------------L0.3016-----------------------------------------|" - - "L0.2174[1762,2000] 135ns |----------------------------------------L0.2174-----------------------------------------|" - - "L0.2048[1762,2000] 136ns |----------------------------------------L0.2048-----------------------------------------|" - - "L0.2062[1762,2000] 137ns |----------------------------------------L0.2062-----------------------------------------|" - - "L0.2076[1762,2000] 138ns |----------------------------------------L0.2076-----------------------------------------|" - - "L0.2090[1762,2000] 139ns |----------------------------------------L0.2090-----------------------------------------|" - - "L0.2104[1762,2000] 140ns |----------------------------------------L0.2104-----------------------------------------|" - - "L0.2118[1762,2000] 141ns |----------------------------------------L0.2118-----------------------------------------|" - - "L0.2132[1762,2000] 142ns |----------------------------------------L0.2132-----------------------------------------|" - - "L0.2146[1762,2000] 143ns |----------------------------------------L0.2146-----------------------------------------|" - - "L0.2188[1762,2000] 144ns |----------------------------------------L0.2188-----------------------------------------|" - - "L0.2202[1762,2000] 145ns |----------------------------------------L0.2202-----------------------------------------|" - - "L0.2216[1762,2000] 146ns |----------------------------------------L0.2216-----------------------------------------|" - - "L0.2230[1762,2000] 147ns |----------------------------------------L0.2230-----------------------------------------|" - - "L0.2244[1762,2000] 148ns |----------------------------------------L0.2244-----------------------------------------|" - - "L0.2258[1762,2000] 149ns |----------------------------------------L0.2258-----------------------------------------|" - - "L0.2272[1762,2000] 150ns |----------------------------------------L0.2272-----------------------------------------|" - - "L0.2286[1762,2000] 151ns |----------------------------------------L0.2286-----------------------------------------|" - - "L0.2300[1762,2000] 152ns |----------------------------------------L0.2300-----------------------------------------|" - - "L0.2314[1762,2000] 153ns |----------------------------------------L0.2314-----------------------------------------|" + - "L0.2671[1442,1601] 180ns |----------------------------------------L0.2671-----------------------------------------|" + - "L0.2684[1442,1601] 181ns |----------------------------------------L0.2684-----------------------------------------|" + - "L0.2697[1442,1601] 182ns |----------------------------------------L0.2697-----------------------------------------|" + - "L0.2710[1442,1601] 183ns |----------------------------------------L0.2710-----------------------------------------|" + - "L0.2723[1442,1601] 184ns |----------------------------------------L0.2723-----------------------------------------|" + - "L0.2736[1442,1601] 185ns |----------------------------------------L0.2736-----------------------------------------|" + - "L0.2749[1442,1601] 186ns |----------------------------------------L0.2749-----------------------------------------|" + - "L0.2762[1442,1601] 187ns |----------------------------------------L0.2762-----------------------------------------|" + - "L0.2775[1442,1601] 188ns 
|----------------------------------------L0.2775-----------------------------------------|" + - "L0.2788[1442,1601] 189ns |----------------------------------------L0.2788-----------------------------------------|" + - "L0.2801[1442,1601] 190ns |----------------------------------------L0.2801-----------------------------------------|" + - "L0.2814[1442,1601] 191ns |----------------------------------------L0.2814-----------------------------------------|" + - "L0.2827[1442,1601] 192ns |----------------------------------------L0.2827-----------------------------------------|" + - "L0.2840[1442,1601] 193ns |----------------------------------------L0.2840-----------------------------------------|" + - "L0.2853[1442,1601] 194ns |----------------------------------------L0.2853-----------------------------------------|" + - "L0.2866[1442,1601] 195ns |----------------------------------------L0.2866-----------------------------------------|" + - "L0.2879[1442,1601] 196ns |----------------------------------------L0.2879-----------------------------------------|" + - "L0.2892[1442,1601] 197ns |----------------------------------------L0.2892-----------------------------------------|" + - "L0.2905[1442,1601] 198ns |----------------------------------------L0.2905-----------------------------------------|" + - "L0.2918[1442,1601] 199ns |----------------------------------------L0.2918-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 153ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1442,1601] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2048, L0.2062, L0.2076, L0.2090, L0.2104, L0.2118, L0.2132, L0.2146, L0.2174, L0.2188, L0.2202, L0.2216, L0.2230, L0.2244, L0.2258, L0.2272, L0.2286, L0.2300, L0.2314, L0.3016" + - " Soft Deleting 20 files: L0.2671, L0.2684, L0.2697, L0.2710, L0.2723, L0.2736, L0.2749, L0.2762, L0.2775, L0.2788, L0.2801, L0.2814, L0.2827, L0.2840, L0.2853, L0.2866, L0.2879, L0.2892, L0.2905, L0.2918" - " Creating 1 files" - - "**** Simulation run 310, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3017[2001,2086] 153ns |----------------------------------------L0.3017-----------------------------------------|" - - "L0.2329[2001,2086] 154ns |----------------------------------------L0.2329-----------------------------------------|" - - "L0.2343[2001,2086] 155ns |----------------------------------------L0.2343-----------------------------------------|" - - "L0.2357[2001,2086] 156ns |----------------------------------------L0.2357-----------------------------------------|" - - "L0.2371[2001,2086] 157ns |----------------------------------------L0.2371-----------------------------------------|" - - "L0.2385[2001,2086] 158ns |----------------------------------------L0.2385-----------------------------------------|" - - "L0.2399[2001,2086] 159ns |----------------------------------------L0.2399-----------------------------------------|" - - "L0.2413[2001,2086] 160ns |----------------------------------------L0.2413-----------------------------------------|" - - "L0.2427[2001,2086] 161ns |----------------------------------------L0.2427-----------------------------------------|" - - "L0.2440[2001,2086] 162ns |----------------------------------------L0.2440-----------------------------------------|" - - "L0.2453[2001,2086] 163ns |----------------------------------------L0.2453-----------------------------------------|" - - "L0.2466[2001,2086] 164ns |----------------------------------------L0.2466-----------------------------------------|" - - "L0.2479[2001,2086] 165ns |----------------------------------------L0.2479-----------------------------------------|" - - "L0.2492[2001,2086] 166ns |----------------------------------------L0.2492-----------------------------------------|" - - "L0.2505[2001,2086] 167ns |----------------------------------------L0.2505-----------------------------------------|" - - "L0.2518[2001,2086] 168ns |----------------------------------------L0.2518-----------------------------------------|" - - "L0.2531[2001,2086] 169ns |----------------------------------------L0.2531-----------------------------------------|" - - "L0.2544[2001,2086] 170ns |----------------------------------------L0.2544-----------------------------------------|" - - "L0.2557[2001,2086] 171ns |----------------------------------------L0.2557-----------------------------------------|" - - "L0.2570[2001,2086] 172ns |----------------------------------------L0.2570-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[2001,2086] 172ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2329, L0.2343, L0.2357, L0.2371, L0.2385, L0.2399, L0.2413, L0.2427, L0.2440, L0.2453, L0.2466, L0.2479, L0.2492, L0.2505, L0.2518, L0.2531, L0.2544, L0.2557, L0.2570, L0.3017" - - " Creating 1 files" - - "**** Simulation run 311, type=compact(ManySmallFiles). 
20 Input Files, 1kb total:" - - "L0 " - - "L0.3020[2087,1530000] 153ns 1kb|-----------------------------------L0.3020------------------------------------| " - - "L0.2330[2087,1540000] 154ns 10b|-----------------------------------L0.2330------------------------------------| " - - "L0.2344[2087,1550000] 155ns 10b|------------------------------------L0.2344------------------------------------| " - - "L0.2358[2087,1560000] 156ns 10b|------------------------------------L0.2358------------------------------------| " - - "L0.2372[2087,1570000] 157ns 10b|------------------------------------L0.2372-------------------------------------| " - - "L0.2386[2087,1580000] 158ns 10b|------------------------------------L0.2386-------------------------------------| " - - "L0.2400[2087,1590000] 159ns 10b|-------------------------------------L0.2400-------------------------------------| " - - "L0.2414[2087,1600000] 160ns 10b|-------------------------------------L0.2414-------------------------------------| " - - "L0.2428[2087,1610000] 161ns 10b|-------------------------------------L0.2428--------------------------------------| " - - "L0.2441[2087,1620000] 162ns 10b|-------------------------------------L0.2441--------------------------------------| " - - "L0.2454[2087,1630000] 163ns 10b|--------------------------------------L0.2454--------------------------------------| " - - "L0.2467[2087,1640000] 164ns 10b|--------------------------------------L0.2467--------------------------------------| " - - "L0.2480[2087,1650000] 165ns 10b|--------------------------------------L0.2480---------------------------------------| " - - "L0.2493[2087,1660000] 166ns 10b|--------------------------------------L0.2493---------------------------------------| " - - "L0.2506[2087,1670000] 167ns 10b|---------------------------------------L0.2506---------------------------------------| " - - "L0.2519[2087,1680000] 168ns 10b|---------------------------------------L0.2519---------------------------------------| " - - "L0.2532[2087,1690000] 169ns 10b|---------------------------------------L0.2532----------------------------------------| " - - "L0.2545[2087,1700000] 170ns 10b|---------------------------------------L0.2545----------------------------------------| " - - "L0.2558[2087,1710000] 171ns 10b|----------------------------------------L0.2558----------------------------------------| " - - "L0.2571[2087,1720000] 172ns 10b|----------------------------------------L0.2571-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 1kb total:" - - "L0, all files 1kb " - - "L0.?[2087,1720000] 172ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2330, L0.2344, L0.2358, L0.2372, L0.2386, L0.2400, L0.2414, L0.2428, L0.2441, L0.2454, L0.2467, L0.2480, L0.2493, L0.2506, L0.2519, L0.2532, L0.2545, L0.2558, L0.2571, L0.3020" - - " Creating 1 files" - - "**** Simulation run 312, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[101]). 
10 Input Files, 161mb total:" - - "L0 " - - "L0.2415[161,161] 161ns 0b |L0.2415|" - - "L0.2401[160,161] 160ns 0b |L0.2401|" - - "L0.2387[159,161] 159ns 0b |L0.2387|" - - "L0.2373[158,161] 158ns 0b |L0.2373|" - - "L0.2359[157,161] 157ns 0b |L0.2359|" - - "L0.2345[156,161] 156ns 0b |L0.2345|" - - "L0.2331[155,161] 155ns 0b |L0.2331|" - - "L0.2317[154,161] 154ns 0b |L0.2317|" - - "L0.2923[1,161] 19ns 161mb|----------------------------------------L0.2923-----------------------------------------|" - - "L0.3021[20,161] 153ns 0b |-----------------------------------L0.3021-----------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 161mb total:" - - "L1 " - - "L1.?[1,101] 161ns 101mb |-------------------------L1.?-------------------------| " - - "L1.?[102,161] 161ns 60mb |-------------L1.?--------------| " - - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2317, L0.2331, L0.2345, L0.2359, L0.2373, L0.2387, L0.2401, L0.2415, L0.2923, L0.3021" - - " Creating 2 files" - - "**** Simulation run 313, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3022[162,321] 153ns |----------------------------------------L0.3022-----------------------------------------|" - - "L0.2318[162,321] 154ns |----------------------------------------L0.2318-----------------------------------------|" - - "L0.2332[162,321] 155ns |----------------------------------------L0.2332-----------------------------------------|" - - "L0.2346[162,321] 156ns |----------------------------------------L0.2346-----------------------------------------|" - - "L0.2360[162,321] 157ns |----------------------------------------L0.2360-----------------------------------------|" - - "L0.2374[162,321] 158ns |----------------------------------------L0.2374-----------------------------------------|" - - "L0.2388[162,321] 159ns |----------------------------------------L0.2388-----------------------------------------|" - - "L0.2402[162,321] 160ns |----------------------------------------L0.2402-----------------------------------------|" - - "L0.2416[162,321] 161ns |----------------------------------------L0.2416-----------------------------------------|" - - "L0.2429[162,321] 162ns |----------------------------------------L0.2429-----------------------------------------|" - - "L0.2442[163,321] 163ns |----------------------------------------L0.2442----------------------------------------| " - - "L0.2455[164,321] 164ns |---------------------------------------L0.2455----------------------------------------| " - - "L0.2468[165,321] 165ns |---------------------------------------L0.2468----------------------------------------| " - - "L0.2481[166,321] 166ns |---------------------------------------L0.2481---------------------------------------| " - - "L0.2494[167,321] 167ns |---------------------------------------L0.2494---------------------------------------| " - - "L0.2507[168,321] 168ns |--------------------------------------L0.2507---------------------------------------| " - - "L0.2520[169,321] 169ns |--------------------------------------L0.2520---------------------------------------| " - - "L0.2533[170,321] 170ns |--------------------------------------L0.2533--------------------------------------| " - - "L0.2546[171,321] 171ns |-------------------------------------L0.2546--------------------------------------| " - - "L0.2559[172,321] 172ns |-------------------------------------L0.2559--------------------------------------| " - - "**** 1 Output Files (parquet_file_id not 
yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[162,321] 172ns |------------------------------------------L0.?------------------------------------------|" - - "**** Simulation run 314, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3024[482,641] 153ns |----------------------------------------L0.3024-----------------------------------------|" - - "L0.2320[482,641] 154ns |----------------------------------------L0.2320-----------------------------------------|" - - "L0.2334[482,641] 155ns |----------------------------------------L0.2334-----------------------------------------|" - - "L0.2348[482,641] 156ns |----------------------------------------L0.2348-----------------------------------------|" - - "L0.2362[482,641] 157ns |----------------------------------------L0.2362-----------------------------------------|" - - "L0.2376[482,641] 158ns |----------------------------------------L0.2376-----------------------------------------|" - - "L0.2390[482,641] 159ns |----------------------------------------L0.2390-----------------------------------------|" - - "L0.2404[482,641] 160ns |----------------------------------------L0.2404-----------------------------------------|" - - "L0.2418[482,641] 161ns |----------------------------------------L0.2418-----------------------------------------|" - - "L0.2431[482,641] 162ns |----------------------------------------L0.2431-----------------------------------------|" - - "L0.2444[482,641] 163ns |----------------------------------------L0.2444-----------------------------------------|" - - "L0.2457[482,641] 164ns |----------------------------------------L0.2457-----------------------------------------|" - - "L0.2470[482,641] 165ns |----------------------------------------L0.2470-----------------------------------------|" - - "L0.2483[482,641] 166ns |----------------------------------------L0.2483-----------------------------------------|" - - "L0.2496[482,641] 167ns |----------------------------------------L0.2496-----------------------------------------|" - - "L0.2509[482,641] 168ns |----------------------------------------L0.2509-----------------------------------------|" - - "L0.2522[482,641] 169ns |----------------------------------------L0.2522-----------------------------------------|" - - "L0.2535[482,641] 170ns |----------------------------------------L0.2535-----------------------------------------|" - - "L0.2548[482,641] 171ns |----------------------------------------L0.2548-----------------------------------------|" - - "L0.2561[482,641] 172ns |----------------------------------------L0.2561-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[482,641] 172ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 299, type=compact(ManySmallFiles). 
20 Input Files, 160mb total:" + - "L0, all files 8mb " + - "L0.211[1602,1761] 0ns |-----------------------------------------L0.211-----------------------------------------|" + - "L0.223[1602,1761] 1ns |-----------------------------------------L0.223-----------------------------------------|" + - "L0.235[1602,1761] 2ns |-----------------------------------------L0.235-----------------------------------------|" + - "L0.247[1602,1761] 3ns |-----------------------------------------L0.247-----------------------------------------|" + - "L0.259[1602,1761] 4ns |-----------------------------------------L0.259-----------------------------------------|" + - "L0.271[1602,1761] 5ns |-----------------------------------------L0.271-----------------------------------------|" + - "L0.283[1602,1761] 6ns |-----------------------------------------L0.283-----------------------------------------|" + - "L0.295[1602,1761] 7ns |-----------------------------------------L0.295-----------------------------------------|" + - "L0.307[1602,1761] 8ns |-----------------------------------------L0.307-----------------------------------------|" + - "L0.319[1602,1761] 9ns |-----------------------------------------L0.319-----------------------------------------|" + - "L0.331[1602,1761] 10ns |-----------------------------------------L0.331-----------------------------------------|" + - "L0.343[1602,1761] 11ns |-----------------------------------------L0.343-----------------------------------------|" + - "L0.355[1602,1761] 12ns |-----------------------------------------L0.355-----------------------------------------|" + - "L0.367[1602,1761] 13ns |-----------------------------------------L0.367-----------------------------------------|" + - "L0.379[1602,1761] 14ns |-----------------------------------------L0.379-----------------------------------------|" + - "L0.391[1602,1761] 15ns |-----------------------------------------L0.391-----------------------------------------|" + - "L0.403[1602,1761] 16ns |-----------------------------------------L0.403-----------------------------------------|" + - "L0.415[1602,1761] 17ns |-----------------------------------------L0.415-----------------------------------------|" + - "L0.539[1602,1761] 18ns |-----------------------------------------L0.539-----------------------------------------|" + - "L0.551[1602,1761] 19ns |-----------------------------------------L0.551-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 160mb total:" + - "L0, all files 160mb " + - "L0.?[1602,1761] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2320, L0.2334, L0.2348, L0.2362, L0.2376, L0.2390, L0.2404, L0.2418, L0.2431, L0.2444, L0.2457, L0.2470, L0.2483, L0.2496, L0.2509, L0.2522, L0.2535, L0.2548, L0.2561, L0.3024" + - " Soft Deleting 20 files: L0.211, L0.223, L0.235, L0.247, L0.259, L0.271, L0.283, L0.295, L0.307, L0.319, L0.331, L0.343, L0.355, L0.367, L0.379, L0.391, L0.403, L0.415, L0.539, L0.551" - " Creating 1 files" - - "**** Simulation run 315, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 300, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3025[642,801] 153ns |----------------------------------------L0.3025-----------------------------------------|" - - "L0.2321[642,801] 154ns |----------------------------------------L0.2321-----------------------------------------|" - - "L0.2335[642,801] 155ns |----------------------------------------L0.2335-----------------------------------------|" - - "L0.2349[642,801] 156ns |----------------------------------------L0.2349-----------------------------------------|" - - "L0.2363[642,801] 157ns |----------------------------------------L0.2363-----------------------------------------|" - - "L0.2377[642,801] 158ns |----------------------------------------L0.2377-----------------------------------------|" - - "L0.2391[642,801] 159ns |----------------------------------------L0.2391-----------------------------------------|" - - "L0.2405[642,801] 160ns |----------------------------------------L0.2405-----------------------------------------|" - - "L0.2419[642,801] 161ns |----------------------------------------L0.2419-----------------------------------------|" - - "L0.2432[642,801] 162ns |----------------------------------------L0.2432-----------------------------------------|" - - "L0.2445[642,801] 163ns |----------------------------------------L0.2445-----------------------------------------|" - - "L0.2458[642,801] 164ns |----------------------------------------L0.2458-----------------------------------------|" - - "L0.2471[642,801] 165ns |----------------------------------------L0.2471-----------------------------------------|" - - "L0.2484[642,801] 166ns |----------------------------------------L0.2484-----------------------------------------|" - - "L0.2497[642,801] 167ns |----------------------------------------L0.2497-----------------------------------------|" - - "L0.2510[642,801] 168ns |----------------------------------------L0.2510-----------------------------------------|" - - "L0.2523[642,801] 169ns |----------------------------------------L0.2523-----------------------------------------|" - - "L0.2536[642,801] 170ns |----------------------------------------L0.2536-----------------------------------------|" - - "L0.2549[642,801] 171ns |----------------------------------------L0.2549-----------------------------------------|" - - "L0.2562[642,801] 172ns |----------------------------------------L0.2562-----------------------------------------|" + - "L0.427[1602,1761] 20ns |-----------------------------------------L0.427-----------------------------------------|" + - "L0.441[1602,1761] 21ns |-----------------------------------------L0.441-----------------------------------------|" + - "L0.455[1602,1761] 22ns |-----------------------------------------L0.455-----------------------------------------|" + - "L0.469[1602,1761] 23ns |-----------------------------------------L0.469-----------------------------------------|" + - "L0.483[1602,1761] 24ns |-----------------------------------------L0.483-----------------------------------------|" + - "L0.497[1602,1761] 25ns |-----------------------------------------L0.497-----------------------------------------|" + - "L0.511[1602,1761] 26ns |-----------------------------------------L0.511-----------------------------------------|" + - "L0.525[1602,1761] 27ns |-----------------------------------------L0.525-----------------------------------------|" + - "L0.563[1602,1761] 28ns |-----------------------------------------L0.563-----------------------------------------|" + - "L0.577[1602,1761] 
29ns |-----------------------------------------L0.577-----------------------------------------|" + - "L0.591[1602,1761] 30ns |-----------------------------------------L0.591-----------------------------------------|" + - "L0.605[1602,1761] 31ns |-----------------------------------------L0.605-----------------------------------------|" + - "L0.619[1602,1761] 32ns |-----------------------------------------L0.619-----------------------------------------|" + - "L0.633[1602,1761] 33ns |-----------------------------------------L0.633-----------------------------------------|" + - "L0.647[1602,1761] 34ns |-----------------------------------------L0.647-----------------------------------------|" + - "L0.661[1602,1761] 35ns |-----------------------------------------L0.661-----------------------------------------|" + - "L0.675[1602,1761] 36ns |-----------------------------------------L0.675-----------------------------------------|" + - "L0.689[1602,1761] 37ns |-----------------------------------------L0.689-----------------------------------------|" + - "L0.703[1602,1761] 38ns |-----------------------------------------L0.703-----------------------------------------|" + - "L0.717[1602,1761] 39ns |-----------------------------------------L0.717-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2321, L0.2335, L0.2349, L0.2363, L0.2377, L0.2391, L0.2405, L0.2419, L0.2432, L0.2445, L0.2458, L0.2471, L0.2484, L0.2497, L0.2510, L0.2523, L0.2536, L0.2549, L0.2562, L0.3025" + - " Soft Deleting 20 files: L0.427, L0.441, L0.455, L0.469, L0.483, L0.497, L0.511, L0.525, L0.563, L0.577, L0.591, L0.605, L0.619, L0.633, L0.647, L0.661, L0.675, L0.689, L0.703, L0.717" - " Creating 1 files" - - "**** Simulation run 316, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 301, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3026[802,961] 153ns |----------------------------------------L0.3026-----------------------------------------|" - - "L0.2322[802,961] 154ns |----------------------------------------L0.2322-----------------------------------------|" - - "L0.2336[802,961] 155ns |----------------------------------------L0.2336-----------------------------------------|" - - "L0.2350[802,961] 156ns |----------------------------------------L0.2350-----------------------------------------|" - - "L0.2364[802,961] 157ns |----------------------------------------L0.2364-----------------------------------------|" - - "L0.2378[802,961] 158ns |----------------------------------------L0.2378-----------------------------------------|" - - "L0.2392[802,961] 159ns |----------------------------------------L0.2392-----------------------------------------|" - - "L0.2406[802,961] 160ns |----------------------------------------L0.2406-----------------------------------------|" - - "L0.2420[802,961] 161ns |----------------------------------------L0.2420-----------------------------------------|" - - "L0.2433[802,961] 162ns |----------------------------------------L0.2433-----------------------------------------|" - - "L0.2446[802,961] 163ns |----------------------------------------L0.2446-----------------------------------------|" - - "L0.2459[802,961] 164ns |----------------------------------------L0.2459-----------------------------------------|" - - "L0.2472[802,961] 165ns |----------------------------------------L0.2472-----------------------------------------|" - - "L0.2485[802,961] 166ns |----------------------------------------L0.2485-----------------------------------------|" - - "L0.2498[802,961] 167ns |----------------------------------------L0.2498-----------------------------------------|" - - "L0.2511[802,961] 168ns |----------------------------------------L0.2511-----------------------------------------|" - - "L0.2524[802,961] 169ns |----------------------------------------L0.2524-----------------------------------------|" - - "L0.2537[802,961] 170ns |----------------------------------------L0.2537-----------------------------------------|" - - "L0.2550[802,961] 171ns |----------------------------------------L0.2550-----------------------------------------|" - - "L0.2563[802,961] 172ns |----------------------------------------L0.2563-----------------------------------------|" + - "L0.731[1602,1761] 40ns |-----------------------------------------L0.731-----------------------------------------|" + - "L0.745[1602,1761] 41ns |-----------------------------------------L0.745-----------------------------------------|" + - "L0.759[1602,1761] 42ns |-----------------------------------------L0.759-----------------------------------------|" + - "L0.773[1602,1761] 43ns |-----------------------------------------L0.773-----------------------------------------|" + - "L0.787[1602,1761] 44ns |-----------------------------------------L0.787-----------------------------------------|" + - "L0.801[1602,1761] 45ns |-----------------------------------------L0.801-----------------------------------------|" + - "L0.815[1602,1761] 46ns |-----------------------------------------L0.815-----------------------------------------|" + - "L0.829[1602,1761] 47ns |-----------------------------------------L0.829-----------------------------------------|" + - "L0.843[1602,1761] 48ns |-----------------------------------------L0.843-----------------------------------------|" + - "L0.857[1602,1761] 
49ns |-----------------------------------------L0.857-----------------------------------------|" + - "L0.871[1602,1761] 50ns |-----------------------------------------L0.871-----------------------------------------|" + - "L0.885[1602,1761] 51ns |-----------------------------------------L0.885-----------------------------------------|" + - "L0.899[1602,1761] 52ns |-----------------------------------------L0.899-----------------------------------------|" + - "L0.913[1602,1761] 53ns |-----------------------------------------L0.913-----------------------------------------|" + - "L0.927[1602,1761] 54ns |-----------------------------------------L0.927-----------------------------------------|" + - "L0.941[1602,1761] 55ns |-----------------------------------------L0.941-----------------------------------------|" + - "L0.955[1602,1761] 56ns |-----------------------------------------L0.955-----------------------------------------|" + - "L0.969[1602,1761] 57ns |-----------------------------------------L0.969-----------------------------------------|" + - "L0.983[1602,1761] 58ns |-----------------------------------------L0.983-----------------------------------------|" + - "L0.997[1602,1761] 59ns |-----------------------------------------L0.997-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2322, L0.2336, L0.2350, L0.2364, L0.2378, L0.2392, L0.2406, L0.2420, L0.2433, L0.2446, L0.2459, L0.2472, L0.2485, L0.2498, L0.2511, L0.2524, L0.2537, L0.2550, L0.2563, L0.3026" + - " Soft Deleting 20 files: L0.731, L0.745, L0.759, L0.773, L0.787, L0.801, L0.815, L0.829, L0.843, L0.857, L0.871, L0.885, L0.899, L0.913, L0.927, L0.941, L0.955, L0.969, L0.983, L0.997" - " Creating 1 files" - - "**** Simulation run 317, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 302, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3027[962,1121] 153ns |----------------------------------------L0.3027-----------------------------------------|" - - "L0.2323[962,1121] 154ns |----------------------------------------L0.2323-----------------------------------------|" - - "L0.2337[962,1121] 155ns |----------------------------------------L0.2337-----------------------------------------|" - - "L0.2351[962,1121] 156ns |----------------------------------------L0.2351-----------------------------------------|" - - "L0.2365[962,1121] 157ns |----------------------------------------L0.2365-----------------------------------------|" - - "L0.2379[962,1121] 158ns |----------------------------------------L0.2379-----------------------------------------|" - - "L0.2393[962,1121] 159ns |----------------------------------------L0.2393-----------------------------------------|" - - "L0.2407[962,1121] 160ns |----------------------------------------L0.2407-----------------------------------------|" - - "L0.2421[962,1121] 161ns |----------------------------------------L0.2421-----------------------------------------|" - - "L0.2434[962,1121] 162ns |----------------------------------------L0.2434-----------------------------------------|" - - "L0.2447[962,1121] 163ns |----------------------------------------L0.2447-----------------------------------------|" - - "L0.2460[962,1121] 164ns |----------------------------------------L0.2460-----------------------------------------|" - - "L0.2473[962,1121] 165ns |----------------------------------------L0.2473-----------------------------------------|" - - "L0.2486[962,1121] 166ns |----------------------------------------L0.2486-----------------------------------------|" - - "L0.2499[962,1121] 167ns |----------------------------------------L0.2499-----------------------------------------|" - - "L0.2512[962,1121] 168ns |----------------------------------------L0.2512-----------------------------------------|" - - "L0.2525[962,1121] 169ns |----------------------------------------L0.2525-----------------------------------------|" - - "L0.2538[962,1121] 170ns |----------------------------------------L0.2538-----------------------------------------|" - - "L0.2551[962,1121] 171ns |----------------------------------------L0.2551-----------------------------------------|" - - "L0.2564[962,1121] 172ns |----------------------------------------L0.2564-----------------------------------------|" + - "L0.1011[1602,1761] 60ns |----------------------------------------L0.1011-----------------------------------------|" + - "L0.1025[1602,1761] 61ns |----------------------------------------L0.1025-----------------------------------------|" + - "L0.1039[1602,1761] 62ns |----------------------------------------L0.1039-----------------------------------------|" + - "L0.1053[1602,1761] 63ns |----------------------------------------L0.1053-----------------------------------------|" + - "L0.1067[1602,1761] 64ns |----------------------------------------L0.1067-----------------------------------------|" + - "L0.1081[1602,1761] 65ns |----------------------------------------L0.1081-----------------------------------------|" + - "L0.1095[1602,1761] 66ns |----------------------------------------L0.1095-----------------------------------------|" + - "L0.1109[1602,1761] 67ns |----------------------------------------L0.1109-----------------------------------------|" + - "L0.1123[1602,1761] 68ns 
|----------------------------------------L0.1123-----------------------------------------|" + - "L0.1137[1602,1761] 69ns |----------------------------------------L0.1137-----------------------------------------|" + - "L0.1151[1602,1761] 70ns |----------------------------------------L0.1151-----------------------------------------|" + - "L0.1165[1602,1761] 71ns |----------------------------------------L0.1165-----------------------------------------|" + - "L0.1179[1602,1761] 72ns |----------------------------------------L0.1179-----------------------------------------|" + - "L0.1193[1602,1761] 73ns |----------------------------------------L0.1193-----------------------------------------|" + - "L0.1207[1602,1761] 74ns |----------------------------------------L0.1207-----------------------------------------|" + - "L0.1221[1602,1761] 75ns |----------------------------------------L0.1221-----------------------------------------|" + - "L0.1235[1602,1761] 76ns |----------------------------------------L0.1235-----------------------------------------|" + - "L0.1249[1602,1761] 77ns |----------------------------------------L0.1249-----------------------------------------|" + - "L0.1263[1602,1761] 78ns |----------------------------------------L0.1263-----------------------------------------|" + - "L0.1277[1602,1761] 79ns |----------------------------------------L0.1277-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2323, L0.2337, L0.2351, L0.2365, L0.2379, L0.2393, L0.2407, L0.2421, L0.2434, L0.2447, L0.2460, L0.2473, L0.2486, L0.2499, L0.2512, L0.2525, L0.2538, L0.2551, L0.2564, L0.3027" + - " Soft Deleting 20 files: L0.1011, L0.1025, L0.1039, L0.1053, L0.1067, L0.1081, L0.1095, L0.1109, L0.1123, L0.1137, L0.1151, L0.1165, L0.1179, L0.1193, L0.1207, L0.1221, L0.1235, L0.1249, L0.1263, L0.1277" - " Creating 1 files" - - "**** Simulation run 318, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 303, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3028[1122,1281] 153ns |----------------------------------------L0.3028-----------------------------------------|" - - "L0.2324[1122,1281] 154ns |----------------------------------------L0.2324-----------------------------------------|" - - "L0.2338[1122,1281] 155ns |----------------------------------------L0.2338-----------------------------------------|" - - "L0.2352[1122,1281] 156ns |----------------------------------------L0.2352-----------------------------------------|" - - "L0.2366[1122,1281] 157ns |----------------------------------------L0.2366-----------------------------------------|" - - "L0.2380[1122,1281] 158ns |----------------------------------------L0.2380-----------------------------------------|" - - "L0.2394[1122,1281] 159ns |----------------------------------------L0.2394-----------------------------------------|" - - "L0.2408[1122,1281] 160ns |----------------------------------------L0.2408-----------------------------------------|" - - "L0.2422[1122,1281] 161ns |----------------------------------------L0.2422-----------------------------------------|" - - "L0.2435[1122,1281] 162ns |----------------------------------------L0.2435-----------------------------------------|" - - "L0.2448[1122,1281] 163ns |----------------------------------------L0.2448-----------------------------------------|" - - "L0.2461[1122,1281] 164ns |----------------------------------------L0.2461-----------------------------------------|" - - "L0.2474[1122,1281] 165ns |----------------------------------------L0.2474-----------------------------------------|" - - "L0.2487[1122,1281] 166ns |----------------------------------------L0.2487-----------------------------------------|" - - "L0.2500[1122,1281] 167ns |----------------------------------------L0.2500-----------------------------------------|" - - "L0.2513[1122,1281] 168ns |----------------------------------------L0.2513-----------------------------------------|" - - "L0.2526[1122,1281] 169ns |----------------------------------------L0.2526-----------------------------------------|" - - "L0.2539[1122,1281] 170ns |----------------------------------------L0.2539-----------------------------------------|" - - "L0.2552[1122,1281] 171ns |----------------------------------------L0.2552-----------------------------------------|" - - "L0.2565[1122,1281] 172ns |----------------------------------------L0.2565-----------------------------------------|" + - "L0.1291[1602,1761] 80ns |----------------------------------------L0.1291-----------------------------------------|" + - "L0.1305[1602,1761] 81ns |----------------------------------------L0.1305-----------------------------------------|" + - "L0.1319[1602,1761] 82ns |----------------------------------------L0.1319-----------------------------------------|" + - "L0.1333[1602,1761] 83ns |----------------------------------------L0.1333-----------------------------------------|" + - "L0.1347[1602,1761] 84ns |----------------------------------------L0.1347-----------------------------------------|" + - "L0.1361[1602,1761] 85ns |----------------------------------------L0.1361-----------------------------------------|" + - "L0.1375[1602,1761] 86ns |----------------------------------------L0.1375-----------------------------------------|" + - "L0.1389[1602,1761] 87ns |----------------------------------------L0.1389-----------------------------------------|" + - "L0.1403[1602,1761] 88ns 
|----------------------------------------L0.1403-----------------------------------------|" + - "L0.1417[1602,1761] 89ns |----------------------------------------L0.1417-----------------------------------------|" + - "L0.1431[1602,1761] 90ns |----------------------------------------L0.1431-----------------------------------------|" + - "L0.1445[1602,1761] 91ns |----------------------------------------L0.1445-----------------------------------------|" + - "L0.1459[1602,1761] 92ns |----------------------------------------L0.1459-----------------------------------------|" + - "L0.1473[1602,1761] 93ns |----------------------------------------L0.1473-----------------------------------------|" + - "L0.1487[1602,1761] 94ns |----------------------------------------L0.1487-----------------------------------------|" + - "L0.1501[1602,1761] 95ns |----------------------------------------L0.1501-----------------------------------------|" + - "L0.1515[1602,1761] 96ns |----------------------------------------L0.1515-----------------------------------------|" + - "L0.1529[1602,1761] 97ns |----------------------------------------L0.1529-----------------------------------------|" + - "L0.1543[1602,1761] 98ns |----------------------------------------L0.1543-----------------------------------------|" + - "L0.1557[1602,1761] 99ns |----------------------------------------L0.1557-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2324, L0.2338, L0.2352, L0.2366, L0.2380, L0.2394, L0.2408, L0.2422, L0.2435, L0.2448, L0.2461, L0.2474, L0.2487, L0.2500, L0.2513, L0.2526, L0.2539, L0.2552, L0.2565, L0.3028" + - " Soft Deleting 20 files: L0.1291, L0.1305, L0.1319, L0.1333, L0.1347, L0.1361, L0.1375, L0.1389, L0.1403, L0.1417, L0.1431, L0.1445, L0.1459, L0.1473, L0.1487, L0.1501, L0.1515, L0.1529, L0.1543, L0.1557" - " Creating 1 files" - - "**** Simulation run 319, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 304, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3029[1282,1441] 153ns |----------------------------------------L0.3029-----------------------------------------|" - - "L0.2325[1282,1441] 154ns |----------------------------------------L0.2325-----------------------------------------|" - - "L0.2339[1282,1441] 155ns |----------------------------------------L0.2339-----------------------------------------|" - - "L0.2353[1282,1441] 156ns |----------------------------------------L0.2353-----------------------------------------|" - - "L0.2367[1282,1441] 157ns |----------------------------------------L0.2367-----------------------------------------|" - - "L0.2381[1282,1441] 158ns |----------------------------------------L0.2381-----------------------------------------|" - - "L0.2395[1282,1441] 159ns |----------------------------------------L0.2395-----------------------------------------|" - - "L0.2409[1282,1441] 160ns |----------------------------------------L0.2409-----------------------------------------|" - - "L0.2423[1282,1441] 161ns |----------------------------------------L0.2423-----------------------------------------|" - - "L0.2436[1282,1441] 162ns |----------------------------------------L0.2436-----------------------------------------|" - - "L0.2449[1282,1441] 163ns |----------------------------------------L0.2449-----------------------------------------|" - - "L0.2462[1282,1441] 164ns |----------------------------------------L0.2462-----------------------------------------|" - - "L0.2475[1282,1441] 165ns |----------------------------------------L0.2475-----------------------------------------|" - - "L0.2488[1282,1441] 166ns |----------------------------------------L0.2488-----------------------------------------|" - - "L0.2501[1282,1441] 167ns |----------------------------------------L0.2501-----------------------------------------|" - - "L0.2514[1282,1441] 168ns |----------------------------------------L0.2514-----------------------------------------|" - - "L0.2527[1282,1441] 169ns |----------------------------------------L0.2527-----------------------------------------|" - - "L0.2540[1282,1441] 170ns |----------------------------------------L0.2540-----------------------------------------|" - - "L0.2553[1282,1441] 171ns |----------------------------------------L0.2553-----------------------------------------|" - - "L0.2566[1282,1441] 172ns |----------------------------------------L0.2566-----------------------------------------|" + - "L0.1571[1602,1761] 100ns |----------------------------------------L0.1571-----------------------------------------|" + - "L0.1585[1602,1761] 101ns |----------------------------------------L0.1585-----------------------------------------|" + - "L0.1599[1602,1761] 102ns |----------------------------------------L0.1599-----------------------------------------|" + - "L0.1613[1602,1761] 103ns |----------------------------------------L0.1613-----------------------------------------|" + - "L0.1627[1602,1761] 104ns |----------------------------------------L0.1627-----------------------------------------|" + - "L0.1641[1602,1761] 105ns |----------------------------------------L0.1641-----------------------------------------|" + - "L0.1655[1602,1761] 106ns |----------------------------------------L0.1655-----------------------------------------|" + - "L0.1669[1602,1761] 107ns |----------------------------------------L0.1669-----------------------------------------|" + - "L0.1683[1602,1761] 108ns 
|----------------------------------------L0.1683-----------------------------------------|" + - "L0.1697[1602,1761] 109ns |----------------------------------------L0.1697-----------------------------------------|" + - "L0.1711[1602,1761] 110ns |----------------------------------------L0.1711-----------------------------------------|" + - "L0.1725[1602,1761] 111ns |----------------------------------------L0.1725-----------------------------------------|" + - "L0.1739[1602,1761] 112ns |----------------------------------------L0.1739-----------------------------------------|" + - "L0.1753[1602,1761] 113ns |----------------------------------------L0.1753-----------------------------------------|" + - "L0.1767[1602,1761] 114ns |----------------------------------------L0.1767-----------------------------------------|" + - "L0.1781[1602,1761] 115ns |----------------------------------------L0.1781-----------------------------------------|" + - "L0.1795[1602,1761] 116ns |----------------------------------------L0.1795-----------------------------------------|" + - "L0.1809[1602,1761] 117ns |----------------------------------------L0.1809-----------------------------------------|" + - "L0.1823[1602,1761] 118ns |----------------------------------------L0.1823-----------------------------------------|" + - "L0.1837[1602,1761] 119ns |----------------------------------------L0.1837-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2325, L0.2339, L0.2353, L0.2367, L0.2381, L0.2395, L0.2409, L0.2423, L0.2436, L0.2449, L0.2462, L0.2475, L0.2488, L0.2501, L0.2514, L0.2527, L0.2540, L0.2553, L0.2566, L0.3029" + - " Soft Deleting 20 files: L0.1571, L0.1585, L0.1599, L0.1613, L0.1627, L0.1641, L0.1655, L0.1669, L0.1683, L0.1697, L0.1711, L0.1725, L0.1739, L0.1753, L0.1767, L0.1781, L0.1795, L0.1809, L0.1823, L0.1837" - " Creating 1 files" - - "**** Simulation run 320, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 305, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3030[1442,1601] 153ns |----------------------------------------L0.3030-----------------------------------------|" - - "L0.2326[1442,1601] 154ns |----------------------------------------L0.2326-----------------------------------------|" - - "L0.2340[1442,1601] 155ns |----------------------------------------L0.2340-----------------------------------------|" - - "L0.2354[1442,1601] 156ns |----------------------------------------L0.2354-----------------------------------------|" - - "L0.2368[1442,1601] 157ns |----------------------------------------L0.2368-----------------------------------------|" - - "L0.2382[1442,1601] 158ns |----------------------------------------L0.2382-----------------------------------------|" - - "L0.2396[1442,1601] 159ns |----------------------------------------L0.2396-----------------------------------------|" - - "L0.2410[1442,1601] 160ns |----------------------------------------L0.2410-----------------------------------------|" - - "L0.2424[1442,1601] 161ns |----------------------------------------L0.2424-----------------------------------------|" - - "L0.2437[1442,1601] 162ns |----------------------------------------L0.2437-----------------------------------------|" - - "L0.2450[1442,1601] 163ns |----------------------------------------L0.2450-----------------------------------------|" - - "L0.2463[1442,1601] 164ns |----------------------------------------L0.2463-----------------------------------------|" - - "L0.2476[1442,1601] 165ns |----------------------------------------L0.2476-----------------------------------------|" - - "L0.2489[1442,1601] 166ns |----------------------------------------L0.2489-----------------------------------------|" - - "L0.2502[1442,1601] 167ns |----------------------------------------L0.2502-----------------------------------------|" - - "L0.2515[1442,1601] 168ns |----------------------------------------L0.2515-----------------------------------------|" - - "L0.2528[1442,1601] 169ns |----------------------------------------L0.2528-----------------------------------------|" - - "L0.2541[1442,1601] 170ns |----------------------------------------L0.2541-----------------------------------------|" - - "L0.2554[1442,1601] 171ns |----------------------------------------L0.2554-----------------------------------------|" - - "L0.2567[1442,1601] 172ns |----------------------------------------L0.2567-----------------------------------------|" + - "L0.1851[1602,1761] 120ns |----------------------------------------L0.1851-----------------------------------------|" + - "L0.1865[1602,1761] 121ns |----------------------------------------L0.1865-----------------------------------------|" + - "L0.1879[1602,1761] 122ns |----------------------------------------L0.1879-----------------------------------------|" + - "L0.1893[1602,1761] 123ns |----------------------------------------L0.1893-----------------------------------------|" + - "L0.1907[1602,1761] 124ns |----------------------------------------L0.1907-----------------------------------------|" + - "L0.1921[1602,1761] 125ns |----------------------------------------L0.1921-----------------------------------------|" + - "L0.1935[1602,1761] 126ns |----------------------------------------L0.1935-----------------------------------------|" + - "L0.1949[1602,1761] 127ns |----------------------------------------L0.1949-----------------------------------------|" + - "L0.1963[1602,1761] 128ns 
|----------------------------------------L0.1963-----------------------------------------|" + - "L0.1977[1602,1761] 129ns |----------------------------------------L0.1977-----------------------------------------|" + - "L0.1991[1602,1761] 130ns |----------------------------------------L0.1991-----------------------------------------|" + - "L0.2005[1602,1761] 131ns |----------------------------------------L0.2005-----------------------------------------|" + - "L0.2019[1602,1761] 132ns |----------------------------------------L0.2019-----------------------------------------|" + - "L0.2033[1602,1761] 133ns |----------------------------------------L0.2033-----------------------------------------|" + - "L0.2159[1602,1761] 134ns |----------------------------------------L0.2159-----------------------------------------|" + - "L0.2173[1602,1761] 135ns |----------------------------------------L0.2173-----------------------------------------|" + - "L0.2047[1602,1761] 136ns |----------------------------------------L0.2047-----------------------------------------|" + - "L0.2061[1602,1761] 137ns |----------------------------------------L0.2061-----------------------------------------|" + - "L0.2075[1602,1761] 138ns |----------------------------------------L0.2075-----------------------------------------|" + - "L0.2089[1602,1761] 139ns |----------------------------------------L0.2089-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2326, L0.2340, L0.2354, L0.2368, L0.2382, L0.2396, L0.2410, L0.2424, L0.2437, L0.2450, L0.2463, L0.2476, L0.2489, L0.2502, L0.2515, L0.2528, L0.2541, L0.2554, L0.2567, L0.3030" + - " Soft Deleting 20 files: L0.1851, L0.1865, L0.1879, L0.1893, L0.1907, L0.1921, L0.1935, L0.1949, L0.1963, L0.1977, L0.1991, L0.2005, L0.2019, L0.2033, L0.2047, L0.2061, L0.2075, L0.2089, L0.2159, L0.2173" - " Creating 1 files" - - "**** Simulation run 321, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 306, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3031[1602,1761] 153ns |----------------------------------------L0.3031-----------------------------------------|" + - "L0.2103[1602,1761] 140ns |----------------------------------------L0.2103-----------------------------------------|" + - "L0.2117[1602,1761] 141ns |----------------------------------------L0.2117-----------------------------------------|" + - "L0.2131[1602,1761] 142ns |----------------------------------------L0.2131-----------------------------------------|" + - "L0.2145[1602,1761] 143ns |----------------------------------------L0.2145-----------------------------------------|" + - "L0.2187[1602,1761] 144ns |----------------------------------------L0.2187-----------------------------------------|" + - "L0.2201[1602,1761] 145ns |----------------------------------------L0.2201-----------------------------------------|" + - "L0.2215[1602,1761] 146ns |----------------------------------------L0.2215-----------------------------------------|" + - "L0.2229[1602,1761] 147ns |----------------------------------------L0.2229-----------------------------------------|" + - "L0.2243[1602,1761] 148ns |----------------------------------------L0.2243-----------------------------------------|" + - "L0.2257[1602,1761] 149ns |----------------------------------------L0.2257-----------------------------------------|" + - "L0.2271[1602,1761] 150ns |----------------------------------------L0.2271-----------------------------------------|" + - "L0.2285[1602,1761] 151ns |----------------------------------------L0.2285-----------------------------------------|" + - "L0.2299[1602,1761] 152ns |----------------------------------------L0.2299-----------------------------------------|" + - "L0.2313[1602,1761] 153ns |----------------------------------------L0.2313-----------------------------------------|" - "L0.2327[1602,1761] 154ns |----------------------------------------L0.2327-----------------------------------------|" - "L0.2341[1602,1761] 155ns |----------------------------------------L0.2341-----------------------------------------|" - "L0.2355[1602,1761] 156ns |----------------------------------------L0.2355-----------------------------------------|" - "L0.2369[1602,1761] 157ns |----------------------------------------L0.2369-----------------------------------------|" - "L0.2383[1602,1761] 158ns |----------------------------------------L0.2383-----------------------------------------|" - "L0.2397[1602,1761] 159ns |----------------------------------------L0.2397-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1602,1761] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2103, L0.2117, L0.2131, L0.2145, L0.2187, L0.2201, L0.2215, L0.2229, L0.2243, L0.2257, L0.2271, L0.2285, L0.2299, L0.2313, L0.2327, L0.2341, L0.2355, L0.2369, L0.2383, L0.2397" + - " Creating 1 files" + - "**** Simulation run 307, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " - "L0.2411[1602,1761] 160ns |----------------------------------------L0.2411-----------------------------------------|" - "L0.2425[1602,1761] 161ns |----------------------------------------L0.2425-----------------------------------------|" - "L0.2438[1602,1761] 162ns |----------------------------------------L0.2438-----------------------------------------|" @@ -8854,52 +8449,242 @@ async fn stuck_l0_large_l0s() { - "L0.2542[1602,1761] 170ns |----------------------------------------L0.2542-----------------------------------------|" - "L0.2555[1602,1761] 171ns |----------------------------------------L0.2555-----------------------------------------|" - "L0.2568[1602,1761] 172ns |----------------------------------------L0.2568-----------------------------------------|" + - "L0.2581[1602,1761] 173ns |----------------------------------------L0.2581-----------------------------------------|" + - "L0.2594[1602,1761] 174ns |----------------------------------------L0.2594-----------------------------------------|" + - "L0.2607[1602,1761] 175ns |----------------------------------------L0.2607-----------------------------------------|" + - "L0.2620[1602,1761] 176ns |----------------------------------------L0.2620-----------------------------------------|" + - "L0.2633[1602,1761] 177ns |----------------------------------------L0.2633-----------------------------------------|" + - "L0.2646[1602,1761] 178ns |----------------------------------------L0.2646-----------------------------------------|" + - "L0.2659[1602,1761] 179ns |----------------------------------------L0.2659-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1602,1761] 179ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2411, L0.2425, L0.2438, L0.2451, L0.2464, L0.2477, L0.2490, L0.2503, L0.2516, L0.2529, L0.2542, L0.2555, L0.2568, L0.2581, L0.2594, L0.2607, L0.2620, L0.2633, L0.2646, L0.2659" + - " Creating 1 files" + - "**** Simulation run 308, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2672[1602,1761] 180ns |----------------------------------------L0.2672-----------------------------------------|" + - "L0.2685[1602,1761] 181ns |----------------------------------------L0.2685-----------------------------------------|" + - "L0.2698[1602,1761] 182ns |----------------------------------------L0.2698-----------------------------------------|" + - "L0.2711[1602,1761] 183ns |----------------------------------------L0.2711-----------------------------------------|" + - "L0.2724[1602,1761] 184ns |----------------------------------------L0.2724-----------------------------------------|" + - "L0.2737[1602,1761] 185ns |----------------------------------------L0.2737-----------------------------------------|" + - "L0.2750[1602,1761] 186ns |----------------------------------------L0.2750-----------------------------------------|" + - "L0.2763[1602,1761] 187ns |----------------------------------------L0.2763-----------------------------------------|" + - "L0.2776[1602,1761] 188ns |----------------------------------------L0.2776-----------------------------------------|" + - "L0.2789[1602,1761] 189ns |----------------------------------------L0.2789-----------------------------------------|" + - "L0.2802[1602,1761] 190ns |----------------------------------------L0.2802-----------------------------------------|" + - "L0.2815[1602,1761] 191ns |----------------------------------------L0.2815-----------------------------------------|" + - "L0.2828[1602,1761] 192ns |----------------------------------------L0.2828-----------------------------------------|" + - "L0.2841[1602,1761] 193ns |----------------------------------------L0.2841-----------------------------------------|" + - "L0.2854[1602,1761] 194ns |----------------------------------------L0.2854-----------------------------------------|" + - "L0.2867[1602,1761] 195ns |----------------------------------------L0.2867-----------------------------------------|" + - "L0.2880[1602,1761] 196ns |----------------------------------------L0.2880-----------------------------------------|" + - "L0.2893[1602,1761] 197ns |----------------------------------------L0.2893-----------------------------------------|" + - "L0.2906[1602,1761] 198ns |----------------------------------------L0.2906-----------------------------------------|" + - "L0.2919[1602,1761] 199ns |----------------------------------------L0.2919-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1602,1761] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1602,1761] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2327, L0.2341, L0.2355, L0.2369, L0.2383, L0.2397, L0.2411, L0.2425, L0.2438, L0.2451, L0.2464, L0.2477, L0.2490, L0.2503, L0.2516, L0.2529, L0.2542, L0.2555, L0.2568, L0.3031" + - " Soft Deleting 20 files: L0.2672, L0.2685, L0.2698, L0.2711, L0.2724, L0.2737, L0.2750, L0.2763, L0.2776, L0.2789, L0.2802, L0.2815, L0.2828, L0.2841, L0.2854, L0.2867, L0.2880, L0.2893, L0.2906, L0.2919" - " Creating 1 files" + - "**** Simulation run 309, type=compact(ManySmallFiles). 
20 Input Files, 239mb total:" + - "L0, all files 12mb " + - "L0.212[1762,2000] 0ns |-----------------------------------------L0.212-----------------------------------------|" + - "L0.224[1762,2000] 1ns |-----------------------------------------L0.224-----------------------------------------|" + - "L0.236[1762,2000] 2ns |-----------------------------------------L0.236-----------------------------------------|" + - "L0.248[1762,2000] 3ns |-----------------------------------------L0.248-----------------------------------------|" + - "L0.260[1762,2000] 4ns |-----------------------------------------L0.260-----------------------------------------|" + - "L0.272[1762,2000] 5ns |-----------------------------------------L0.272-----------------------------------------|" + - "L0.284[1762,2000] 6ns |-----------------------------------------L0.284-----------------------------------------|" + - "L0.296[1762,2000] 7ns |-----------------------------------------L0.296-----------------------------------------|" + - "L0.308[1762,2000] 8ns |-----------------------------------------L0.308-----------------------------------------|" + - "L0.320[1762,2000] 9ns |-----------------------------------------L0.320-----------------------------------------|" + - "L0.332[1762,2000] 10ns |-----------------------------------------L0.332-----------------------------------------|" + - "L0.344[1762,2000] 11ns |-----------------------------------------L0.344-----------------------------------------|" + - "L0.356[1762,2000] 12ns |-----------------------------------------L0.356-----------------------------------------|" + - "L0.368[1762,2000] 13ns |-----------------------------------------L0.368-----------------------------------------|" + - "L0.380[1762,2000] 14ns |-----------------------------------------L0.380-----------------------------------------|" + - "L0.392[1762,2000] 15ns |-----------------------------------------L0.392-----------------------------------------|" + - "L0.404[1762,2000] 16ns |-----------------------------------------L0.404-----------------------------------------|" + - "L0.416[1762,2000] 17ns |-----------------------------------------L0.416-----------------------------------------|" + - "L0.540[1762,2000] 18ns |-----------------------------------------L0.540-----------------------------------------|" + - "L0.552[1762,2000] 19ns |-----------------------------------------L0.552-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 239mb total:" + - "L0, all files 239mb " + - "L0.?[1762,2000] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2318, L0.2332, L0.2346, L0.2360, L0.2374, L0.2388, L0.2402, L0.2416, L0.2429, L0.2442, L0.2455, L0.2468, L0.2481, L0.2494, L0.2507, L0.2520, L0.2533, L0.2546, L0.2559, L0.3022" + - " Soft Deleting 20 files: L0.212, L0.224, L0.236, L0.248, L0.260, L0.272, L0.284, L0.296, L0.308, L0.320, L0.332, L0.344, L0.356, L0.368, L0.380, L0.392, L0.404, L0.416, L0.540, L0.552" - " Creating 1 files" - - "**** Simulation run 322, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 310, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3023[322,481] 153ns |----------------------------------------L0.3023-----------------------------------------|" - - "L0.2319[322,481] 154ns |----------------------------------------L0.2319-----------------------------------------|" - - "L0.2333[322,481] 155ns |----------------------------------------L0.2333-----------------------------------------|" - - "L0.2347[322,481] 156ns |----------------------------------------L0.2347-----------------------------------------|" - - "L0.2361[322,481] 157ns |----------------------------------------L0.2361-----------------------------------------|" - - "L0.2375[322,481] 158ns |----------------------------------------L0.2375-----------------------------------------|" - - "L0.2389[322,481] 159ns |----------------------------------------L0.2389-----------------------------------------|" - - "L0.2403[322,481] 160ns |----------------------------------------L0.2403-----------------------------------------|" - - "L0.2417[322,481] 161ns |----------------------------------------L0.2417-----------------------------------------|" - - "L0.2430[322,481] 162ns |----------------------------------------L0.2430-----------------------------------------|" - - "L0.2443[322,481] 163ns |----------------------------------------L0.2443-----------------------------------------|" - - "L0.2456[322,481] 164ns |----------------------------------------L0.2456-----------------------------------------|" - - "L0.2469[322,481] 165ns |----------------------------------------L0.2469-----------------------------------------|" - - "L0.2482[322,481] 166ns |----------------------------------------L0.2482-----------------------------------------|" - - "L0.2495[322,481] 167ns |----------------------------------------L0.2495-----------------------------------------|" - - "L0.2508[322,481] 168ns |----------------------------------------L0.2508-----------------------------------------|" - - "L0.2521[322,481] 169ns |----------------------------------------L0.2521-----------------------------------------|" - - "L0.2534[322,481] 170ns |----------------------------------------L0.2534-----------------------------------------|" - - "L0.2547[322,481] 171ns |----------------------------------------L0.2547-----------------------------------------|" - - "L0.2560[322,481] 172ns |----------------------------------------L0.2560-----------------------------------------|" + - "L0.428[1762,2000] 20ns |-----------------------------------------L0.428-----------------------------------------|" + - "L0.442[1762,2000] 21ns |-----------------------------------------L0.442-----------------------------------------|" + - "L0.456[1762,2000] 22ns |-----------------------------------------L0.456-----------------------------------------|" + - "L0.470[1762,2000] 23ns |-----------------------------------------L0.470-----------------------------------------|" + - "L0.484[1762,2000] 24ns |-----------------------------------------L0.484-----------------------------------------|" + - "L0.498[1762,2000] 25ns |-----------------------------------------L0.498-----------------------------------------|" + - "L0.512[1762,2000] 26ns |-----------------------------------------L0.512-----------------------------------------|" + - "L0.526[1762,2000] 27ns |-----------------------------------------L0.526-----------------------------------------|" + - "L0.564[1762,2000] 28ns |-----------------------------------------L0.564-----------------------------------------|" + - "L0.578[1762,2000] 
29ns |-----------------------------------------L0.578-----------------------------------------|" + - "L0.592[1762,2000] 30ns |-----------------------------------------L0.592-----------------------------------------|" + - "L0.606[1762,2000] 31ns |-----------------------------------------L0.606-----------------------------------------|" + - "L0.620[1762,2000] 32ns |-----------------------------------------L0.620-----------------------------------------|" + - "L0.634[1762,2000] 33ns |-----------------------------------------L0.634-----------------------------------------|" + - "L0.648[1762,2000] 34ns |-----------------------------------------L0.648-----------------------------------------|" + - "L0.662[1762,2000] 35ns |-----------------------------------------L0.662-----------------------------------------|" + - "L0.676[1762,2000] 36ns |-----------------------------------------L0.676-----------------------------------------|" + - "L0.690[1762,2000] 37ns |-----------------------------------------L0.690-----------------------------------------|" + - "L0.704[1762,2000] 38ns |-----------------------------------------L0.704-----------------------------------------|" + - "L0.718[1762,2000] 39ns |-----------------------------------------L0.718-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[322,481] 172ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1762,2000] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2319, L0.2333, L0.2347, L0.2361, L0.2375, L0.2389, L0.2403, L0.2417, L0.2430, L0.2443, L0.2456, L0.2469, L0.2482, L0.2495, L0.2508, L0.2521, L0.2534, L0.2547, L0.2560, L0.3023" + - " Soft Deleting 20 files: L0.428, L0.442, L0.456, L0.470, L0.484, L0.498, L0.512, L0.526, L0.564, L0.578, L0.592, L0.606, L0.620, L0.634, L0.648, L0.662, L0.676, L0.690, L0.704, L0.718" - " Creating 1 files" - - "**** Simulation run 323, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 311, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.732[1762,2000] 40ns |-----------------------------------------L0.732-----------------------------------------|" + - "L0.746[1762,2000] 41ns |-----------------------------------------L0.746-----------------------------------------|" + - "L0.760[1762,2000] 42ns |-----------------------------------------L0.760-----------------------------------------|" + - "L0.774[1762,2000] 43ns |-----------------------------------------L0.774-----------------------------------------|" + - "L0.788[1762,2000] 44ns |-----------------------------------------L0.788-----------------------------------------|" + - "L0.802[1762,2000] 45ns |-----------------------------------------L0.802-----------------------------------------|" + - "L0.816[1762,2000] 46ns |-----------------------------------------L0.816-----------------------------------------|" + - "L0.830[1762,2000] 47ns |-----------------------------------------L0.830-----------------------------------------|" + - "L0.844[1762,2000] 48ns |-----------------------------------------L0.844-----------------------------------------|" + - "L0.858[1762,2000] 49ns |-----------------------------------------L0.858-----------------------------------------|" + - "L0.872[1762,2000] 50ns |-----------------------------------------L0.872-----------------------------------------|" + - "L0.886[1762,2000] 51ns |-----------------------------------------L0.886-----------------------------------------|" + - "L0.900[1762,2000] 52ns |-----------------------------------------L0.900-----------------------------------------|" + - "L0.914[1762,2000] 53ns |-----------------------------------------L0.914-----------------------------------------|" + - "L0.928[1762,2000] 54ns |-----------------------------------------L0.928-----------------------------------------|" + - "L0.942[1762,2000] 55ns |-----------------------------------------L0.942-----------------------------------------|" + - "L0.956[1762,2000] 56ns |-----------------------------------------L0.956-----------------------------------------|" + - "L0.970[1762,2000] 57ns |-----------------------------------------L0.970-----------------------------------------|" + - "L0.984[1762,2000] 58ns |-----------------------------------------L0.984-----------------------------------------|" + - "L0.998[1762,2000] 59ns |-----------------------------------------L0.998-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.3032[1762,2000] 153ns |----------------------------------------L0.3032-----------------------------------------|" + - "L0.?[1762,2000] 59ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.732, L0.746, L0.760, L0.774, L0.788, L0.802, L0.816, L0.830, L0.844, L0.858, L0.872, L0.886, L0.900, L0.914, L0.928, L0.942, L0.956, L0.970, L0.984, L0.998" + - " Creating 1 files" + - "**** Simulation run 312, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1012[1762,2000] 60ns |----------------------------------------L0.1012-----------------------------------------|" + - "L0.1026[1762,2000] 61ns |----------------------------------------L0.1026-----------------------------------------|" + - "L0.1040[1762,2000] 62ns |----------------------------------------L0.1040-----------------------------------------|" + - "L0.1054[1762,2000] 63ns |----------------------------------------L0.1054-----------------------------------------|" + - "L0.1068[1762,2000] 64ns |----------------------------------------L0.1068-----------------------------------------|" + - "L0.1082[1762,2000] 65ns |----------------------------------------L0.1082-----------------------------------------|" + - "L0.1096[1762,2000] 66ns |----------------------------------------L0.1096-----------------------------------------|" + - "L0.1110[1762,2000] 67ns |----------------------------------------L0.1110-----------------------------------------|" + - "L0.1124[1762,2000] 68ns |----------------------------------------L0.1124-----------------------------------------|" + - "L0.1138[1762,2000] 69ns |----------------------------------------L0.1138-----------------------------------------|" + - "L0.1152[1762,2000] 70ns |----------------------------------------L0.1152-----------------------------------------|" + - "L0.1166[1762,2000] 71ns |----------------------------------------L0.1166-----------------------------------------|" + - "L0.1180[1762,2000] 72ns |----------------------------------------L0.1180-----------------------------------------|" + - "L0.1194[1762,2000] 73ns |----------------------------------------L0.1194-----------------------------------------|" + - "L0.1208[1762,2000] 74ns |----------------------------------------L0.1208-----------------------------------------|" + - "L0.1222[1762,2000] 75ns |----------------------------------------L0.1222-----------------------------------------|" + - "L0.1236[1762,2000] 76ns |----------------------------------------L0.1236-----------------------------------------|" + - "L0.1250[1762,2000] 77ns |----------------------------------------L0.1250-----------------------------------------|" + - "L0.1264[1762,2000] 78ns |----------------------------------------L0.1264-----------------------------------------|" + - "L0.1278[1762,2000] 79ns |----------------------------------------L0.1278-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1762,2000] 79ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1012, L0.1026, L0.1040, L0.1054, L0.1068, L0.1082, L0.1096, L0.1110, L0.1124, L0.1138, L0.1152, L0.1166, L0.1180, L0.1194, L0.1208, L0.1222, L0.1236, L0.1250, L0.1264, L0.1278" + - " Creating 1 files" + - "**** Simulation run 313, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1292[1762,2000] 80ns |----------------------------------------L0.1292-----------------------------------------|" + - "L0.1306[1762,2000] 81ns |----------------------------------------L0.1306-----------------------------------------|" + - "L0.1320[1762,2000] 82ns |----------------------------------------L0.1320-----------------------------------------|" + - "L0.1334[1762,2000] 83ns |----------------------------------------L0.1334-----------------------------------------|" + - "L0.1348[1762,2000] 84ns |----------------------------------------L0.1348-----------------------------------------|" + - "L0.1362[1762,2000] 85ns |----------------------------------------L0.1362-----------------------------------------|" + - "L0.1376[1762,2000] 86ns |----------------------------------------L0.1376-----------------------------------------|" + - "L0.1390[1762,2000] 87ns |----------------------------------------L0.1390-----------------------------------------|" + - "L0.1404[1762,2000] 88ns |----------------------------------------L0.1404-----------------------------------------|" + - "L0.1418[1762,2000] 89ns |----------------------------------------L0.1418-----------------------------------------|" + - "L0.1432[1762,2000] 90ns |----------------------------------------L0.1432-----------------------------------------|" + - "L0.1446[1762,2000] 91ns |----------------------------------------L0.1446-----------------------------------------|" + - "L0.1460[1762,2000] 92ns |----------------------------------------L0.1460-----------------------------------------|" + - "L0.1474[1762,2000] 93ns |----------------------------------------L0.1474-----------------------------------------|" + - "L0.1488[1762,2000] 94ns |----------------------------------------L0.1488-----------------------------------------|" + - "L0.1502[1762,2000] 95ns |----------------------------------------L0.1502-----------------------------------------|" + - "L0.1516[1762,2000] 96ns |----------------------------------------L0.1516-----------------------------------------|" + - "L0.1530[1762,2000] 97ns |----------------------------------------L0.1530-----------------------------------------|" + - "L0.1544[1762,2000] 98ns |----------------------------------------L0.1544-----------------------------------------|" + - "L0.1558[1762,2000] 99ns |----------------------------------------L0.1558-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1762,2000] 99ns |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 314, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.1852[1762,2000] 120ns |----------------------------------------L0.1852-----------------------------------------|" + - "L0.1866[1762,2000] 121ns |----------------------------------------L0.1866-----------------------------------------|" + - "L0.1880[1762,2000] 122ns |----------------------------------------L0.1880-----------------------------------------|" + - "L0.1894[1762,2000] 123ns |----------------------------------------L0.1894-----------------------------------------|" + - "L0.1908[1762,2000] 124ns |----------------------------------------L0.1908-----------------------------------------|" + - "L0.1922[1762,2000] 125ns |----------------------------------------L0.1922-----------------------------------------|" + - "L0.1936[1762,2000] 126ns |----------------------------------------L0.1936-----------------------------------------|" + - "L0.1950[1762,2000] 127ns |----------------------------------------L0.1950-----------------------------------------|" + - "L0.1964[1762,2000] 128ns |----------------------------------------L0.1964-----------------------------------------|" + - "L0.1978[1762,2000] 129ns |----------------------------------------L0.1978-----------------------------------------|" + - "L0.1992[1762,2000] 130ns |----------------------------------------L0.1992-----------------------------------------|" + - "L0.2006[1762,2000] 131ns |----------------------------------------L0.2006-----------------------------------------|" + - "L0.2020[1762,2000] 132ns |----------------------------------------L0.2020-----------------------------------------|" + - "L0.2034[1762,2000] 133ns |----------------------------------------L0.2034-----------------------------------------|" + - "L0.2160[1762,2000] 134ns |----------------------------------------L0.2160-----------------------------------------|" + - "L0.2174[1762,2000] 135ns |----------------------------------------L0.2174-----------------------------------------|" + - "L0.2048[1762,2000] 136ns |----------------------------------------L0.2048-----------------------------------------|" + - "L0.2062[1762,2000] 137ns |----------------------------------------L0.2062-----------------------------------------|" + - "L0.2076[1762,2000] 138ns |----------------------------------------L0.2076-----------------------------------------|" + - "L0.2090[1762,2000] 139ns |----------------------------------------L0.2090-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1762,2000] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1852, L0.1866, L0.1880, L0.1894, L0.1908, L0.1922, L0.1936, L0.1950, L0.1964, L0.1978, L0.1992, L0.2006, L0.2020, L0.2034, L0.2048, L0.2062, L0.2076, L0.2090, L0.2160, L0.2174" + - " Creating 1 files" + - "**** Simulation run 315, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.2104[1762,2000] 140ns |----------------------------------------L0.2104-----------------------------------------|" + - "L0.2118[1762,2000] 141ns |----------------------------------------L0.2118-----------------------------------------|" + - "L0.2132[1762,2000] 142ns |----------------------------------------L0.2132-----------------------------------------|" + - "L0.2146[1762,2000] 143ns |----------------------------------------L0.2146-----------------------------------------|" + - "L0.2188[1762,2000] 144ns |----------------------------------------L0.2188-----------------------------------------|" + - "L0.2202[1762,2000] 145ns |----------------------------------------L0.2202-----------------------------------------|" + - "L0.2216[1762,2000] 146ns |----------------------------------------L0.2216-----------------------------------------|" + - "L0.2230[1762,2000] 147ns |----------------------------------------L0.2230-----------------------------------------|" + - "L0.2244[1762,2000] 148ns |----------------------------------------L0.2244-----------------------------------------|" + - "L0.2258[1762,2000] 149ns |----------------------------------------L0.2258-----------------------------------------|" + - "L0.2272[1762,2000] 150ns |----------------------------------------L0.2272-----------------------------------------|" + - "L0.2286[1762,2000] 151ns |----------------------------------------L0.2286-----------------------------------------|" + - "L0.2300[1762,2000] 152ns |----------------------------------------L0.2300-----------------------------------------|" + - "L0.2314[1762,2000] 153ns |----------------------------------------L0.2314-----------------------------------------|" - "L0.2328[1762,2000] 154ns |----------------------------------------L0.2328-----------------------------------------|" - "L0.2342[1762,2000] 155ns |----------------------------------------L0.2342-----------------------------------------|" - "L0.2356[1762,2000] 156ns |----------------------------------------L0.2356-----------------------------------------|" - "L0.2370[1762,2000] 157ns |----------------------------------------L0.2370-----------------------------------------|" - "L0.2384[1762,2000] 158ns |----------------------------------------L0.2384-----------------------------------------|" - "L0.2398[1762,2000] 159ns |----------------------------------------L0.2398-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[1762,2000] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2104, L0.2118, L0.2132, L0.2146, L0.2188, L0.2202, L0.2216, L0.2230, L0.2244, L0.2258, L0.2272, L0.2286, L0.2300, L0.2314, L0.2328, L0.2342, L0.2356, L0.2370, L0.2384, L0.2398" + - " Creating 1 files" + - "**** Simulation run 316, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" + - "L0, all files 0b " - "L0.2412[1762,2000] 160ns |----------------------------------------L0.2412-----------------------------------------|" - "L0.2426[1762,2000] 161ns |----------------------------------------L0.2426-----------------------------------------|" - "L0.2439[1762,2000] 162ns |----------------------------------------L0.2439-----------------------------------------|" @@ -8909,665 +8694,889 @@ async fn stuck_l0_large_l0s() { - "L0.2491[1762,2000] 166ns |----------------------------------------L0.2491-----------------------------------------|" - "L0.2504[1762,2000] 167ns |----------------------------------------L0.2504-----------------------------------------|" - "L0.2517[1762,2000] 168ns |----------------------------------------L0.2517-----------------------------------------|" - - "L0.2530[1762,2000] 169ns |----------------------------------------L0.2530-----------------------------------------|" - - "L0.2543[1762,2000] 170ns |----------------------------------------L0.2543-----------------------------------------|" - - "L0.2556[1762,2000] 171ns |----------------------------------------L0.2556-----------------------------------------|" - - "L0.2569[1762,2000] 172ns |----------------------------------------L0.2569-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[1762,2000] 172ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2328, L0.2342, L0.2356, L0.2370, L0.2384, L0.2398, L0.2412, L0.2426, L0.2439, L0.2452, L0.2465, L0.2478, L0.2491, L0.2504, L0.2517, L0.2530, L0.2543, L0.2556, L0.2569, L0.3032" - - " Creating 1 files" - - "**** Simulation run 324, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3033[2001,2086] 172ns |----------------------------------------L0.3033-----------------------------------------|" - - "L0.2583[2001,2086] 173ns |----------------------------------------L0.2583-----------------------------------------|" - - "L0.2596[2001,2086] 174ns |----------------------------------------L0.2596-----------------------------------------|" - - "L0.2609[2001,2086] 175ns |----------------------------------------L0.2609-----------------------------------------|" - - "L0.2622[2001,2086] 176ns |----------------------------------------L0.2622-----------------------------------------|" - - "L0.2635[2001,2086] 177ns |----------------------------------------L0.2635-----------------------------------------|" - - "L0.2648[2001,2086] 178ns |----------------------------------------L0.2648-----------------------------------------|" - - "L0.2661[2001,2086] 179ns |----------------------------------------L0.2661-----------------------------------------|" - - "L0.2674[2001,2086] 180ns |----------------------------------------L0.2674-----------------------------------------|" - - "L0.2687[2001,2086] 181ns |----------------------------------------L0.2687-----------------------------------------|" - - "L0.2700[2001,2086] 182ns |----------------------------------------L0.2700-----------------------------------------|" - - "L0.2713[2001,2086] 183ns |----------------------------------------L0.2713-----------------------------------------|" - - "L0.2726[2001,2086] 184ns |----------------------------------------L0.2726-----------------------------------------|" - - "L0.2739[2001,2086] 185ns |----------------------------------------L0.2739-----------------------------------------|" - - "L0.2752[2001,2086] 186ns |----------------------------------------L0.2752-----------------------------------------|" - - "L0.2765[2001,2086] 187ns |----------------------------------------L0.2765-----------------------------------------|" - - "L0.2778[2001,2086] 188ns |----------------------------------------L0.2778-----------------------------------------|" - - "L0.2791[2001,2086] 189ns |----------------------------------------L0.2791-----------------------------------------|" - - "L0.2804[2001,2086] 190ns |----------------------------------------L0.2804-----------------------------------------|" - - "L0.2817[2001,2086] 191ns |----------------------------------------L0.2817-----------------------------------------|" + - "L0.2530[1762,2000] 169ns |----------------------------------------L0.2530-----------------------------------------|" + - "L0.2543[1762,2000] 170ns |----------------------------------------L0.2543-----------------------------------------|" + - "L0.2556[1762,2000] 171ns |----------------------------------------L0.2556-----------------------------------------|" + - "L0.2569[1762,2000] 172ns |----------------------------------------L0.2569-----------------------------------------|" + - "L0.2582[1762,2000] 173ns |----------------------------------------L0.2582-----------------------------------------|" + - "L0.2595[1762,2000] 174ns |----------------------------------------L0.2595-----------------------------------------|" + - "L0.2608[1762,2000] 175ns |----------------------------------------L0.2608-----------------------------------------|" + - "L0.2621[1762,2000] 176ns |----------------------------------------L0.2621-----------------------------------------|" + - "L0.2634[1762,2000] 177ns 
|----------------------------------------L0.2634-----------------------------------------|" + - "L0.2647[1762,2000] 178ns |----------------------------------------L0.2647-----------------------------------------|" + - "L0.2660[1762,2000] 179ns |----------------------------------------L0.2660-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[2001,2086] 191ns |------------------------------------------L0.?------------------------------------------|" - - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2583, L0.2596, L0.2609, L0.2622, L0.2635, L0.2648, L0.2661, L0.2674, L0.2687, L0.2700, L0.2713, L0.2726, L0.2739, L0.2752, L0.2765, L0.2778, L0.2791, L0.2804, L0.2817, L0.3033" - - " Creating 1 files" - - "**** Simulation run 325, type=compact(ManySmallFiles). 20 Input Files, 2kb total:" - - "L0 " - - "L0.3034[2087,1720000] 172ns 1kb|------------------------------------L0.3034------------------------------------| " - - "L0.2584[2087,1730000] 173ns 10b|------------------------------------L0.2584------------------------------------| " - - "L0.2597[2087,1740000] 174ns 10b|------------------------------------L0.2597------------------------------------| " - - "L0.2610[2087,1750000] 175ns 10b|------------------------------------L0.2610-------------------------------------| " - - "L0.2623[2087,1760000] 176ns 10b|------------------------------------L0.2623-------------------------------------| " - - "L0.2636[2087,1770000] 177ns 10b|-------------------------------------L0.2636-------------------------------------| " - - "L0.2649[2087,1780000] 178ns 10b|-------------------------------------L0.2649-------------------------------------| " - - "L0.2662[2087,1790000] 179ns 10b|-------------------------------------L0.2662--------------------------------------| " - - "L0.2675[2087,1800000] 180ns 10b|-------------------------------------L0.2675--------------------------------------| " - - "L0.2688[2087,1810000] 181ns 10b|--------------------------------------L0.2688--------------------------------------| " - - "L0.2701[2087,1820000] 182ns 10b|--------------------------------------L0.2701--------------------------------------| " - - "L0.2714[2087,1830000] 183ns 10b|--------------------------------------L0.2714---------------------------------------| " - - "L0.2727[2087,1840000] 184ns 10b|--------------------------------------L0.2727---------------------------------------| " - - "L0.2740[2087,1850000] 185ns 10b|---------------------------------------L0.2740---------------------------------------| " - - "L0.2753[2087,1860000] 186ns 10b|---------------------------------------L0.2753---------------------------------------| " - - "L0.2766[2087,1870000] 187ns 10b|---------------------------------------L0.2766----------------------------------------| " - - "L0.2779[2087,1880000] 188ns 10b|---------------------------------------L0.2779----------------------------------------| " - - "L0.2792[2087,1890000] 189ns 10b|----------------------------------------L0.2792----------------------------------------| " - - "L0.2805[2087,1900000] 190ns 10b|----------------------------------------L0.2805----------------------------------------| " - - "L0.2818[2087,1910000] 191ns 10b|----------------------------------------L0.2818-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 2kb total:" - - "L0, all files 2kb " - - "L0.?[2087,1910000] 191ns 
|------------------------------------------L0.?------------------------------------------|" + - "L0.?[1762,2000] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2584, L0.2597, L0.2610, L0.2623, L0.2636, L0.2649, L0.2662, L0.2675, L0.2688, L0.2701, L0.2714, L0.2727, L0.2740, L0.2753, L0.2766, L0.2779, L0.2792, L0.2805, L0.2818, L0.3034" + - " Soft Deleting 20 files: L0.2412, L0.2426, L0.2439, L0.2452, L0.2465, L0.2478, L0.2491, L0.2504, L0.2517, L0.2530, L0.2543, L0.2556, L0.2569, L0.2582, L0.2595, L0.2608, L0.2621, L0.2634, L0.2647, L0.2660" - " Creating 1 files" - - "**** Simulation run 326, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 317, type=compact(ManySmallFiles). 20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3045[162,321] 172ns |----------------------------------------L0.3045-----------------------------------------|" - - "L0.2572[173,321] 173ns |-------------------------------------L0.2572-------------------------------------| " - - "L0.2585[174,321] 174ns |-------------------------------------L0.2585-------------------------------------| " - - "L0.2598[175,321] 175ns |------------------------------------L0.2598-------------------------------------| " - - "L0.2611[176,321] 176ns |------------------------------------L0.2611-------------------------------------| " - - "L0.2624[177,321] 177ns |------------------------------------L0.2624------------------------------------| " - - "L0.2637[178,321] 178ns |-----------------------------------L0.2637------------------------------------| " - - "L0.2650[179,321] 179ns |-----------------------------------L0.2650------------------------------------| " - - "L0.2663[180,321] 180ns |-----------------------------------L0.2663-----------------------------------| " - - "L0.2676[181,321] 181ns |-----------------------------------L0.2676-----------------------------------| " - - "L0.2689[182,321] 182ns |----------------------------------L0.2689-----------------------------------| " - - "L0.2702[183,321] 183ns |----------------------------------L0.2702-----------------------------------| " - - "L0.2715[184,321] 184ns |----------------------------------L0.2715----------------------------------| " - - "L0.2728[185,321] 185ns |---------------------------------L0.2728----------------------------------| " - - "L0.2741[186,321] 186ns |---------------------------------L0.2741----------------------------------| " - - "L0.2754[187,321] 187ns |---------------------------------L0.2754---------------------------------| " - - "L0.2767[188,321] 188ns |---------------------------------L0.2767---------------------------------| " - - "L0.2780[189,321] 189ns |--------------------------------L0.2780---------------------------------| " - - "L0.2793[190,321] 190ns |--------------------------------L0.2793---------------------------------| " - - "L0.2806[191,321] 191ns |--------------------------------L0.2806--------------------------------| " + - "L0.2673[1762,2000] 180ns |----------------------------------------L0.2673-----------------------------------------|" + - "L0.2686[1762,2000] 181ns |----------------------------------------L0.2686-----------------------------------------|" + - "L0.2699[1762,2000] 182ns |----------------------------------------L0.2699-----------------------------------------|" + - "L0.2712[1762,2000] 183ns 
|----------------------------------------L0.2712-----------------------------------------|" + - "L0.2725[1762,2000] 184ns |----------------------------------------L0.2725-----------------------------------------|" + - "L0.2738[1762,2000] 185ns |----------------------------------------L0.2738-----------------------------------------|" + - "L0.2751[1762,2000] 186ns |----------------------------------------L0.2751-----------------------------------------|" + - "L0.2764[1762,2000] 187ns |----------------------------------------L0.2764-----------------------------------------|" + - "L0.2777[1762,2000] 188ns |----------------------------------------L0.2777-----------------------------------------|" + - "L0.2790[1762,2000] 189ns |----------------------------------------L0.2790-----------------------------------------|" + - "L0.2803[1762,2000] 190ns |----------------------------------------L0.2803-----------------------------------------|" + - "L0.2816[1762,2000] 191ns |----------------------------------------L0.2816-----------------------------------------|" + - "L0.2829[1762,2000] 192ns |----------------------------------------L0.2829-----------------------------------------|" + - "L0.2842[1762,2000] 193ns |----------------------------------------L0.2842-----------------------------------------|" + - "L0.2855[1762,2000] 194ns |----------------------------------------L0.2855-----------------------------------------|" + - "L0.2868[1762,2000] 195ns |----------------------------------------L0.2868-----------------------------------------|" + - "L0.2881[1762,2000] 196ns |----------------------------------------L0.2881-----------------------------------------|" + - "L0.2894[1762,2000] 197ns |----------------------------------------L0.2894-----------------------------------------|" + - "L0.2907[1762,2000] 198ns |----------------------------------------L0.2907-----------------------------------------|" + - "L0.2920[1762,2000] 199ns |----------------------------------------L0.2920-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[162,321] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1762,2000] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2572, L0.2585, L0.2598, L0.2611, L0.2624, L0.2637, L0.2650, L0.2663, L0.2676, L0.2689, L0.2702, L0.2715, L0.2728, L0.2741, L0.2754, L0.2767, L0.2780, L0.2793, L0.2806, L0.3045" + - " Soft Deleting 20 files: L0.2673, L0.2686, L0.2699, L0.2712, L0.2725, L0.2738, L0.2751, L0.2764, L0.2777, L0.2790, L0.2803, L0.2816, L0.2829, L0.2842, L0.2855, L0.2868, L0.2881, L0.2894, L0.2907, L0.2920" - " Creating 1 files" - - "**** Simulation run 327, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3046[322,481] 172ns |----------------------------------------L0.3046-----------------------------------------|" - - "L0.2573[322,481] 173ns |----------------------------------------L0.2573-----------------------------------------|" - - "L0.2586[322,481] 174ns |----------------------------------------L0.2586-----------------------------------------|" - - "L0.2599[322,481] 175ns |----------------------------------------L0.2599-----------------------------------------|" - - "L0.2612[322,481] 176ns |----------------------------------------L0.2612-----------------------------------------|" - - "L0.2625[322,481] 177ns |----------------------------------------L0.2625-----------------------------------------|" - - "L0.2638[322,481] 178ns |----------------------------------------L0.2638-----------------------------------------|" - - "L0.2651[322,481] 179ns |----------------------------------------L0.2651-----------------------------------------|" - - "L0.2664[322,481] 180ns |----------------------------------------L0.2664-----------------------------------------|" - - "L0.2677[322,481] 181ns |----------------------------------------L0.2677-----------------------------------------|" - - "L0.2690[322,481] 182ns |----------------------------------------L0.2690-----------------------------------------|" - - "L0.2703[322,481] 183ns |----------------------------------------L0.2703-----------------------------------------|" - - "L0.2716[322,481] 184ns |----------------------------------------L0.2716-----------------------------------------|" - - "L0.2729[322,481] 185ns |----------------------------------------L0.2729-----------------------------------------|" - - "L0.2742[322,481] 186ns |----------------------------------------L0.2742-----------------------------------------|" - - "L0.2755[322,481] 187ns |----------------------------------------L0.2755-----------------------------------------|" - - "L0.2768[322,481] 188ns |----------------------------------------L0.2768-----------------------------------------|" - - "L0.2781[322,481] 189ns |----------------------------------------L0.2781-----------------------------------------|" - - "L0.2794[322,481] 190ns |----------------------------------------L0.2794-----------------------------------------|" - - "L0.2807[322,481] 191ns |----------------------------------------L0.2807-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[322,481] 191ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2573, L0.2586, L0.2599, L0.2612, L0.2625, L0.2638, L0.2651, L0.2664, L0.2677, L0.2690, L0.2703, L0.2716, L0.2729, L0.2742, L0.2755, L0.2768, L0.2781, L0.2794, L0.2807, L0.3046" + - " Soft Deleting 20 files: L0.1292, L0.1306, L0.1320, L0.1334, L0.1348, L0.1362, L0.1376, L0.1390, L0.1404, L0.1418, L0.1432, L0.1446, L0.1460, L0.1474, L0.1488, L0.1502, L0.1516, L0.1530, L0.1544, L0.1558" - " Creating 1 files" - - "**** Simulation run 328, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 318, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3037[482,641] 172ns |----------------------------------------L0.3037-----------------------------------------|" - - "L0.2574[482,641] 173ns |----------------------------------------L0.2574-----------------------------------------|" - - "L0.2587[482,641] 174ns |----------------------------------------L0.2587-----------------------------------------|" - - "L0.2600[482,641] 175ns |----------------------------------------L0.2600-----------------------------------------|" - - "L0.2613[482,641] 176ns |----------------------------------------L0.2613-----------------------------------------|" - - "L0.2626[482,641] 177ns |----------------------------------------L0.2626-----------------------------------------|" - - "L0.2639[482,641] 178ns |----------------------------------------L0.2639-----------------------------------------|" - - "L0.2652[482,641] 179ns |----------------------------------------L0.2652-----------------------------------------|" - - "L0.2665[482,641] 180ns |----------------------------------------L0.2665-----------------------------------------|" - - "L0.2678[482,641] 181ns |----------------------------------------L0.2678-----------------------------------------|" - - "L0.2691[482,641] 182ns |----------------------------------------L0.2691-----------------------------------------|" - - "L0.2704[482,641] 183ns |----------------------------------------L0.2704-----------------------------------------|" - - "L0.2717[482,641] 184ns |----------------------------------------L0.2717-----------------------------------------|" - - "L0.2730[482,641] 185ns |----------------------------------------L0.2730-----------------------------------------|" - - "L0.2743[482,641] 186ns |----------------------------------------L0.2743-----------------------------------------|" - - "L0.2756[482,641] 187ns |----------------------------------------L0.2756-----------------------------------------|" - - "L0.2769[482,641] 188ns |----------------------------------------L0.2769-----------------------------------------|" - - "L0.2782[482,641] 189ns |----------------------------------------L0.2782-----------------------------------------|" - - "L0.2795[482,641] 190ns |----------------------------------------L0.2795-----------------------------------------|" - - "L0.2808[482,641] 191ns |----------------------------------------L0.2808-----------------------------------------|" + - "L0.1572[1762,2000] 100ns |----------------------------------------L0.1572-----------------------------------------|" + - "L0.1586[1762,2000] 101ns |----------------------------------------L0.1586-----------------------------------------|" + - "L0.1600[1762,2000] 102ns |----------------------------------------L0.1600-----------------------------------------|" + - "L0.1614[1762,2000] 103ns |----------------------------------------L0.1614-----------------------------------------|" + - "L0.1628[1762,2000] 104ns |----------------------------------------L0.1628-----------------------------------------|" + - "L0.1642[1762,2000] 105ns |----------------------------------------L0.1642-----------------------------------------|" + - "L0.1656[1762,2000] 106ns |----------------------------------------L0.1656-----------------------------------------|" + - "L0.1670[1762,2000] 107ns |----------------------------------------L0.1670-----------------------------------------|" + - "L0.1684[1762,2000] 108ns |----------------------------------------L0.1684-----------------------------------------|" + - 
"L0.1698[1762,2000] 109ns |----------------------------------------L0.1698-----------------------------------------|" + - "L0.1712[1762,2000] 110ns |----------------------------------------L0.1712-----------------------------------------|" + - "L0.1726[1762,2000] 111ns |----------------------------------------L0.1726-----------------------------------------|" + - "L0.1740[1762,2000] 112ns |----------------------------------------L0.1740-----------------------------------------|" + - "L0.1754[1762,2000] 113ns |----------------------------------------L0.1754-----------------------------------------|" + - "L0.1768[1762,2000] 114ns |----------------------------------------L0.1768-----------------------------------------|" + - "L0.1782[1762,2000] 115ns |----------------------------------------L0.1782-----------------------------------------|" + - "L0.1796[1762,2000] 116ns |----------------------------------------L0.1796-----------------------------------------|" + - "L0.1810[1762,2000] 117ns |----------------------------------------L0.1810-----------------------------------------|" + - "L0.1824[1762,2000] 118ns |----------------------------------------L0.1824-----------------------------------------|" + - "L0.1838[1762,2000] 119ns |----------------------------------------L0.1838-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[482,641] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[1762,2000] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2574, L0.2587, L0.2600, L0.2613, L0.2626, L0.2639, L0.2652, L0.2665, L0.2678, L0.2691, L0.2704, L0.2717, L0.2730, L0.2743, L0.2756, L0.2769, L0.2782, L0.2795, L0.2808, L0.3037" + - " Soft Deleting 20 files: L0.1572, L0.1586, L0.1600, L0.1614, L0.1628, L0.1642, L0.1656, L0.1670, L0.1684, L0.1698, L0.1712, L0.1726, L0.1740, L0.1754, L0.1768, L0.1782, L0.1796, L0.1810, L0.1824, L0.1838" - " Creating 1 files" - - "**** Simulation run 329, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 319, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3038[642,801] 172ns |----------------------------------------L0.3038-----------------------------------------|" - - "L0.2575[642,801] 173ns |----------------------------------------L0.2575-----------------------------------------|" - - "L0.2588[642,801] 174ns |----------------------------------------L0.2588-----------------------------------------|" - - "L0.2601[642,801] 175ns |----------------------------------------L0.2601-----------------------------------------|" - - "L0.2614[642,801] 176ns |----------------------------------------L0.2614-----------------------------------------|" - - "L0.2627[642,801] 177ns |----------------------------------------L0.2627-----------------------------------------|" - - "L0.2640[642,801] 178ns |----------------------------------------L0.2640-----------------------------------------|" - - "L0.2653[642,801] 179ns |----------------------------------------L0.2653-----------------------------------------|" - - "L0.2666[642,801] 180ns |----------------------------------------L0.2666-----------------------------------------|" - - "L0.2679[642,801] 181ns |----------------------------------------L0.2679-----------------------------------------|" - - "L0.2692[642,801] 182ns |----------------------------------------L0.2692-----------------------------------------|" - - "L0.2705[642,801] 183ns |----------------------------------------L0.2705-----------------------------------------|" - - "L0.2718[642,801] 184ns |----------------------------------------L0.2718-----------------------------------------|" - - "L0.2731[642,801] 185ns |----------------------------------------L0.2731-----------------------------------------|" - - "L0.2744[642,801] 186ns |----------------------------------------L0.2744-----------------------------------------|" - - "L0.2757[642,801] 187ns |----------------------------------------L0.2757-----------------------------------------|" - - "L0.2770[642,801] 188ns |----------------------------------------L0.2770-----------------------------------------|" - - "L0.2783[642,801] 189ns |----------------------------------------L0.2783-----------------------------------------|" - - "L0.2796[642,801] 190ns |----------------------------------------L0.2796-----------------------------------------|" - - "L0.2809[642,801] 191ns |----------------------------------------L0.2809-----------------------------------------|" + - "L0.429[2001,2086] 20ns |-----------------------------------------L0.429-----------------------------------------|" + - "L0.443[2001,2086] 21ns |-----------------------------------------L0.443-----------------------------------------|" + - "L0.457[2001,2086] 22ns |-----------------------------------------L0.457-----------------------------------------|" + - "L0.471[2001,2086] 23ns |-----------------------------------------L0.471-----------------------------------------|" + - "L0.485[2001,2086] 24ns |-----------------------------------------L0.485-----------------------------------------|" + - "L0.499[2001,2086] 25ns |-----------------------------------------L0.499-----------------------------------------|" + - "L0.513[2001,2086] 26ns |-----------------------------------------L0.513-----------------------------------------|" + - "L0.527[2001,2086] 27ns |-----------------------------------------L0.527-----------------------------------------|" + - "L0.565[2001,2086] 28ns |-----------------------------------------L0.565-----------------------------------------|" + - "L0.579[2001,2086] 
29ns |-----------------------------------------L0.579-----------------------------------------|" + - "L0.593[2001,2086] 30ns |-----------------------------------------L0.593-----------------------------------------|" + - "L0.607[2001,2086] 31ns |-----------------------------------------L0.607-----------------------------------------|" + - "L0.621[2001,2086] 32ns |-----------------------------------------L0.621-----------------------------------------|" + - "L0.635[2001,2086] 33ns |-----------------------------------------L0.635-----------------------------------------|" + - "L0.649[2001,2086] 34ns |-----------------------------------------L0.649-----------------------------------------|" + - "L0.663[2001,2086] 35ns |-----------------------------------------L0.663-----------------------------------------|" + - "L0.677[2001,2086] 36ns |-----------------------------------------L0.677-----------------------------------------|" + - "L0.691[2001,2086] 37ns |-----------------------------------------L0.691-----------------------------------------|" + - "L0.705[2001,2086] 38ns |-----------------------------------------L0.705-----------------------------------------|" + - "L0.719[2001,2086] 39ns |-----------------------------------------L0.719-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[642,801] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 39ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2575, L0.2588, L0.2601, L0.2614, L0.2627, L0.2640, L0.2653, L0.2666, L0.2679, L0.2692, L0.2705, L0.2718, L0.2731, L0.2744, L0.2757, L0.2770, L0.2783, L0.2796, L0.2809, L0.3038" + - " Soft Deleting 20 files: L0.429, L0.443, L0.457, L0.471, L0.485, L0.499, L0.513, L0.527, L0.565, L0.579, L0.593, L0.607, L0.621, L0.635, L0.649, L0.663, L0.677, L0.691, L0.705, L0.719" - " Creating 1 files" - - "**** Simulation run 330, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 320, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3039[802,961] 172ns |----------------------------------------L0.3039-----------------------------------------|" - - "L0.2576[802,961] 173ns |----------------------------------------L0.2576-----------------------------------------|" - - "L0.2589[802,961] 174ns |----------------------------------------L0.2589-----------------------------------------|" - - "L0.2602[802,961] 175ns |----------------------------------------L0.2602-----------------------------------------|" - - "L0.2615[802,961] 176ns |----------------------------------------L0.2615-----------------------------------------|" - - "L0.2628[802,961] 177ns |----------------------------------------L0.2628-----------------------------------------|" - - "L0.2641[802,961] 178ns |----------------------------------------L0.2641-----------------------------------------|" - - "L0.2654[802,961] 179ns |----------------------------------------L0.2654-----------------------------------------|" - - "L0.2667[802,961] 180ns |----------------------------------------L0.2667-----------------------------------------|" - - "L0.2680[802,961] 181ns |----------------------------------------L0.2680-----------------------------------------|" - - "L0.2693[802,961] 182ns |----------------------------------------L0.2693-----------------------------------------|" - - "L0.2706[802,961] 183ns |----------------------------------------L0.2706-----------------------------------------|" - - "L0.2719[802,961] 184ns |----------------------------------------L0.2719-----------------------------------------|" - - "L0.2732[802,961] 185ns |----------------------------------------L0.2732-----------------------------------------|" - - "L0.2745[802,961] 186ns |----------------------------------------L0.2745-----------------------------------------|" - - "L0.2758[802,961] 187ns |----------------------------------------L0.2758-----------------------------------------|" - - "L0.2771[802,961] 188ns |----------------------------------------L0.2771-----------------------------------------|" - - "L0.2784[802,961] 189ns |----------------------------------------L0.2784-----------------------------------------|" - - "L0.2797[802,961] 190ns |----------------------------------------L0.2797-----------------------------------------|" - - "L0.2810[802,961] 191ns |----------------------------------------L0.2810-----------------------------------------|" + - "L0.733[2001,2086] 40ns |-----------------------------------------L0.733-----------------------------------------|" + - "L0.747[2001,2086] 41ns |-----------------------------------------L0.747-----------------------------------------|" + - "L0.761[2001,2086] 42ns |-----------------------------------------L0.761-----------------------------------------|" + - "L0.775[2001,2086] 43ns |-----------------------------------------L0.775-----------------------------------------|" + - "L0.789[2001,2086] 44ns |-----------------------------------------L0.789-----------------------------------------|" + - "L0.803[2001,2086] 45ns |-----------------------------------------L0.803-----------------------------------------|" + - "L0.817[2001,2086] 46ns |-----------------------------------------L0.817-----------------------------------------|" + - "L0.831[2001,2086] 47ns |-----------------------------------------L0.831-----------------------------------------|" + - "L0.845[2001,2086] 48ns |-----------------------------------------L0.845-----------------------------------------|" + - "L0.859[2001,2086] 
49ns |-----------------------------------------L0.859-----------------------------------------|" + - "L0.873[2001,2086] 50ns |-----------------------------------------L0.873-----------------------------------------|" + - "L0.887[2001,2086] 51ns |-----------------------------------------L0.887-----------------------------------------|" + - "L0.901[2001,2086] 52ns |-----------------------------------------L0.901-----------------------------------------|" + - "L0.915[2001,2086] 53ns |-----------------------------------------L0.915-----------------------------------------|" + - "L0.929[2001,2086] 54ns |-----------------------------------------L0.929-----------------------------------------|" + - "L0.943[2001,2086] 55ns |-----------------------------------------L0.943-----------------------------------------|" + - "L0.957[2001,2086] 56ns |-----------------------------------------L0.957-----------------------------------------|" + - "L0.971[2001,2086] 57ns |-----------------------------------------L0.971-----------------------------------------|" + - "L0.985[2001,2086] 58ns |-----------------------------------------L0.985-----------------------------------------|" + - "L0.999[2001,2086] 59ns |-----------------------------------------L0.999-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[802,961] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 59ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2576, L0.2589, L0.2602, L0.2615, L0.2628, L0.2641, L0.2654, L0.2667, L0.2680, L0.2693, L0.2706, L0.2719, L0.2732, L0.2745, L0.2758, L0.2771, L0.2784, L0.2797, L0.2810, L0.3039" + - " Soft Deleting 20 files: L0.733, L0.747, L0.761, L0.775, L0.789, L0.803, L0.817, L0.831, L0.845, L0.859, L0.873, L0.887, L0.901, L0.915, L0.929, L0.943, L0.957, L0.971, L0.985, L0.999" - " Creating 1 files" - - "**** Simulation run 331, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 321, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3040[962,1121] 172ns |----------------------------------------L0.3040-----------------------------------------|" - - "L0.2577[962,1121] 173ns |----------------------------------------L0.2577-----------------------------------------|" - - "L0.2590[962,1121] 174ns |----------------------------------------L0.2590-----------------------------------------|" - - "L0.2603[962,1121] 175ns |----------------------------------------L0.2603-----------------------------------------|" - - "L0.2616[962,1121] 176ns |----------------------------------------L0.2616-----------------------------------------|" - - "L0.2629[962,1121] 177ns |----------------------------------------L0.2629-----------------------------------------|" - - "L0.2642[962,1121] 178ns |----------------------------------------L0.2642-----------------------------------------|" - - "L0.2655[962,1121] 179ns |----------------------------------------L0.2655-----------------------------------------|" - - "L0.2668[962,1121] 180ns |----------------------------------------L0.2668-----------------------------------------|" - - "L0.2681[962,1121] 181ns |----------------------------------------L0.2681-----------------------------------------|" - - "L0.2694[962,1121] 182ns |----------------------------------------L0.2694-----------------------------------------|" - - "L0.2707[962,1121] 183ns |----------------------------------------L0.2707-----------------------------------------|" - - "L0.2720[962,1121] 184ns |----------------------------------------L0.2720-----------------------------------------|" - - "L0.2733[962,1121] 185ns |----------------------------------------L0.2733-----------------------------------------|" - - "L0.2746[962,1121] 186ns |----------------------------------------L0.2746-----------------------------------------|" - - "L0.2759[962,1121] 187ns |----------------------------------------L0.2759-----------------------------------------|" - - "L0.2772[962,1121] 188ns |----------------------------------------L0.2772-----------------------------------------|" - - "L0.2785[962,1121] 189ns |----------------------------------------L0.2785-----------------------------------------|" - - "L0.2798[962,1121] 190ns |----------------------------------------L0.2798-----------------------------------------|" - - "L0.2811[962,1121] 191ns |----------------------------------------L0.2811-----------------------------------------|" + - "L0.1013[2001,2086] 60ns |----------------------------------------L0.1013-----------------------------------------|" + - "L0.1027[2001,2086] 61ns |----------------------------------------L0.1027-----------------------------------------|" + - "L0.1041[2001,2086] 62ns |----------------------------------------L0.1041-----------------------------------------|" + - "L0.1055[2001,2086] 63ns |----------------------------------------L0.1055-----------------------------------------|" + - "L0.1069[2001,2086] 64ns |----------------------------------------L0.1069-----------------------------------------|" + - "L0.1083[2001,2086] 65ns |----------------------------------------L0.1083-----------------------------------------|" + - "L0.1097[2001,2086] 66ns |----------------------------------------L0.1097-----------------------------------------|" + - "L0.1111[2001,2086] 67ns |----------------------------------------L0.1111-----------------------------------------|" + - "L0.1125[2001,2086] 68ns 
|----------------------------------------L0.1125-----------------------------------------|" + - "L0.1139[2001,2086] 69ns |----------------------------------------L0.1139-----------------------------------------|" + - "L0.1153[2001,2086] 70ns |----------------------------------------L0.1153-----------------------------------------|" + - "L0.1167[2001,2086] 71ns |----------------------------------------L0.1167-----------------------------------------|" + - "L0.1181[2001,2086] 72ns |----------------------------------------L0.1181-----------------------------------------|" + - "L0.1195[2001,2086] 73ns |----------------------------------------L0.1195-----------------------------------------|" + - "L0.1209[2001,2086] 74ns |----------------------------------------L0.1209-----------------------------------------|" + - "L0.1223[2001,2086] 75ns |----------------------------------------L0.1223-----------------------------------------|" + - "L0.1237[2001,2086] 76ns |----------------------------------------L0.1237-----------------------------------------|" + - "L0.1251[2001,2086] 77ns |----------------------------------------L0.1251-----------------------------------------|" + - "L0.1265[2001,2086] 78ns |----------------------------------------L0.1265-----------------------------------------|" + - "L0.1279[2001,2086] 79ns |----------------------------------------L0.1279-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[962,1121] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 79ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2577, L0.2590, L0.2603, L0.2616, L0.2629, L0.2642, L0.2655, L0.2668, L0.2681, L0.2694, L0.2707, L0.2720, L0.2733, L0.2746, L0.2759, L0.2772, L0.2785, L0.2798, L0.2811, L0.3040" + - " Soft Deleting 20 files: L0.1013, L0.1027, L0.1041, L0.1055, L0.1069, L0.1083, L0.1097, L0.1111, L0.1125, L0.1139, L0.1153, L0.1167, L0.1181, L0.1195, L0.1209, L0.1223, L0.1237, L0.1251, L0.1265, L0.1279" - " Creating 1 files" - - "**** Simulation run 332, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 322, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3041[1122,1281] 172ns |----------------------------------------L0.3041-----------------------------------------|" - - "L0.2578[1122,1281] 173ns |----------------------------------------L0.2578-----------------------------------------|" - - "L0.2591[1122,1281] 174ns |----------------------------------------L0.2591-----------------------------------------|" - - "L0.2604[1122,1281] 175ns |----------------------------------------L0.2604-----------------------------------------|" - - "L0.2617[1122,1281] 176ns |----------------------------------------L0.2617-----------------------------------------|" - - "L0.2630[1122,1281] 177ns |----------------------------------------L0.2630-----------------------------------------|" - - "L0.2643[1122,1281] 178ns |----------------------------------------L0.2643-----------------------------------------|" - - "L0.2656[1122,1281] 179ns |----------------------------------------L0.2656-----------------------------------------|" - - "L0.2669[1122,1281] 180ns |----------------------------------------L0.2669-----------------------------------------|" - - "L0.2682[1122,1281] 181ns |----------------------------------------L0.2682-----------------------------------------|" - - "L0.2695[1122,1281] 182ns |----------------------------------------L0.2695-----------------------------------------|" - - "L0.2708[1122,1281] 183ns |----------------------------------------L0.2708-----------------------------------------|" - - "L0.2721[1122,1281] 184ns |----------------------------------------L0.2721-----------------------------------------|" - - "L0.2734[1122,1281] 185ns |----------------------------------------L0.2734-----------------------------------------|" - - "L0.2747[1122,1281] 186ns |----------------------------------------L0.2747-----------------------------------------|" - - "L0.2760[1122,1281] 187ns |----------------------------------------L0.2760-----------------------------------------|" - - "L0.2773[1122,1281] 188ns |----------------------------------------L0.2773-----------------------------------------|" - - "L0.2786[1122,1281] 189ns |----------------------------------------L0.2786-----------------------------------------|" - - "L0.2799[1122,1281] 190ns |----------------------------------------L0.2799-----------------------------------------|" - - "L0.2812[1122,1281] 191ns |----------------------------------------L0.2812-----------------------------------------|" + - "L0.1293[2001,2086] 80ns |----------------------------------------L0.1293-----------------------------------------|" + - "L0.1307[2001,2086] 81ns |----------------------------------------L0.1307-----------------------------------------|" + - "L0.1321[2001,2086] 82ns |----------------------------------------L0.1321-----------------------------------------|" + - "L0.1335[2001,2086] 83ns |----------------------------------------L0.1335-----------------------------------------|" + - "L0.1349[2001,2086] 84ns |----------------------------------------L0.1349-----------------------------------------|" + - "L0.1363[2001,2086] 85ns |----------------------------------------L0.1363-----------------------------------------|" + - "L0.1377[2001,2086] 86ns |----------------------------------------L0.1377-----------------------------------------|" + - "L0.1391[2001,2086] 87ns |----------------------------------------L0.1391-----------------------------------------|" + - "L0.1405[2001,2086] 88ns 
|----------------------------------------L0.1405-----------------------------------------|" + - "L0.1419[2001,2086] 89ns |----------------------------------------L0.1419-----------------------------------------|" + - "L0.1433[2001,2086] 90ns |----------------------------------------L0.1433-----------------------------------------|" + - "L0.1447[2001,2086] 91ns |----------------------------------------L0.1447-----------------------------------------|" + - "L0.1461[2001,2086] 92ns |----------------------------------------L0.1461-----------------------------------------|" + - "L0.1475[2001,2086] 93ns |----------------------------------------L0.1475-----------------------------------------|" + - "L0.1489[2001,2086] 94ns |----------------------------------------L0.1489-----------------------------------------|" + - "L0.1503[2001,2086] 95ns |----------------------------------------L0.1503-----------------------------------------|" + - "L0.1517[2001,2086] 96ns |----------------------------------------L0.1517-----------------------------------------|" + - "L0.1531[2001,2086] 97ns |----------------------------------------L0.1531-----------------------------------------|" + - "L0.1545[2001,2086] 98ns |----------------------------------------L0.1545-----------------------------------------|" + - "L0.1559[2001,2086] 99ns |----------------------------------------L0.1559-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1122,1281] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 99ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2578, L0.2591, L0.2604, L0.2617, L0.2630, L0.2643, L0.2656, L0.2669, L0.2682, L0.2695, L0.2708, L0.2721, L0.2734, L0.2747, L0.2760, L0.2773, L0.2786, L0.2799, L0.2812, L0.3041" + - " Soft Deleting 20 files: L0.1293, L0.1307, L0.1321, L0.1335, L0.1349, L0.1363, L0.1377, L0.1391, L0.1405, L0.1419, L0.1433, L0.1447, L0.1461, L0.1475, L0.1489, L0.1503, L0.1517, L0.1531, L0.1545, L0.1559" - " Creating 1 files" - - "**** Simulation run 333, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 323, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3042[1282,1441] 172ns |----------------------------------------L0.3042-----------------------------------------|" - - "L0.2579[1282,1441] 173ns |----------------------------------------L0.2579-----------------------------------------|" - - "L0.2592[1282,1441] 174ns |----------------------------------------L0.2592-----------------------------------------|" - - "L0.2605[1282,1441] 175ns |----------------------------------------L0.2605-----------------------------------------|" - - "L0.2618[1282,1441] 176ns |----------------------------------------L0.2618-----------------------------------------|" - - "L0.2631[1282,1441] 177ns |----------------------------------------L0.2631-----------------------------------------|" - - "L0.2644[1282,1441] 178ns |----------------------------------------L0.2644-----------------------------------------|" - - "L0.2657[1282,1441] 179ns |----------------------------------------L0.2657-----------------------------------------|" - - "L0.2670[1282,1441] 180ns |----------------------------------------L0.2670-----------------------------------------|" - - "L0.2683[1282,1441] 181ns |----------------------------------------L0.2683-----------------------------------------|" - - "L0.2696[1282,1441] 182ns |----------------------------------------L0.2696-----------------------------------------|" - - "L0.2709[1282,1441] 183ns |----------------------------------------L0.2709-----------------------------------------|" - - "L0.2722[1282,1441] 184ns |----------------------------------------L0.2722-----------------------------------------|" - - "L0.2735[1282,1441] 185ns |----------------------------------------L0.2735-----------------------------------------|" - - "L0.2748[1282,1441] 186ns |----------------------------------------L0.2748-----------------------------------------|" - - "L0.2761[1282,1441] 187ns |----------------------------------------L0.2761-----------------------------------------|" - - "L0.2774[1282,1441] 188ns |----------------------------------------L0.2774-----------------------------------------|" - - "L0.2787[1282,1441] 189ns |----------------------------------------L0.2787-----------------------------------------|" - - "L0.2800[1282,1441] 190ns |----------------------------------------L0.2800-----------------------------------------|" - - "L0.2813[1282,1441] 191ns |----------------------------------------L0.2813-----------------------------------------|" + - "L0.1573[2001,2086] 100ns |----------------------------------------L0.1573-----------------------------------------|" + - "L0.1587[2001,2086] 101ns |----------------------------------------L0.1587-----------------------------------------|" + - "L0.1601[2001,2086] 102ns |----------------------------------------L0.1601-----------------------------------------|" + - "L0.1615[2001,2086] 103ns |----------------------------------------L0.1615-----------------------------------------|" + - "L0.1629[2001,2086] 104ns |----------------------------------------L0.1629-----------------------------------------|" + - "L0.1643[2001,2086] 105ns |----------------------------------------L0.1643-----------------------------------------|" + - "L0.1657[2001,2086] 106ns |----------------------------------------L0.1657-----------------------------------------|" + - "L0.1671[2001,2086] 107ns |----------------------------------------L0.1671-----------------------------------------|" + - "L0.1685[2001,2086] 108ns 
|----------------------------------------L0.1685-----------------------------------------|" + - "L0.1699[2001,2086] 109ns |----------------------------------------L0.1699-----------------------------------------|" + - "L0.1713[2001,2086] 110ns |----------------------------------------L0.1713-----------------------------------------|" + - "L0.1727[2001,2086] 111ns |----------------------------------------L0.1727-----------------------------------------|" + - "L0.1741[2001,2086] 112ns |----------------------------------------L0.1741-----------------------------------------|" + - "L0.1755[2001,2086] 113ns |----------------------------------------L0.1755-----------------------------------------|" + - "L0.1769[2001,2086] 114ns |----------------------------------------L0.1769-----------------------------------------|" + - "L0.1783[2001,2086] 115ns |----------------------------------------L0.1783-----------------------------------------|" + - "L0.1797[2001,2086] 116ns |----------------------------------------L0.1797-----------------------------------------|" + - "L0.1811[2001,2086] 117ns |----------------------------------------L0.1811-----------------------------------------|" + - "L0.1825[2001,2086] 118ns |----------------------------------------L0.1825-----------------------------------------|" + - "L0.1839[2001,2086] 119ns |----------------------------------------L0.1839-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1282,1441] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 119ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2579, L0.2592, L0.2605, L0.2618, L0.2631, L0.2644, L0.2657, L0.2670, L0.2683, L0.2696, L0.2709, L0.2722, L0.2735, L0.2748, L0.2761, L0.2774, L0.2787, L0.2800, L0.2813, L0.3042" + - " Soft Deleting 20 files: L0.1573, L0.1587, L0.1601, L0.1615, L0.1629, L0.1643, L0.1657, L0.1671, L0.1685, L0.1699, L0.1713, L0.1727, L0.1741, L0.1755, L0.1769, L0.1783, L0.1797, L0.1811, L0.1825, L0.1839" - " Creating 1 files" - - "**** Simulation run 334, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 324, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3043[1442,1601] 172ns |----------------------------------------L0.3043-----------------------------------------|" - - "L0.2580[1442,1601] 173ns |----------------------------------------L0.2580-----------------------------------------|" - - "L0.2593[1442,1601] 174ns |----------------------------------------L0.2593-----------------------------------------|" - - "L0.2606[1442,1601] 175ns |----------------------------------------L0.2606-----------------------------------------|" - - "L0.2619[1442,1601] 176ns |----------------------------------------L0.2619-----------------------------------------|" - - "L0.2632[1442,1601] 177ns |----------------------------------------L0.2632-----------------------------------------|" - - "L0.2645[1442,1601] 178ns |----------------------------------------L0.2645-----------------------------------------|" - - "L0.2658[1442,1601] 179ns |----------------------------------------L0.2658-----------------------------------------|" - - "L0.2671[1442,1601] 180ns |----------------------------------------L0.2671-----------------------------------------|" - - "L0.2684[1442,1601] 181ns |----------------------------------------L0.2684-----------------------------------------|" - - "L0.2697[1442,1601] 182ns |----------------------------------------L0.2697-----------------------------------------|" - - "L0.2710[1442,1601] 183ns |----------------------------------------L0.2710-----------------------------------------|" - - "L0.2723[1442,1601] 184ns |----------------------------------------L0.2723-----------------------------------------|" - - "L0.2736[1442,1601] 185ns |----------------------------------------L0.2736-----------------------------------------|" - - "L0.2749[1442,1601] 186ns |----------------------------------------L0.2749-----------------------------------------|" - - "L0.2762[1442,1601] 187ns |----------------------------------------L0.2762-----------------------------------------|" - - "L0.2775[1442,1601] 188ns |----------------------------------------L0.2775-----------------------------------------|" - - "L0.2788[1442,1601] 189ns |----------------------------------------L0.2788-----------------------------------------|" - - "L0.2801[1442,1601] 190ns |----------------------------------------L0.2801-----------------------------------------|" - - "L0.2814[1442,1601] 191ns |----------------------------------------L0.2814-----------------------------------------|" + - "L0.1853[2001,2086] 120ns |----------------------------------------L0.1853-----------------------------------------|" + - "L0.1867[2001,2086] 121ns |----------------------------------------L0.1867-----------------------------------------|" + - "L0.1881[2001,2086] 122ns |----------------------------------------L0.1881-----------------------------------------|" + - "L0.1895[2001,2086] 123ns |----------------------------------------L0.1895-----------------------------------------|" + - "L0.1909[2001,2086] 124ns |----------------------------------------L0.1909-----------------------------------------|" + - "L0.1923[2001,2086] 125ns |----------------------------------------L0.1923-----------------------------------------|" + - "L0.1937[2001,2086] 126ns |----------------------------------------L0.1937-----------------------------------------|" + - "L0.1951[2001,2086] 127ns |----------------------------------------L0.1951-----------------------------------------|" + - "L0.1965[2001,2086] 128ns 
|----------------------------------------L0.1965-----------------------------------------|" + - "L0.1979[2001,2086] 129ns |----------------------------------------L0.1979-----------------------------------------|" + - "L0.1993[2001,2086] 130ns |----------------------------------------L0.1993-----------------------------------------|" + - "L0.2007[2001,2086] 131ns |----------------------------------------L0.2007-----------------------------------------|" + - "L0.2021[2001,2086] 132ns |----------------------------------------L0.2021-----------------------------------------|" + - "L0.2035[2001,2086] 133ns |----------------------------------------L0.2035-----------------------------------------|" + - "L0.2161[2001,2086] 134ns |----------------------------------------L0.2161-----------------------------------------|" + - "L0.2175[2001,2086] 135ns |----------------------------------------L0.2175-----------------------------------------|" + - "L0.2049[2001,2086] 136ns |----------------------------------------L0.2049-----------------------------------------|" + - "L0.2063[2001,2086] 137ns |----------------------------------------L0.2063-----------------------------------------|" + - "L0.2077[2001,2086] 138ns |----------------------------------------L0.2077-----------------------------------------|" + - "L0.2091[2001,2086] 139ns |----------------------------------------L0.2091-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1442,1601] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 139ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2580, L0.2593, L0.2606, L0.2619, L0.2632, L0.2645, L0.2658, L0.2671, L0.2684, L0.2697, L0.2710, L0.2723, L0.2736, L0.2749, L0.2762, L0.2775, L0.2788, L0.2801, L0.2814, L0.3043" + - " Soft Deleting 20 files: L0.1853, L0.1867, L0.1881, L0.1895, L0.1909, L0.1923, L0.1937, L0.1951, L0.1965, L0.1979, L0.1993, L0.2007, L0.2021, L0.2035, L0.2049, L0.2063, L0.2077, L0.2091, L0.2161, L0.2175" - " Creating 1 files" - - "**** Simulation run 335, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 325, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3044[1602,1761] 172ns |----------------------------------------L0.3044-----------------------------------------|" - - "L0.2581[1602,1761] 173ns |----------------------------------------L0.2581-----------------------------------------|" - - "L0.2594[1602,1761] 174ns |----------------------------------------L0.2594-----------------------------------------|" - - "L0.2607[1602,1761] 175ns |----------------------------------------L0.2607-----------------------------------------|" - - "L0.2620[1602,1761] 176ns |----------------------------------------L0.2620-----------------------------------------|" - - "L0.2633[1602,1761] 177ns |----------------------------------------L0.2633-----------------------------------------|" - - "L0.2646[1602,1761] 178ns |----------------------------------------L0.2646-----------------------------------------|" - - "L0.2659[1602,1761] 179ns |----------------------------------------L0.2659-----------------------------------------|" - - "L0.2672[1602,1761] 180ns |----------------------------------------L0.2672-----------------------------------------|" - - "L0.2685[1602,1761] 181ns |----------------------------------------L0.2685-----------------------------------------|" - - "L0.2698[1602,1761] 182ns |----------------------------------------L0.2698-----------------------------------------|" - - "L0.2711[1602,1761] 183ns |----------------------------------------L0.2711-----------------------------------------|" - - "L0.2724[1602,1761] 184ns |----------------------------------------L0.2724-----------------------------------------|" - - "L0.2737[1602,1761] 185ns |----------------------------------------L0.2737-----------------------------------------|" - - "L0.2750[1602,1761] 186ns |----------------------------------------L0.2750-----------------------------------------|" - - "L0.2763[1602,1761] 187ns |----------------------------------------L0.2763-----------------------------------------|" - - "L0.2776[1602,1761] 188ns |----------------------------------------L0.2776-----------------------------------------|" - - "L0.2789[1602,1761] 189ns |----------------------------------------L0.2789-----------------------------------------|" - - "L0.2802[1602,1761] 190ns |----------------------------------------L0.2802-----------------------------------------|" - - "L0.2815[1602,1761] 191ns |----------------------------------------L0.2815-----------------------------------------|" + - "L0.2105[2001,2086] 140ns |----------------------------------------L0.2105-----------------------------------------|" + - "L0.2119[2001,2086] 141ns |----------------------------------------L0.2119-----------------------------------------|" + - "L0.2133[2001,2086] 142ns |----------------------------------------L0.2133-----------------------------------------|" + - "L0.2147[2001,2086] 143ns |----------------------------------------L0.2147-----------------------------------------|" + - "L0.2189[2001,2086] 144ns |----------------------------------------L0.2189-----------------------------------------|" + - "L0.2203[2001,2086] 145ns |----------------------------------------L0.2203-----------------------------------------|" + - "L0.2217[2001,2086] 146ns |----------------------------------------L0.2217-----------------------------------------|" + - "L0.2231[2001,2086] 147ns |----------------------------------------L0.2231-----------------------------------------|" + - "L0.2245[2001,2086] 148ns 
|----------------------------------------L0.2245-----------------------------------------|" + - "L0.2259[2001,2086] 149ns |----------------------------------------L0.2259-----------------------------------------|" + - "L0.2273[2001,2086] 150ns |----------------------------------------L0.2273-----------------------------------------|" + - "L0.2287[2001,2086] 151ns |----------------------------------------L0.2287-----------------------------------------|" + - "L0.2301[2001,2086] 152ns |----------------------------------------L0.2301-----------------------------------------|" + - "L0.2315[2001,2086] 153ns |----------------------------------------L0.2315-----------------------------------------|" + - "L0.2329[2001,2086] 154ns |----------------------------------------L0.2329-----------------------------------------|" + - "L0.2343[2001,2086] 155ns |----------------------------------------L0.2343-----------------------------------------|" + - "L0.2357[2001,2086] 156ns |----------------------------------------L0.2357-----------------------------------------|" + - "L0.2371[2001,2086] 157ns |----------------------------------------L0.2371-----------------------------------------|" + - "L0.2385[2001,2086] 158ns |----------------------------------------L0.2385-----------------------------------------|" + - "L0.2399[2001,2086] 159ns |----------------------------------------L0.2399-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1602,1761] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 159ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2581, L0.2594, L0.2607, L0.2620, L0.2633, L0.2646, L0.2659, L0.2672, L0.2685, L0.2698, L0.2711, L0.2724, L0.2737, L0.2750, L0.2763, L0.2776, L0.2789, L0.2802, L0.2815, L0.3044" + - " Soft Deleting 20 files: L0.2105, L0.2119, L0.2133, L0.2147, L0.2189, L0.2203, L0.2217, L0.2231, L0.2245, L0.2259, L0.2273, L0.2287, L0.2301, L0.2315, L0.2329, L0.2343, L0.2357, L0.2371, L0.2385, L0.2399" - " Creating 1 files" - - "**** Simulation run 336, type=compact(ManySmallFiles). 20 Input Files, 0b total:" + - "**** Simulation run 326, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3047[1762,2000] 172ns |----------------------------------------L0.3047-----------------------------------------|" - - "L0.2582[1762,2000] 173ns |----------------------------------------L0.2582-----------------------------------------|" - - "L0.2595[1762,2000] 174ns |----------------------------------------L0.2595-----------------------------------------|" - - "L0.2608[1762,2000] 175ns |----------------------------------------L0.2608-----------------------------------------|" - - "L0.2621[1762,2000] 176ns |----------------------------------------L0.2621-----------------------------------------|" - - "L0.2634[1762,2000] 177ns |----------------------------------------L0.2634-----------------------------------------|" - - "L0.2647[1762,2000] 178ns |----------------------------------------L0.2647-----------------------------------------|" - - "L0.2660[1762,2000] 179ns |----------------------------------------L0.2660-----------------------------------------|" - - "L0.2673[1762,2000] 180ns |----------------------------------------L0.2673-----------------------------------------|" - - "L0.2686[1762,2000] 181ns |----------------------------------------L0.2686-----------------------------------------|" - - "L0.2699[1762,2000] 182ns |----------------------------------------L0.2699-----------------------------------------|" - - "L0.2712[1762,2000] 183ns |----------------------------------------L0.2712-----------------------------------------|" - - "L0.2725[1762,2000] 184ns |----------------------------------------L0.2725-----------------------------------------|" - - "L0.2738[1762,2000] 185ns |----------------------------------------L0.2738-----------------------------------------|" - - "L0.2751[1762,2000] 186ns |----------------------------------------L0.2751-----------------------------------------|" - - "L0.2764[1762,2000] 187ns |----------------------------------------L0.2764-----------------------------------------|" - - "L0.2777[1762,2000] 188ns |----------------------------------------L0.2777-----------------------------------------|" - - "L0.2790[1762,2000] 189ns |----------------------------------------L0.2790-----------------------------------------|" - - "L0.2803[1762,2000] 190ns |----------------------------------------L0.2803-----------------------------------------|" - - "L0.2816[1762,2000] 191ns |----------------------------------------L0.2816-----------------------------------------|" + - "L0.2413[2001,2086] 160ns |----------------------------------------L0.2413-----------------------------------------|" + - "L0.2427[2001,2086] 161ns |----------------------------------------L0.2427-----------------------------------------|" + - "L0.2440[2001,2086] 162ns |----------------------------------------L0.2440-----------------------------------------|" + - "L0.2453[2001,2086] 163ns |----------------------------------------L0.2453-----------------------------------------|" + - "L0.2466[2001,2086] 164ns |----------------------------------------L0.2466-----------------------------------------|" + - "L0.2479[2001,2086] 165ns |----------------------------------------L0.2479-----------------------------------------|" + - "L0.2492[2001,2086] 166ns |----------------------------------------L0.2492-----------------------------------------|" + - "L0.2505[2001,2086] 167ns |----------------------------------------L0.2505-----------------------------------------|" + - "L0.2518[2001,2086] 168ns 
|----------------------------------------L0.2518-----------------------------------------|" + - "L0.2531[2001,2086] 169ns |----------------------------------------L0.2531-----------------------------------------|" + - "L0.2544[2001,2086] 170ns |----------------------------------------L0.2544-----------------------------------------|" + - "L0.2557[2001,2086] 171ns |----------------------------------------L0.2557-----------------------------------------|" + - "L0.2570[2001,2086] 172ns |----------------------------------------L0.2570-----------------------------------------|" + - "L0.2583[2001,2086] 173ns |----------------------------------------L0.2583-----------------------------------------|" + - "L0.2596[2001,2086] 174ns |----------------------------------------L0.2596-----------------------------------------|" + - "L0.2609[2001,2086] 175ns |----------------------------------------L0.2609-----------------------------------------|" + - "L0.2622[2001,2086] 176ns |----------------------------------------L0.2622-----------------------------------------|" + - "L0.2635[2001,2086] 177ns |----------------------------------------L0.2635-----------------------------------------|" + - "L0.2648[2001,2086] 178ns |----------------------------------------L0.2648-----------------------------------------|" + - "L0.2661[2001,2086] 179ns |----------------------------------------L0.2661-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1762,2000] 191ns |------------------------------------------L0.?------------------------------------------|" + - "L0.?[2001,2086] 179ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 20 files: L0.2582, L0.2595, L0.2608, L0.2621, L0.2634, L0.2647, L0.2660, L0.2673, L0.2686, L0.2699, L0.2712, L0.2725, L0.2738, L0.2751, L0.2764, L0.2777, L0.2790, L0.2803, L0.2816, L0.3047" + - " Soft Deleting 20 files: L0.2413, L0.2427, L0.2440, L0.2453, L0.2466, L0.2479, L0.2492, L0.2505, L0.2518, L0.2531, L0.2544, L0.2557, L0.2570, L0.2583, L0.2596, L0.2609, L0.2622, L0.2635, L0.2648, L0.2661" - " Creating 1 files" - - "**** Simulation run 337, type=compact(TotalSizeLessThanMaxCompactSize). 9 Input Files, 0b total:" + - "**** Simulation run 327, type=compact(ManySmallFiles). 
20 Input Files, 0b total:" - "L0, all files 0b " - - "L0.2921[2001,2086] 199ns |----------------------------------------L0.2921-----------------------------------------|" - - "L0.2908[2001,2086] 198ns |----------------------------------------L0.2908-----------------------------------------|" - - "L0.2895[2001,2086] 197ns |----------------------------------------L0.2895-----------------------------------------|" - - "L0.2882[2001,2086] 196ns |----------------------------------------L0.2882-----------------------------------------|" - - "L0.2869[2001,2086] 195ns |----------------------------------------L0.2869-----------------------------------------|" - - "L0.2856[2001,2086] 194ns |----------------------------------------L0.2856-----------------------------------------|" - - "L0.2843[2001,2086] 193ns |----------------------------------------L0.2843-----------------------------------------|" + - "L0.2674[2001,2086] 180ns |----------------------------------------L0.2674-----------------------------------------|" + - "L0.2687[2001,2086] 181ns |----------------------------------------L0.2687-----------------------------------------|" + - "L0.2700[2001,2086] 182ns |----------------------------------------L0.2700-----------------------------------------|" + - "L0.2713[2001,2086] 183ns |----------------------------------------L0.2713-----------------------------------------|" + - "L0.2726[2001,2086] 184ns |----------------------------------------L0.2726-----------------------------------------|" + - "L0.2739[2001,2086] 185ns |----------------------------------------L0.2739-----------------------------------------|" + - "L0.2752[2001,2086] 186ns |----------------------------------------L0.2752-----------------------------------------|" + - "L0.2765[2001,2086] 187ns |----------------------------------------L0.2765-----------------------------------------|" + - "L0.2778[2001,2086] 188ns |----------------------------------------L0.2778-----------------------------------------|" + - "L0.2791[2001,2086] 189ns |----------------------------------------L0.2791-----------------------------------------|" + - "L0.2804[2001,2086] 190ns |----------------------------------------L0.2804-----------------------------------------|" + - "L0.2817[2001,2086] 191ns |----------------------------------------L0.2817-----------------------------------------|" - "L0.2830[2001,2086] 192ns |----------------------------------------L0.2830-----------------------------------------|" - - "L0.3048[2001,2086] 191ns |----------------------------------------L0.3048-----------------------------------------|" + - "L0.2843[2001,2086] 193ns |----------------------------------------L0.2843-----------------------------------------|" + - "L0.2856[2001,2086] 194ns |----------------------------------------L0.2856-----------------------------------------|" + - "L0.2869[2001,2086] 195ns |----------------------------------------L0.2869-----------------------------------------|" + - "L0.2882[2001,2086] 196ns |----------------------------------------L0.2882-----------------------------------------|" + - "L0.2895[2001,2086] 197ns |----------------------------------------L0.2895-----------------------------------------|" + - "L0.2908[2001,2086] 198ns |----------------------------------------L0.2908-----------------------------------------|" + - "L0.2921[2001,2086] 199ns |----------------------------------------L0.2921-----------------------------------------|" - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L1, all files 0b 
" - - "L1.?[2001,2086] 199ns |------------------------------------------L1.?------------------------------------------|" + - "L0, all files 0b " + - "L0.?[2001,2086] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 9 files: L0.2830, L0.2843, L0.2856, L0.2869, L0.2882, L0.2895, L0.2908, L0.2921, L0.3048" + - " Soft Deleting 20 files: L0.2674, L0.2687, L0.2700, L0.2713, L0.2726, L0.2739, L0.2752, L0.2765, L0.2778, L0.2791, L0.2804, L0.2817, L0.2830, L0.2843, L0.2856, L0.2869, L0.2882, L0.2895, L0.2908, L0.2921" - " Creating 1 files" - - "**** Simulation run 338, type=compact(TotalSizeLessThanMaxCompactSize). 9 Input Files, 2kb total:" - - "L0 " - - "L0.2922[2087,1990000] 199ns 10b|----------------------------------------L0.2922-----------------------------------------|" - - "L0.2909[2087,1980000] 198ns 10b|----------------------------------------L0.2909----------------------------------------| " - - "L0.2896[2087,1970000] 197ns 10b|----------------------------------------L0.2896----------------------------------------| " - - "L0.2883[2087,1960000] 196ns 10b|---------------------------------------L0.2883----------------------------------------| " - - "L0.2870[2087,1950000] 195ns 10b|---------------------------------------L0.2870----------------------------------------| " - - "L0.2857[2087,1940000] 194ns 10b|---------------------------------------L0.2857---------------------------------------| " - - "L0.2844[2087,1930000] 193ns 10b|---------------------------------------L0.2844---------------------------------------| " - - "L0.2831[2087,1920000] 192ns 10b|--------------------------------------L0.2831---------------------------------------| " - - "L0.3049[2087,1910000] 191ns 2kb|--------------------------------------L0.3049---------------------------------------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 2kb total:" - - "L1, all files 2kb " - - "L1.?[2087,1990000] 199ns |------------------------------------------L1.?------------------------------------------|" + - "**** Simulation run 328, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.430[2087,200000] 20ns |------------------L0.430-------------------| " + - "L0.444[2087,210000] 21ns |--------------------L0.444--------------------| " + - "L0.458[2087,220000] 22ns |---------------------L0.458---------------------| " + - "L0.472[2087,230000] 23ns |----------------------L0.472----------------------| " + - "L0.486[2087,240000] 24ns |-----------------------L0.486------------------------| " + - "L0.500[2087,250000] 25ns |------------------------L0.500-------------------------| " + - "L0.514[2087,260000] 26ns |-------------------------L0.514--------------------------| " + - "L0.528[2087,270000] 27ns |---------------------------L0.528---------------------------| " + - "L0.566[2087,280000] 28ns |----------------------------L0.566----------------------------| " + - "L0.580[2087,290000] 29ns |-----------------------------L0.580-----------------------------| " + - "L0.594[2087,300000] 30ns |------------------------------L0.594-------------------------------| " + - "L0.608[2087,310000] 31ns |-------------------------------L0.608--------------------------------| " + - "L0.622[2087,320000] 32ns |--------------------------------L0.622---------------------------------| " + - "L0.636[2087,330000] 33ns |----------------------------------L0.636----------------------------------| " + - "L0.650[2087,340000] 34ns |-----------------------------------L0.650-----------------------------------| " + - "L0.664[2087,350000] 35ns |------------------------------------L0.664------------------------------------| " + - "L0.678[2087,360000] 36ns |-------------------------------------L0.678--------------------------------------| " + - "L0.692[2087,370000] 37ns |--------------------------------------L0.692---------------------------------------| " + - "L0.706[2087,380000] 38ns |---------------------------------------L0.706----------------------------------------| " + - "L0.720[2087,390000] 39ns |-----------------------------------------L0.720-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,390000] 39ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.430, L0.444, L0.458, L0.472, L0.486, L0.500, L0.514, L0.528, L0.566, L0.580, L0.594, L0.608, L0.622, L0.636, L0.650, L0.664, L0.678, L0.692, L0.706, L0.720" + - " Creating 1 files" + - "**** Simulation run 329, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.734[2087,400000] 40ns |--------------------------L0.734--------------------------| " + - "L0.748[2087,410000] 41ns |---------------------------L0.748---------------------------| " + - "L0.762[2087,420000] 42ns |---------------------------L0.762----------------------------| " + - "L0.776[2087,430000] 43ns |----------------------------L0.776-----------------------------| " + - "L0.790[2087,440000] 44ns |-----------------------------L0.790------------------------------| " + - "L0.804[2087,450000] 45ns |------------------------------L0.804------------------------------| " + - "L0.818[2087,460000] 46ns |-------------------------------L0.818-------------------------------| " + - "L0.832[2087,470000] 47ns |-------------------------------L0.832--------------------------------| " + - "L0.846[2087,480000] 48ns |--------------------------------L0.846---------------------------------| " + - "L0.860[2087,490000] 49ns |---------------------------------L0.860---------------------------------| " + - "L0.874[2087,500000] 50ns |----------------------------------L0.874----------------------------------| " + - "L0.888[2087,510000] 51ns |----------------------------------L0.888-----------------------------------| " + - "L0.902[2087,520000] 52ns |-----------------------------------L0.902------------------------------------| " + - "L0.916[2087,530000] 53ns |------------------------------------L0.916------------------------------------| " + - "L0.930[2087,540000] 54ns |-------------------------------------L0.930-------------------------------------| " + - "L0.944[2087,550000] 55ns |-------------------------------------L0.944--------------------------------------| " + - "L0.958[2087,560000] 56ns |--------------------------------------L0.958---------------------------------------| " + - "L0.972[2087,570000] 57ns |---------------------------------------L0.972---------------------------------------| " + - "L0.986[2087,580000] 58ns |----------------------------------------L0.986----------------------------------------| " + - "L0.1000[2087,590000] 59ns|----------------------------------------L0.1000-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,590000] 59ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.734, L0.748, L0.762, L0.776, L0.790, L0.804, L0.818, L0.832, L0.846, L0.860, L0.874, L0.888, L0.902, L0.916, L0.930, L0.944, L0.958, L0.972, L0.986, L0.1000" + - " Creating 1 files" + - "**** Simulation run 330, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.1014[2087,600000] 60ns|-----------------------------L0.1014------------------------------| " + - "L0.1028[2087,610000] 61ns|------------------------------L0.1028------------------------------| " + - "L0.1042[2087,620000] 62ns|------------------------------L0.1042-------------------------------| " + - "L0.1056[2087,630000] 63ns|-------------------------------L0.1056-------------------------------| " + - "L0.1070[2087,640000] 64ns|-------------------------------L0.1070--------------------------------| " + - "L0.1084[2087,650000] 65ns|--------------------------------L0.1084---------------------------------| " + - "L0.1098[2087,660000] 66ns|---------------------------------L0.1098---------------------------------| " + - "L0.1112[2087,670000] 67ns|---------------------------------L0.1112----------------------------------| " + - "L0.1126[2087,680000] 68ns|----------------------------------L0.1126----------------------------------| " + - "L0.1140[2087,690000] 69ns|----------------------------------L0.1140-----------------------------------| " + - "L0.1154[2087,700000] 70ns|-----------------------------------L0.1154-----------------------------------| " + - "L0.1168[2087,710000] 71ns|-----------------------------------L0.1168------------------------------------| " + - "L0.1182[2087,720000] 72ns|------------------------------------L0.1182-------------------------------------| " + - "L0.1196[2087,730000] 73ns|-------------------------------------L0.1196-------------------------------------| " + - "L0.1210[2087,740000] 74ns|-------------------------------------L0.1210--------------------------------------| " + - "L0.1224[2087,750000] 75ns|--------------------------------------L0.1224--------------------------------------| " + - "L0.1238[2087,760000] 76ns|--------------------------------------L0.1238---------------------------------------| " + - "L0.1252[2087,770000] 77ns|---------------------------------------L0.1252---------------------------------------| " + - "L0.1266[2087,780000] 78ns|---------------------------------------L0.1266----------------------------------------| " + - "L0.1280[2087,790000] 79ns|----------------------------------------L0.1280-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,790000] 79ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1014, L0.1028, L0.1042, L0.1056, L0.1070, L0.1084, L0.1098, L0.1112, L0.1126, L0.1140, L0.1154, L0.1168, L0.1182, L0.1196, L0.1210, L0.1224, L0.1238, L0.1252, L0.1266, L0.1280" + - " Creating 1 files" + - "**** Simulation run 331, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.1294[2087,800000] 80ns|-------------------------------L0.1294--------------------------------| " + - "L0.1308[2087,810000] 81ns|--------------------------------L0.1308--------------------------------| " + - "L0.1322[2087,820000] 82ns|--------------------------------L0.1322---------------------------------| " + - "L0.1336[2087,830000] 83ns|---------------------------------L0.1336---------------------------------| " + - "L0.1350[2087,840000] 84ns|---------------------------------L0.1350----------------------------------| " + - "L0.1364[2087,850000] 85ns|----------------------------------L0.1364----------------------------------| " + - "L0.1378[2087,860000] 86ns|----------------------------------L0.1378-----------------------------------| " + - "L0.1392[2087,870000] 87ns|-----------------------------------L0.1392-----------------------------------| " + - "L0.1406[2087,880000] 88ns|-----------------------------------L0.1406-----------------------------------| " + - "L0.1420[2087,890000] 89ns|-----------------------------------L0.1420------------------------------------| " + - "L0.1434[2087,900000] 90ns|------------------------------------L0.1434------------------------------------| " + - "L0.1448[2087,910000] 91ns|------------------------------------L0.1448-------------------------------------| " + - "L0.1462[2087,920000] 92ns|-------------------------------------L0.1462-------------------------------------| " + - "L0.1476[2087,930000] 93ns|-------------------------------------L0.1476--------------------------------------| " + - "L0.1490[2087,940000] 94ns|--------------------------------------L0.1490--------------------------------------| " + - "L0.1504[2087,950000] 95ns|--------------------------------------L0.1504---------------------------------------| " + - "L0.1518[2087,960000] 96ns|---------------------------------------L0.1518---------------------------------------| " + - "L0.1532[2087,970000] 97ns|---------------------------------------L0.1532----------------------------------------| " + - "L0.1546[2087,980000] 98ns|----------------------------------------L0.1546----------------------------------------| " + - "L0.1560[2087,990000] 99ns|----------------------------------------L0.1560-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,990000] 99ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1294, L0.1308, L0.1322, L0.1336, L0.1350, L0.1364, L0.1378, L0.1392, L0.1406, L0.1420, L0.1434, L0.1448, L0.1462, L0.1476, L0.1490, L0.1504, L0.1518, L0.1532, L0.1546, L0.1560" + - " Creating 1 files" + - "**** Simulation run 332, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.1574[2087,1000000] 100ns|---------------------------------L0.1574---------------------------------| " + - "L0.1588[2087,1010000] 101ns|---------------------------------L0.1588----------------------------------| " + - "L0.1602[2087,1020000] 102ns|----------------------------------L0.1602----------------------------------| " + - "L0.1616[2087,1030000] 103ns|----------------------------------L0.1616----------------------------------| " + - "L0.1630[2087,1040000] 104ns|----------------------------------L0.1630-----------------------------------| " + - "L0.1644[2087,1050000] 105ns|-----------------------------------L0.1644-----------------------------------| " + - "L0.1658[2087,1060000] 106ns|-----------------------------------L0.1658------------------------------------| " + - "L0.1672[2087,1070000] 107ns|-----------------------------------L0.1672------------------------------------| " + - "L0.1686[2087,1080000] 108ns|------------------------------------L0.1686------------------------------------| " + - "L0.1700[2087,1090000] 109ns|------------------------------------L0.1700-------------------------------------| " + - "L0.1714[2087,1100000] 110ns|-------------------------------------L0.1714-------------------------------------| " + - "L0.1728[2087,1110000] 111ns|-------------------------------------L0.1728-------------------------------------| " + - "L0.1742[2087,1120000] 112ns|-------------------------------------L0.1742--------------------------------------| " + - "L0.1756[2087,1130000] 113ns|--------------------------------------L0.1756--------------------------------------| " + - "L0.1770[2087,1140000] 114ns|--------------------------------------L0.1770---------------------------------------| " + - "L0.1784[2087,1150000] 115ns|--------------------------------------L0.1784---------------------------------------| " + - "L0.1798[2087,1160000] 116ns|---------------------------------------L0.1798---------------------------------------| " + - "L0.1812[2087,1170000] 117ns|---------------------------------------L0.1812----------------------------------------| " + - "L0.1826[2087,1180000] 118ns|----------------------------------------L0.1826----------------------------------------| " + - "L0.1840[2087,1190000] 119ns|----------------------------------------L0.1840-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,1190000] 119ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1574, L0.1588, L0.1602, L0.1616, L0.1630, L0.1644, L0.1658, L0.1672, L0.1686, L0.1700, L0.1714, L0.1728, L0.1742, L0.1756, L0.1770, L0.1784, L0.1798, L0.1812, L0.1826, L0.1840" + - " Creating 1 files" + - "**** Simulation run 333, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.1854[2087,1200000] 120ns|----------------------------------L0.1854----------------------------------| " + - "L0.1868[2087,1210000] 121ns|----------------------------------L0.1868-----------------------------------| " + - "L0.1882[2087,1220000] 122ns|----------------------------------L0.1882-----------------------------------| " + - "L0.1896[2087,1230000] 123ns|-----------------------------------L0.1896-----------------------------------| " + - "L0.1910[2087,1240000] 124ns|-----------------------------------L0.1910------------------------------------| " + - "L0.1924[2087,1250000] 125ns|-----------------------------------L0.1924------------------------------------| " + - "L0.1938[2087,1260000] 126ns|------------------------------------L0.1938------------------------------------| " + - "L0.1952[2087,1270000] 127ns|------------------------------------L0.1952-------------------------------------| " + - "L0.1966[2087,1280000] 128ns|------------------------------------L0.1966-------------------------------------| " + - "L0.1980[2087,1290000] 129ns|-------------------------------------L0.1980-------------------------------------| " + - "L0.1994[2087,1300000] 130ns|-------------------------------------L0.1994--------------------------------------| " + - "L0.2008[2087,1310000] 131ns|-------------------------------------L0.2008--------------------------------------| " + - "L0.2022[2087,1320000] 132ns|--------------------------------------L0.2022--------------------------------------| " + - "L0.2036[2087,1330000] 133ns|--------------------------------------L0.2036---------------------------------------| " + - "L0.2162[2087,1340000] 134ns|--------------------------------------L0.2162---------------------------------------| " + - "L0.2176[2087,1350000] 135ns|---------------------------------------L0.2176---------------------------------------| " + - "L0.2050[2087,1360000] 136ns|---------------------------------------L0.2050----------------------------------------| " + - "L0.2064[2087,1370000] 137ns|---------------------------------------L0.2064----------------------------------------| " + - "L0.2078[2087,1380000] 138ns|----------------------------------------L0.2078----------------------------------------| " + - "L0.2092[2087,1390000] 139ns|----------------------------------------L0.2092-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,1390000] 139ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.1854, L0.1868, L0.1882, L0.1896, L0.1910, L0.1924, L0.1938, L0.1952, L0.1966, L0.1980, L0.1994, L0.2008, L0.2022, L0.2036, L0.2050, L0.2064, L0.2078, L0.2092, L0.2162, L0.2176" + - " Creating 1 files" + - "**** Simulation run 334, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.2106[2087,1400000] 140ns|-----------------------------------L0.2106-----------------------------------| " + - "L0.2120[2087,1410000] 141ns|-----------------------------------L0.2120-----------------------------------| " + - "L0.2134[2087,1420000] 142ns|-----------------------------------L0.2134------------------------------------| " + - "L0.2148[2087,1430000] 143ns|-----------------------------------L0.2148------------------------------------| " + - "L0.2190[2087,1440000] 144ns|------------------------------------L0.2190------------------------------------| " + - "L0.2204[2087,1450000] 145ns|------------------------------------L0.2204-------------------------------------| " + - "L0.2218[2087,1460000] 146ns|------------------------------------L0.2218-------------------------------------| " + - "L0.2232[2087,1470000] 147ns|-------------------------------------L0.2232-------------------------------------| " + - "L0.2246[2087,1480000] 148ns|-------------------------------------L0.2246-------------------------------------| " + - "L0.2260[2087,1490000] 149ns|-------------------------------------L0.2260--------------------------------------| " + - "L0.2274[2087,1500000] 150ns|-------------------------------------L0.2274--------------------------------------| " + - "L0.2288[2087,1510000] 151ns|--------------------------------------L0.2288--------------------------------------| " + - "L0.2302[2087,1520000] 152ns|--------------------------------------L0.2302---------------------------------------| " + - "L0.2316[2087,1530000] 153ns|--------------------------------------L0.2316---------------------------------------| " + - "L0.2330[2087,1540000] 154ns|---------------------------------------L0.2330---------------------------------------| " + - "L0.2344[2087,1550000] 155ns|---------------------------------------L0.2344---------------------------------------| " + - "L0.2358[2087,1560000] 156ns|---------------------------------------L0.2358----------------------------------------| " + - "L0.2372[2087,1570000] 157ns|---------------------------------------L0.2372----------------------------------------| " + - "L0.2386[2087,1580000] 158ns|----------------------------------------L0.2386----------------------------------------| " + - "L0.2400[2087,1590000] 159ns|----------------------------------------L0.2400-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,1590000] 159ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2106, L0.2120, L0.2134, L0.2148, L0.2190, L0.2204, L0.2218, L0.2232, L0.2246, L0.2260, L0.2274, L0.2288, L0.2302, L0.2316, L0.2330, L0.2344, L0.2358, L0.2372, L0.2386, L0.2400" + - " Creating 1 files" + - "**** Simulation run 335, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.2414[2087,1600000] 160ns|-----------------------------------L0.2414------------------------------------| " + - "L0.2428[2087,1610000] 161ns|-----------------------------------L0.2428------------------------------------| " + - "L0.2441[2087,1620000] 162ns|------------------------------------L0.2441------------------------------------| " + - "L0.2454[2087,1630000] 163ns|------------------------------------L0.2454------------------------------------| " + - "L0.2467[2087,1640000] 164ns|------------------------------------L0.2467-------------------------------------| " + - "L0.2480[2087,1650000] 165ns|------------------------------------L0.2480-------------------------------------| " + - "L0.2493[2087,1660000] 166ns|-------------------------------------L0.2493-------------------------------------| " + - "L0.2506[2087,1670000] 167ns|-------------------------------------L0.2506-------------------------------------| " + - "L0.2519[2087,1680000] 168ns|-------------------------------------L0.2519--------------------------------------| " + - "L0.2532[2087,1690000] 169ns|-------------------------------------L0.2532--------------------------------------| " + - "L0.2545[2087,1700000] 170ns|--------------------------------------L0.2545--------------------------------------| " + - "L0.2558[2087,1710000] 171ns|--------------------------------------L0.2558--------------------------------------| " + - "L0.2571[2087,1720000] 172ns|--------------------------------------L0.2571---------------------------------------| " + - "L0.2584[2087,1730000] 173ns|--------------------------------------L0.2584---------------------------------------| " + - "L0.2597[2087,1740000] 174ns|---------------------------------------L0.2597---------------------------------------| " + - "L0.2610[2087,1750000] 175ns|---------------------------------------L0.2610---------------------------------------| " + - "L0.2623[2087,1760000] 176ns|---------------------------------------L0.2623----------------------------------------| " + - "L0.2636[2087,1770000] 177ns|---------------------------------------L0.2636----------------------------------------| " + - "L0.2649[2087,1780000] 178ns|----------------------------------------L0.2649----------------------------------------| " + - "L0.2662[2087,1790000] 179ns|----------------------------------------L0.2662-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,1790000] 179ns |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 20 files: L0.2414, L0.2428, L0.2441, L0.2454, L0.2467, L0.2480, L0.2493, L0.2506, L0.2519, L0.2532, L0.2545, L0.2558, L0.2571, L0.2584, L0.2597, L0.2610, L0.2623, L0.2636, L0.2649, L0.2662" + - " Creating 1 files" + - "**** Simulation run 336, type=compact(ManySmallFiles). 
20 Input Files, 200b total:" + - "L0, all files 10b " + - "L0.2675[2087,1800000] 180ns|------------------------------------L0.2675------------------------------------| " + - "L0.2688[2087,1810000] 181ns|------------------------------------L0.2688------------------------------------| " + - "L0.2701[2087,1820000] 182ns|------------------------------------L0.2701-------------------------------------| " + - "L0.2714[2087,1830000] 183ns|------------------------------------L0.2714-------------------------------------| " + - "L0.2727[2087,1840000] 184ns|-------------------------------------L0.2727-------------------------------------| " + - "L0.2740[2087,1850000] 185ns|-------------------------------------L0.2740-------------------------------------| " + - "L0.2753[2087,1860000] 186ns|-------------------------------------L0.2753--------------------------------------| " + - "L0.2766[2087,1870000] 187ns|-------------------------------------L0.2766--------------------------------------| " + - "L0.2779[2087,1880000] 188ns|--------------------------------------L0.2779--------------------------------------| " + - "L0.2792[2087,1890000] 189ns|--------------------------------------L0.2792--------------------------------------| " + - "L0.2805[2087,1900000] 190ns|--------------------------------------L0.2805--------------------------------------| " + - "L0.2818[2087,1910000] 191ns|--------------------------------------L0.2818---------------------------------------| " + - "L0.2831[2087,1920000] 192ns|--------------------------------------L0.2831---------------------------------------| " + - "L0.2844[2087,1930000] 193ns|---------------------------------------L0.2844---------------------------------------| " + - "L0.2857[2087,1940000] 194ns|---------------------------------------L0.2857---------------------------------------| " + - "L0.2870[2087,1950000] 195ns|---------------------------------------L0.2870----------------------------------------| " + - "L0.2883[2087,1960000] 196ns|---------------------------------------L0.2883----------------------------------------| " + - "L0.2896[2087,1970000] 197ns|----------------------------------------L0.2896----------------------------------------| " + - "L0.2909[2087,1980000] 198ns|----------------------------------------L0.2909----------------------------------------| " + - "L0.2922[2087,1990000] 199ns|----------------------------------------L0.2922-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 200b total:" + - "L0, all files 200b " + - "L0.?[2087,1990000] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 9 files: L0.2831, L0.2844, L0.2857, L0.2870, L0.2883, L0.2896, L0.2909, L0.2922, L0.3049" + - " Soft Deleting 20 files: L0.2675, L0.2688, L0.2701, L0.2714, L0.2727, L0.2740, L0.2753, L0.2766, L0.2779, L0.2792, L0.2805, L0.2818, L0.2831, L0.2844, L0.2857, L0.2870, L0.2883, L0.2896, L0.2909, L0.2922" - " Creating 1 files" - - "**** Simulation run 339, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[262]). 10 Input Files, 160mb total:" + - "**** Simulation run 337, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[101]). 
9 Input Files, 161mb total:" - "L0 " - - "L0.2910[199,321] 199ns 0b |------------------------------L0.2910------------------------------| " - - "L0.2897[198,321] 198ns 0b |------------------------------L0.2897------------------------------| " - - "L0.2884[197,321] 197ns 0b |------------------------------L0.2884-------------------------------| " - - "L0.2871[196,321] 196ns 0b |------------------------------L0.2871-------------------------------| " - - "L0.2858[195,321] 195ns 0b |-------------------------------L0.2858-------------------------------| " - - "L0.2845[194,321] 194ns 0b |-------------------------------L0.2845-------------------------------| " - - "L0.2832[193,321] 193ns 0b |-------------------------------L0.2832--------------------------------| " - - "L0.2819[192,321] 192ns 0b |--------------------------------L0.2819--------------------------------| " - - "L0.2924[162,321] 19ns 160mb|----------------------------------------L0.2924-----------------------------------------|" - - "L0.3050[162,321] 191ns 0b|----------------------------------------L0.3050-----------------------------------------|" + - "L0.2931[160,161] 161ns 0b |L0.2931|" + - "L0.2930[140,161] 159ns 0b |-L0.2930-| " + - "L0.2929[120,161] 139ns 0b |-------L0.2929-------| " + - "L0.2928[100,161] 119ns 0b |------------L0.2928-------------| " + - "L0.2927[80,161] 99ns 0b |------------------L0.2927------------------| " + - "L0.2926[60,161] 79ns 0b |-----------------------L0.2926------------------------| " + - "L0.2925[40,161] 59ns 0b |-----------------------------L0.2925------------------------------| " + - "L0.2924[20,161] 39ns 0b |-----------------------------------L0.2924-----------------------------------| " + - "L0.2923[1,161] 19ns 161mb|----------------------------------------L0.2923-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 161mb total:" + - "L1 " + - "L1.?[1,101] 161ns 101mb |-------------------------L1.?-------------------------| " + - "L1.?[102,161] 161ns 60mb |-------------L1.?--------------| " + - "Committing partition 1:" + - " Soft Deleting 9 files: L0.2923, L0.2924, L0.2925, L0.2926, L0.2927, L0.2928, L0.2929, L0.2930, L0.2931" + - " Creating 2 files" + - "**** Simulation run 338, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[262]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.2941[162,321] 59ns 0b |----------------------------------------L0.2941-----------------------------------------|" + - "L0.2933[162,321] 39ns 0b |----------------------------------------L0.2933-----------------------------------------|" + - "L0.2940[180,321] 199ns 0b |-----------------------------------L0.2940-----------------------------------| " + - "L0.2939[162,321] 179ns 0b|----------------------------------------L0.2939-----------------------------------------|" + - "L0.2938[162,321] 159ns 0b|----------------------------------------L0.2938-----------------------------------------|" + - "L0.2937[162,321] 139ns 0b|----------------------------------------L0.2937-----------------------------------------|" + - "L0.2936[162,321] 119ns 0b|----------------------------------------L0.2936-----------------------------------------|" + - "L0.2935[162,321] 99ns 0b |----------------------------------------L0.2935-----------------------------------------|" + - "L0.2934[162,321] 79ns 0b |----------------------------------------L0.2934-----------------------------------------|" + - "L0.2932[162,321] 19ns 160mb|----------------------------------------L0.2932-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[162,262] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[263,321] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2819, L0.2832, L0.2845, L0.2858, L0.2871, L0.2884, L0.2897, L0.2910, L0.2924, L0.3050" + - " Soft Deleting 10 files: L0.2932, L0.2933, L0.2934, L0.2935, L0.2936, L0.2937, L0.2938, L0.2939, L0.2940, L0.2941" - " Creating 2 files" - - "**** Simulation run 340, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[422]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2911[322,481] 199ns 0b|----------------------------------------L0.2911-----------------------------------------|" - - "L0.2898[322,481] 198ns 0b|----------------------------------------L0.2898-----------------------------------------|" - - "L0.2885[322,481] 197ns 0b|----------------------------------------L0.2885-----------------------------------------|" - - "L0.2872[322,481] 196ns 0b|----------------------------------------L0.2872-----------------------------------------|" - - "L0.2859[322,481] 195ns 0b|----------------------------------------L0.2859-----------------------------------------|" - - "L0.2846[322,481] 194ns 0b|----------------------------------------L0.2846-----------------------------------------|" - - "L0.2833[322,481] 193ns 0b|----------------------------------------L0.2833-----------------------------------------|" - - "L0.2820[322,481] 192ns 0b|----------------------------------------L0.2820-----------------------------------------|" - - "L0.2925[322,481] 19ns 160mb|----------------------------------------L0.2925-----------------------------------------|" - - "L0.3051[322,481] 191ns 0b|----------------------------------------L0.3051-----------------------------------------|" + - "**** Simulation run 339, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[422]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.2951[322,481] 199ns 0b|----------------------------------------L0.2951-----------------------------------------|" + - "L0.2950[322,481] 179ns 0b|----------------------------------------L0.2950-----------------------------------------|" + - "L0.2949[322,481] 159ns 0b|----------------------------------------L0.2949-----------------------------------------|" + - "L0.2948[322,481] 139ns 0b|----------------------------------------L0.2948-----------------------------------------|" + - "L0.2947[322,481] 119ns 0b|----------------------------------------L0.2947-----------------------------------------|" + - "L0.2946[322,481] 99ns 0b |----------------------------------------L0.2946-----------------------------------------|" + - "L0.2945[322,481] 79ns 0b |----------------------------------------L0.2945-----------------------------------------|" + - "L0.2944[322,481] 59ns 0b |----------------------------------------L0.2944-----------------------------------------|" + - "L0.2943[322,481] 39ns 0b |----------------------------------------L0.2943-----------------------------------------|" + - "L0.2942[322,481] 19ns 160mb|----------------------------------------L0.2942-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[322,422] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[423,481] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2820, L0.2833, L0.2846, L0.2859, L0.2872, L0.2885, L0.2898, L0.2911, L0.2925, L0.3051" + - " Soft Deleting 10 files: L0.2942, L0.2943, L0.2944, L0.2945, L0.2946, L0.2947, L0.2948, L0.2949, L0.2950, L0.2951" - " Creating 2 files" - - "**** Simulation run 341, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[582]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2912[482,641] 199ns 0b|----------------------------------------L0.2912-----------------------------------------|" - - "L0.2899[482,641] 198ns 0b|----------------------------------------L0.2899-----------------------------------------|" - - "L0.2886[482,641] 197ns 0b|----------------------------------------L0.2886-----------------------------------------|" - - "L0.2873[482,641] 196ns 0b|----------------------------------------L0.2873-----------------------------------------|" - - "L0.2860[482,641] 195ns 0b|----------------------------------------L0.2860-----------------------------------------|" - - "L0.2847[482,641] 194ns 0b|----------------------------------------L0.2847-----------------------------------------|" - - "L0.2834[482,641] 193ns 0b|----------------------------------------L0.2834-----------------------------------------|" - - "L0.2821[482,641] 192ns 0b|----------------------------------------L0.2821-----------------------------------------|" - - "L0.2926[482,641] 19ns 160mb|----------------------------------------L0.2926-----------------------------------------|" - - "L0.3052[482,641] 191ns 0b|----------------------------------------L0.3052-----------------------------------------|" + - "**** Simulation run 340, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[582]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.2961[482,641] 179ns 0b|----------------------------------------L0.2961-----------------------------------------|" + - "L0.2960[482,641] 159ns 0b|----------------------------------------L0.2960-----------------------------------------|" + - "L0.2959[482,641] 199ns 0b|----------------------------------------L0.2959-----------------------------------------|" + - "L0.2958[482,641] 139ns 0b|----------------------------------------L0.2958-----------------------------------------|" + - "L0.2957[482,641] 119ns 0b|----------------------------------------L0.2957-----------------------------------------|" + - "L0.2956[482,641] 99ns 0b |----------------------------------------L0.2956-----------------------------------------|" + - "L0.2955[482,641] 79ns 0b |----------------------------------------L0.2955-----------------------------------------|" + - "L0.2954[482,641] 59ns 0b |----------------------------------------L0.2954-----------------------------------------|" + - "L0.2953[482,641] 39ns 0b |----------------------------------------L0.2953-----------------------------------------|" + - "L0.2952[482,641] 19ns 160mb|----------------------------------------L0.2952-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[482,582] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[583,641] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2821, L0.2834, L0.2847, L0.2860, L0.2873, L0.2886, L0.2899, L0.2912, L0.2926, L0.3052" + - " Soft Deleting 10 files: L0.2952, L0.2953, L0.2954, L0.2955, L0.2956, L0.2957, L0.2958, L0.2959, L0.2960, L0.2961" - " Creating 2 files" - - "**** Simulation run 342, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[742]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2913[642,801] 199ns 0b|----------------------------------------L0.2913-----------------------------------------|" - - "L0.2900[642,801] 198ns 0b|----------------------------------------L0.2900-----------------------------------------|" - - "L0.2887[642,801] 197ns 0b|----------------------------------------L0.2887-----------------------------------------|" - - "L0.2874[642,801] 196ns 0b|----------------------------------------L0.2874-----------------------------------------|" - - "L0.2861[642,801] 195ns 0b|----------------------------------------L0.2861-----------------------------------------|" - - "L0.2848[642,801] 194ns 0b|----------------------------------------L0.2848-----------------------------------------|" - - "L0.2835[642,801] 193ns 0b|----------------------------------------L0.2835-----------------------------------------|" - - "L0.2822[642,801] 192ns 0b|----------------------------------------L0.2822-----------------------------------------|" - - "L0.2927[642,801] 19ns 160mb|----------------------------------------L0.2927-----------------------------------------|" - - "L0.3053[642,801] 191ns 0b|----------------------------------------L0.3053-----------------------------------------|" + - "**** Simulation run 341, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[742]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.2971[642,801] 199ns 0b|----------------------------------------L0.2971-----------------------------------------|" + - "L0.2970[642,801] 179ns 0b|----------------------------------------L0.2970-----------------------------------------|" + - "L0.2969[642,801] 159ns 0b|----------------------------------------L0.2969-----------------------------------------|" + - "L0.2968[642,801] 139ns 0b|----------------------------------------L0.2968-----------------------------------------|" + - "L0.2967[642,801] 119ns 0b|----------------------------------------L0.2967-----------------------------------------|" + - "L0.2966[642,801] 99ns 0b |----------------------------------------L0.2966-----------------------------------------|" + - "L0.2965[642,801] 79ns 0b |----------------------------------------L0.2965-----------------------------------------|" + - "L0.2964[642,801] 59ns 0b |----------------------------------------L0.2964-----------------------------------------|" + - "L0.2963[642,801] 39ns 0b |----------------------------------------L0.2963-----------------------------------------|" + - "L0.2962[642,801] 19ns 160mb|----------------------------------------L0.2962-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[642,742] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[743,801] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2822, L0.2835, L0.2848, L0.2861, L0.2874, L0.2887, L0.2900, L0.2913, L0.2927, L0.3053" + - " Soft Deleting 10 files: L0.2962, L0.2963, L0.2964, L0.2965, L0.2966, L0.2967, L0.2968, L0.2969, L0.2970, L0.2971" - " Creating 2 files" - - "**** Simulation run 343, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[902]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2914[802,961] 199ns 0b|----------------------------------------L0.2914-----------------------------------------|" - - "L0.2901[802,961] 198ns 0b|----------------------------------------L0.2901-----------------------------------------|" - - "L0.2888[802,961] 197ns 0b|----------------------------------------L0.2888-----------------------------------------|" - - "L0.2875[802,961] 196ns 0b|----------------------------------------L0.2875-----------------------------------------|" - - "L0.2862[802,961] 195ns 0b|----------------------------------------L0.2862-----------------------------------------|" - - "L0.2849[802,961] 194ns 0b|----------------------------------------L0.2849-----------------------------------------|" - - "L0.2836[802,961] 193ns 0b|----------------------------------------L0.2836-----------------------------------------|" - - "L0.2823[802,961] 192ns 0b|----------------------------------------L0.2823-----------------------------------------|" - - "L0.2928[802,961] 19ns 160mb|----------------------------------------L0.2928-----------------------------------------|" - - "L0.3054[802,961] 191ns 0b|----------------------------------------L0.3054-----------------------------------------|" + - "**** Simulation run 342, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[902]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.2981[802,961] 199ns 0b|----------------------------------------L0.2981-----------------------------------------|" + - "L0.2980[802,961] 179ns 0b|----------------------------------------L0.2980-----------------------------------------|" + - "L0.2979[802,961] 159ns 0b|----------------------------------------L0.2979-----------------------------------------|" + - "L0.2978[802,961] 139ns 0b|----------------------------------------L0.2978-----------------------------------------|" + - "L0.2977[802,961] 119ns 0b|----------------------------------------L0.2977-----------------------------------------|" + - "L0.2976[802,961] 99ns 0b |----------------------------------------L0.2976-----------------------------------------|" + - "L0.2975[802,961] 79ns 0b |----------------------------------------L0.2975-----------------------------------------|" + - "L0.2974[802,961] 59ns 0b |----------------------------------------L0.2974-----------------------------------------|" + - "L0.2973[802,961] 39ns 0b |----------------------------------------L0.2973-----------------------------------------|" + - "L0.2972[802,961] 19ns 160mb|----------------------------------------L0.2972-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[802,902] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[903,961] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2823, L0.2836, L0.2849, L0.2862, L0.2875, L0.2888, L0.2901, L0.2914, L0.2928, L0.3054" + - " Soft Deleting 10 files: L0.2972, L0.2973, L0.2974, L0.2975, L0.2976, L0.2977, L0.2978, L0.2979, L0.2980, L0.2981" - " Creating 2 files" - - "**** Simulation run 344, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1062]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2915[962,1121] 199ns 0b|----------------------------------------L0.2915-----------------------------------------|" - - "L0.2902[962,1121] 198ns 0b|----------------------------------------L0.2902-----------------------------------------|" - - "L0.2889[962,1121] 197ns 0b|----------------------------------------L0.2889-----------------------------------------|" - - "L0.2876[962,1121] 196ns 0b|----------------------------------------L0.2876-----------------------------------------|" - - "L0.2863[962,1121] 195ns 0b|----------------------------------------L0.2863-----------------------------------------|" - - "L0.2850[962,1121] 194ns 0b|----------------------------------------L0.2850-----------------------------------------|" - - "L0.2837[962,1121] 193ns 0b|----------------------------------------L0.2837-----------------------------------------|" - - "L0.2824[962,1121] 192ns 0b|----------------------------------------L0.2824-----------------------------------------|" - - "L0.2929[962,1121] 19ns 160mb|----------------------------------------L0.2929-----------------------------------------|" - - "L0.3055[962,1121] 191ns 0b|----------------------------------------L0.3055-----------------------------------------|" + - "**** Simulation run 343, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1062]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.2991[962,1121] 99ns 0b|----------------------------------------L0.2991-----------------------------------------|" + - "L0.2990[962,1121] 79ns 0b|----------------------------------------L0.2990-----------------------------------------|" + - "L0.2989[962,1121] 199ns 0b|----------------------------------------L0.2989-----------------------------------------|" + - "L0.2988[962,1121] 179ns 0b|----------------------------------------L0.2988-----------------------------------------|" + - "L0.2987[962,1121] 159ns 0b|----------------------------------------L0.2987-----------------------------------------|" + - "L0.2986[962,1121] 139ns 0b|----------------------------------------L0.2986-----------------------------------------|" + - "L0.2985[962,1121] 119ns 0b|----------------------------------------L0.2985-----------------------------------------|" + - "L0.2984[962,1121] 59ns 0b|----------------------------------------L0.2984-----------------------------------------|" + - "L0.2983[962,1121] 39ns 0b|----------------------------------------L0.2983-----------------------------------------|" + - "L0.2982[962,1121] 19ns 160mb|----------------------------------------L0.2982-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[962,1062] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[1063,1121] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2824, L0.2837, L0.2850, L0.2863, L0.2876, L0.2889, L0.2902, L0.2915, L0.2929, L0.3055" + - " Soft Deleting 10 files: L0.2982, L0.2983, L0.2984, L0.2985, L0.2986, L0.2987, L0.2988, L0.2989, L0.2990, L0.2991" - " Creating 2 files" - - "**** Simulation run 345, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1222]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2916[1122,1281] 199ns 0b|----------------------------------------L0.2916-----------------------------------------|" - - "L0.2903[1122,1281] 198ns 0b|----------------------------------------L0.2903-----------------------------------------|" - - "L0.2890[1122,1281] 197ns 0b|----------------------------------------L0.2890-----------------------------------------|" - - "L0.2877[1122,1281] 196ns 0b|----------------------------------------L0.2877-----------------------------------------|" - - "L0.2864[1122,1281] 195ns 0b|----------------------------------------L0.2864-----------------------------------------|" - - "L0.2851[1122,1281] 194ns 0b|----------------------------------------L0.2851-----------------------------------------|" - - "L0.2838[1122,1281] 193ns 0b|----------------------------------------L0.2838-----------------------------------------|" - - "L0.2825[1122,1281] 192ns 0b|----------------------------------------L0.2825-----------------------------------------|" - - "L0.2930[1122,1281] 19ns 160mb|----------------------------------------L0.2930-----------------------------------------|" - - "L0.3056[1122,1281] 191ns 0b|----------------------------------------L0.3056-----------------------------------------|" + - "**** Simulation run 344, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1222]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.3001[1122,1281] 199ns 0b|----------------------------------------L0.3001-----------------------------------------|" + - "L0.3000[1122,1281] 179ns 0b|----------------------------------------L0.3000-----------------------------------------|" + - "L0.2999[1122,1281] 159ns 0b|----------------------------------------L0.2999-----------------------------------------|" + - "L0.2998[1122,1281] 139ns 0b|----------------------------------------L0.2998-----------------------------------------|" + - "L0.2997[1122,1281] 119ns 0b|----------------------------------------L0.2997-----------------------------------------|" + - "L0.2996[1122,1281] 99ns 0b|----------------------------------------L0.2996-----------------------------------------|" + - "L0.2995[1122,1281] 79ns 0b|----------------------------------------L0.2995-----------------------------------------|" + - "L0.2994[1122,1281] 59ns 0b|----------------------------------------L0.2994-----------------------------------------|" + - "L0.2993[1122,1281] 39ns 0b|----------------------------------------L0.2993-----------------------------------------|" + - "L0.2992[1122,1281] 19ns 160mb|----------------------------------------L0.2992-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[1122,1222] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[1223,1281] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2825, L0.2838, L0.2851, L0.2864, L0.2877, L0.2890, L0.2903, L0.2916, L0.2930, L0.3056" + - " Soft Deleting 10 files: L0.2992, L0.2993, L0.2994, L0.2995, L0.2996, L0.2997, L0.2998, L0.2999, L0.3000, L0.3001" - " Creating 2 files" - - "**** Simulation run 346, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1382]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2917[1282,1441] 199ns 0b|----------------------------------------L0.2917-----------------------------------------|" - - "L0.2904[1282,1441] 198ns 0b|----------------------------------------L0.2904-----------------------------------------|" - - "L0.2891[1282,1441] 197ns 0b|----------------------------------------L0.2891-----------------------------------------|" - - "L0.2878[1282,1441] 196ns 0b|----------------------------------------L0.2878-----------------------------------------|" - - "L0.2865[1282,1441] 195ns 0b|----------------------------------------L0.2865-----------------------------------------|" - - "L0.2852[1282,1441] 194ns 0b|----------------------------------------L0.2852-----------------------------------------|" - - "L0.2839[1282,1441] 193ns 0b|----------------------------------------L0.2839-----------------------------------------|" - - "L0.2826[1282,1441] 192ns 0b|----------------------------------------L0.2826-----------------------------------------|" - - "L0.2931[1282,1441] 19ns 160mb|----------------------------------------L0.2931-----------------------------------------|" - - "L0.3057[1282,1441] 191ns 0b|----------------------------------------L0.3057-----------------------------------------|" + - "**** Simulation run 345, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1382]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.3011[1282,1441] 199ns 0b|----------------------------------------L0.3011-----------------------------------------|" + - "L0.3010[1282,1441] 179ns 0b|----------------------------------------L0.3010-----------------------------------------|" + - "L0.3009[1282,1441] 159ns 0b|----------------------------------------L0.3009-----------------------------------------|" + - "L0.3008[1282,1441] 139ns 0b|----------------------------------------L0.3008-----------------------------------------|" + - "L0.3007[1282,1441] 119ns 0b|----------------------------------------L0.3007-----------------------------------------|" + - "L0.3006[1282,1441] 99ns 0b|----------------------------------------L0.3006-----------------------------------------|" + - "L0.3005[1282,1441] 79ns 0b|----------------------------------------L0.3005-----------------------------------------|" + - "L0.3004[1282,1441] 59ns 0b|----------------------------------------L0.3004-----------------------------------------|" + - "L0.3003[1282,1441] 39ns 0b|----------------------------------------L0.3003-----------------------------------------|" + - "L0.3002[1282,1441] 19ns 160mb|----------------------------------------L0.3002-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[1282,1382] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[1383,1441] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2826, L0.2839, L0.2852, L0.2865, L0.2878, L0.2891, L0.2904, L0.2917, L0.2931, L0.3057" + - " Soft Deleting 10 files: L0.3002, L0.3003, L0.3004, L0.3005, L0.3006, L0.3007, L0.3008, L0.3009, L0.3010, L0.3011" - " Creating 2 files" - - "**** Simulation run 347, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1542]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2918[1442,1601] 199ns 0b|----------------------------------------L0.2918-----------------------------------------|" - - "L0.2905[1442,1601] 198ns 0b|----------------------------------------L0.2905-----------------------------------------|" - - "L0.2892[1442,1601] 197ns 0b|----------------------------------------L0.2892-----------------------------------------|" - - "L0.2879[1442,1601] 196ns 0b|----------------------------------------L0.2879-----------------------------------------|" - - "L0.2866[1442,1601] 195ns 0b|----------------------------------------L0.2866-----------------------------------------|" - - "L0.2853[1442,1601] 194ns 0b|----------------------------------------L0.2853-----------------------------------------|" - - "L0.2840[1442,1601] 193ns 0b|----------------------------------------L0.2840-----------------------------------------|" - - "L0.2827[1442,1601] 192ns 0b|----------------------------------------L0.2827-----------------------------------------|" - - "L0.2932[1442,1601] 19ns 160mb|----------------------------------------L0.2932-----------------------------------------|" - - "L0.3058[1442,1601] 191ns 0b|----------------------------------------L0.3058-----------------------------------------|" + - "**** Simulation run 346, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1542]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.3021[1442,1601] 199ns 0b|----------------------------------------L0.3021-----------------------------------------|" + - "L0.3020[1442,1601] 179ns 0b|----------------------------------------L0.3020-----------------------------------------|" + - "L0.3019[1442,1601] 159ns 0b|----------------------------------------L0.3019-----------------------------------------|" + - "L0.3018[1442,1601] 139ns 0b|----------------------------------------L0.3018-----------------------------------------|" + - "L0.3017[1442,1601] 119ns 0b|----------------------------------------L0.3017-----------------------------------------|" + - "L0.3016[1442,1601] 99ns 0b|----------------------------------------L0.3016-----------------------------------------|" + - "L0.3015[1442,1601] 79ns 0b|----------------------------------------L0.3015-----------------------------------------|" + - "L0.3014[1442,1601] 59ns 0b|----------------------------------------L0.3014-----------------------------------------|" + - "L0.3013[1442,1601] 39ns 0b|----------------------------------------L0.3013-----------------------------------------|" + - "L0.3012[1442,1601] 19ns 160mb|----------------------------------------L0.3012-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[1442,1542] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[1543,1601] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2827, L0.2840, L0.2853, L0.2866, L0.2879, L0.2892, L0.2905, L0.2918, L0.2932, L0.3058" + - " Soft Deleting 10 files: L0.3012, L0.3013, L0.3014, L0.3015, L0.3016, L0.3017, L0.3018, L0.3019, L0.3020, L0.3021" - " Creating 2 files" - - "**** Simulation run 348, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1702]). 10 Input Files, 160mb total:" - - "L0 " - - "L0.2919[1602,1761] 199ns 0b|----------------------------------------L0.2919-----------------------------------------|" - - "L0.2906[1602,1761] 198ns 0b|----------------------------------------L0.2906-----------------------------------------|" - - "L0.2893[1602,1761] 197ns 0b|----------------------------------------L0.2893-----------------------------------------|" - - "L0.2880[1602,1761] 196ns 0b|----------------------------------------L0.2880-----------------------------------------|" - - "L0.2867[1602,1761] 195ns 0b|----------------------------------------L0.2867-----------------------------------------|" - - "L0.2854[1602,1761] 194ns 0b|----------------------------------------L0.2854-----------------------------------------|" - - "L0.2841[1602,1761] 193ns 0b|----------------------------------------L0.2841-----------------------------------------|" - - "L0.2828[1602,1761] 192ns 0b|----------------------------------------L0.2828-----------------------------------------|" - - "L0.2933[1602,1761] 19ns 160mb|----------------------------------------L0.2933-----------------------------------------|" - - "L0.3059[1602,1761] 191ns 0b|----------------------------------------L0.3059-----------------------------------------|" + - "**** Simulation run 347, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1702]). 
10 Input Files, 160mb total:" + - "L0 " + - "L0.3031[1602,1761] 199ns 0b|----------------------------------------L0.3031-----------------------------------------|" + - "L0.3030[1602,1761] 179ns 0b|----------------------------------------L0.3030-----------------------------------------|" + - "L0.3029[1602,1761] 159ns 0b|----------------------------------------L0.3029-----------------------------------------|" + - "L0.3028[1602,1761] 139ns 0b|----------------------------------------L0.3028-----------------------------------------|" + - "L0.3027[1602,1761] 119ns 0b|----------------------------------------L0.3027-----------------------------------------|" + - "L0.3026[1602,1761] 99ns 0b|----------------------------------------L0.3026-----------------------------------------|" + - "L0.3025[1602,1761] 79ns 0b|----------------------------------------L0.3025-----------------------------------------|" + - "L0.3024[1602,1761] 59ns 0b|----------------------------------------L0.3024-----------------------------------------|" + - "L0.3023[1602,1761] 39ns 0b|----------------------------------------L0.3023-----------------------------------------|" + - "L0.3022[1602,1761] 19ns 160mb|----------------------------------------L0.3022-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 160mb total:" - "L1 " - "L1.?[1602,1702] 199ns 101mb|-------------------------L1.?-------------------------| " - "L1.?[1703,1761] 199ns 59mb |-------------L1.?-------------| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2828, L0.2841, L0.2854, L0.2867, L0.2880, L0.2893, L0.2906, L0.2919, L0.2933, L0.3059" + - " Soft Deleting 10 files: L0.3022, L0.3023, L0.3024, L0.3025, L0.3026, L0.3027, L0.3028, L0.3029, L0.3030, L0.3031" - " Creating 2 files" - - "**** Simulation run 349, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1862, 1962]). 10 Input Files, 239mb total:" - - "L0 " - - "L0.2920[1762,2000] 199ns 0b|----------------------------------------L0.2920-----------------------------------------|" - - "L0.2907[1762,2000] 198ns 0b|----------------------------------------L0.2907-----------------------------------------|" - - "L0.2894[1762,2000] 197ns 0b|----------------------------------------L0.2894-----------------------------------------|" - - "L0.2881[1762,2000] 196ns 0b|----------------------------------------L0.2881-----------------------------------------|" - - "L0.2868[1762,2000] 195ns 0b|----------------------------------------L0.2868-----------------------------------------|" - - "L0.2855[1762,2000] 194ns 0b|----------------------------------------L0.2855-----------------------------------------|" - - "L0.2842[1762,2000] 193ns 0b|----------------------------------------L0.2842-----------------------------------------|" - - "L0.2829[1762,2000] 192ns 0b|----------------------------------------L0.2829-----------------------------------------|" - - "L0.2936[1762,2000] 19ns 239mb|----------------------------------------L0.2936-----------------------------------------|" - - "L0.3060[1762,2000] 191ns 0b|----------------------------------------L0.3060-----------------------------------------|" + - "**** Simulation run 348, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1862, 1962]). 
10 Input Files, 239mb total:" + - "L0 " + - "L0.3041[1762,2000] 119ns 0b|----------------------------------------L0.3041-----------------------------------------|" + - "L0.3040[1762,2000] 99ns 0b|----------------------------------------L0.3040-----------------------------------------|" + - "L0.3039[1762,2000] 199ns 0b|----------------------------------------L0.3039-----------------------------------------|" + - "L0.3038[1762,2000] 179ns 0b|----------------------------------------L0.3038-----------------------------------------|" + - "L0.3037[1762,2000] 159ns 0b|----------------------------------------L0.3037-----------------------------------------|" + - "L0.3036[1762,2000] 139ns 0b|----------------------------------------L0.3036-----------------------------------------|" + - "L0.3035[1762,2000] 79ns 0b|----------------------------------------L0.3035-----------------------------------------|" + - "L0.3034[1762,2000] 59ns 0b|----------------------------------------L0.3034-----------------------------------------|" + - "L0.3033[1762,2000] 39ns 0b|----------------------------------------L0.3033-----------------------------------------|" + - "L0.3032[1762,2000] 19ns 239mb|----------------------------------------L0.3032-----------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 239mb total:" - "L1 " - "L1.?[1762,1862] 199ns 101mb|---------------L1.?----------------| " - "L1.?[1863,1962] 199ns 100mb |---------------L1.?----------------| " - "L1.?[1963,2000] 199ns 38mb |---L1.?----| " - "Committing partition 1:" - - " Soft Deleting 10 files: L0.2829, L0.2842, L0.2855, L0.2868, L0.2881, L0.2894, L0.2907, L0.2920, L0.2936, L0.3060" + - " Soft Deleting 10 files: L0.3032, L0.3033, L0.3034, L0.3035, L0.3036, L0.3037, L0.3038, L0.3039, L0.3040, L0.3041" - " Creating 3 files" - - "**** Simulation run 350, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1592392]). 3 Input Files, 38mb total:" + - "**** Simulation run 349, type=compact(TotalSizeLessThanMaxCompactSize). 
9 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.3050[2001,2086] 199ns |----------------------------------------L0.3050-----------------------------------------|" + - "L0.3049[2001,2086] 179ns |----------------------------------------L0.3049-----------------------------------------|" + - "L0.3048[2001,2086] 159ns |----------------------------------------L0.3048-----------------------------------------|" + - "L0.3047[2001,2086] 139ns |----------------------------------------L0.3047-----------------------------------------|" + - "L0.3046[2001,2086] 119ns |----------------------------------------L0.3046-----------------------------------------|" + - "L0.3045[2001,2086] 99ns |----------------------------------------L0.3045-----------------------------------------|" + - "L0.3044[2001,2086] 79ns |----------------------------------------L0.3044-----------------------------------------|" + - "L0.3043[2001,2086] 59ns |----------------------------------------L0.3043-----------------------------------------|" + - "L0.3042[2001,2086] 39ns |----------------------------------------L0.3042-----------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L1, all files 0b " + - "L1.?[2001,2086] 199ns |------------------------------------------L1.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 9 files: L0.3042, L0.3043, L0.3044, L0.3045, L0.3046, L0.3047, L0.3048, L0.3049, L0.3050" + - " Creating 1 files" + - "**** Simulation run 350, type=compact(TotalSizeLessThanMaxCompactSize). 9 Input Files, 2kb total:" + - "L0, all files 200b " + - "L0.3059[2087,1990000] 199ns|----------------------------------------L0.3059-----------------------------------------|" + - "L0.3058[2087,1790000] 179ns|-----------------------------------L0.3058------------------------------------| " + - "L0.3057[2087,1590000] 159ns|-------------------------------L0.3057-------------------------------| " + - "L0.3056[2087,1390000] 139ns|--------------------------L0.3056---------------------------| " + - "L0.3055[2087,1190000] 119ns|----------------------L0.3055----------------------| " + - "L0.3054[2087,990000] 99ns|-----------------L0.3054------------------| " + - "L0.3053[2087,790000] 79ns|-------------L0.3053-------------| " + - "L0.3052[2087,590000] 59ns|--------L0.3052---------| " + - "L0.3051[2087,390000] 39ns|----L0.3051----| " + - "**** 1 Output Files (parquet_file_id not yet assigned), 2kb total:" + - "L1, all files 2kb " + - "L1.?[2087,1990000] 199ns |------------------------------------------L1.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 9 files: L0.3051, L0.3052, L0.3053, L0.3054, L0.3055, L0.3056, L0.3057, L0.3058, L0.3059" + - " Creating 1 files" + - "**** Simulation run 351, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1592392]). 
3 Input Files, 38mb total:" - "L1 " - - "L1.3085[1963,2000] 199ns 38mb|L1.3085| " - - "L1.3062[2087,1990000] 199ns 2kb|----------------------------------------L1.3062----------------------------------------| " - - "L1.3061[2001,2086] 199ns 0b|L1.3061| " + - "L1.3086[2087,1990000] 199ns 2kb|----------------------------------------L1.3086----------------------------------------| " + - "L1.3085[2001,2086] 199ns 0b|L1.3085| " + - "L1.3084[1963,2000] 199ns 38mb|L1.3084| " - "**** 2 Output Files (parquet_file_id not yet assigned), 38mb total:" - "L2 " - "L2.?[1963,1592392] 199ns 30mb|--------------------------------L2.?---------------------------------| " - "L2.?[1592393,1990000] 199ns 8mb |-----L2.?------| " - "Committing partition 1:" - - " Soft Deleting 3 files: L1.3061, L1.3062, L1.3085" - - " Upgrading 24 files level to CompactionLevel::L2: L1.3035, L1.3036, L1.3063, L1.3064, L1.3065, L1.3066, L1.3067, L1.3068, L1.3069, L1.3070, L1.3071, L1.3072, L1.3073, L1.3074, L1.3075, L1.3076, L1.3077, L1.3078, L1.3079, L1.3080, L1.3081, L1.3082, L1.3083, L1.3084" + - " Soft Deleting 3 files: L1.3084, L1.3085, L1.3086" + - " Upgrading 24 files level to CompactionLevel::L2: L1.3060, L1.3061, L1.3062, L1.3063, L1.3064, L1.3065, L1.3066, L1.3067, L1.3068, L1.3069, L1.3070, L1.3071, L1.3072, L1.3073, L1.3074, L1.3075, L1.3076, L1.3077, L1.3078, L1.3079, L1.3080, L1.3081, L1.3082, L1.3083" - " Creating 2 files" - "**** Final Output Files (5.9gb written)" - "L2 " - - "L2.3035[1,101] 161ns 101mb|L2.3035| " - - "L2.3036[102,161] 161ns 60mb|L2.3036| " - - "L2.3063[162,262] 199ns 101mb|L2.3063| " - - "L2.3064[263,321] 199ns 59mb|L2.3064| " - - "L2.3065[322,422] 199ns 101mb|L2.3065| " - - "L2.3066[423,481] 199ns 59mb|L2.3066| " - - "L2.3067[482,582] 199ns 101mb|L2.3067| " - - "L2.3068[583,641] 199ns 59mb|L2.3068| " - - "L2.3069[642,742] 199ns 101mb|L2.3069| " - - "L2.3070[743,801] 199ns 59mb|L2.3070| " - - "L2.3071[802,902] 199ns 101mb|L2.3071| " - - "L2.3072[903,961] 199ns 59mb|L2.3072| " - - "L2.3073[962,1062] 199ns 101mb|L2.3073| " - - "L2.3074[1063,1121] 199ns 59mb|L2.3074| " - - "L2.3075[1122,1222] 199ns 101mb|L2.3075| " - - "L2.3076[1223,1281] 199ns 59mb|L2.3076| " - - "L2.3077[1282,1382] 199ns 101mb|L2.3077| " - - "L2.3078[1383,1441] 199ns 59mb|L2.3078| " - - "L2.3079[1442,1542] 199ns 101mb|L2.3079| " - - "L2.3080[1543,1601] 199ns 59mb|L2.3080| " - - "L2.3081[1602,1702] 199ns 101mb|L2.3081| " - - "L2.3082[1703,1761] 199ns 59mb|L2.3082| " - - "L2.3083[1762,1862] 199ns 101mb|L2.3083| " - - "L2.3084[1863,1962] 199ns 100mb|L2.3084| " - - "L2.3086[1963,1592392] 199ns 30mb|-------------------------------L2.3086-------------------------------| " - - "L2.3087[1592393,1990000] 199ns 8mb |----L2.3087----| " + - "L2.3060[1,101] 161ns 101mb|L2.3060| " + - "L2.3061[102,161] 161ns 60mb|L2.3061| " + - "L2.3062[162,262] 199ns 101mb|L2.3062| " + - "L2.3063[263,321] 199ns 59mb|L2.3063| " + - "L2.3064[322,422] 199ns 101mb|L2.3064| " + - "L2.3065[423,481] 199ns 59mb|L2.3065| " + - "L2.3066[482,582] 199ns 101mb|L2.3066| " + - "L2.3067[583,641] 199ns 59mb|L2.3067| " + - "L2.3068[642,742] 199ns 101mb|L2.3068| " + - "L2.3069[743,801] 199ns 59mb|L2.3069| " + - "L2.3070[802,902] 199ns 101mb|L2.3070| " + - "L2.3071[903,961] 199ns 59mb|L2.3071| " + - "L2.3072[962,1062] 199ns 101mb|L2.3072| " + - "L2.3073[1063,1121] 199ns 59mb|L2.3073| " + - "L2.3074[1122,1222] 199ns 101mb|L2.3074| " + - "L2.3075[1223,1281] 199ns 59mb|L2.3075| " + - "L2.3076[1282,1382] 199ns 101mb|L2.3076| " + - "L2.3077[1383,1441] 199ns 59mb|L2.3077| " + - 
"L2.3078[1442,1542] 199ns 101mb|L2.3078| " + - "L2.3079[1543,1601] 199ns 59mb|L2.3079| " + - "L2.3080[1602,1702] 199ns 101mb|L2.3080| " + - "L2.3081[1703,1761] 199ns 59mb|L2.3081| " + - "L2.3082[1762,1862] 199ns 101mb|L2.3082| " + - "L2.3083[1863,1962] 199ns 100mb|L2.3083| " + - "L2.3087[1963,1592392] 199ns 30mb|-------------------------------L2.3087-------------------------------| " + - "L2.3088[1592393,1990000] 199ns 8mb |----L2.3088----| " - "**** Breakdown of where bytes were written" - 1.95gb written by compact(ManySmallFiles) - 1.95gb written by split(VerticalSplit) @@ -10884,23 +10893,23 @@ async fn split_precent_loop() { - "L1.3[1676005158277000000,1676010156669000000] 1676010160.05s 58mb |L1.3| " - "WARNING: file L0.40[1676020762355000000,1676036230752000000] 1676036233.84s 159mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.43[1676039845773000000,1676063836202000000] 1676063839.07s 242mb exceeds soft limit 100mb by more than 50%" - - "**** Final Output Files (1.98gb written)" + - "**** Final Output Files (3.12gb written)" - "L2 " - - "L2.220[1676039875848666667,1676049206274887385] 1676066475.26s 100mb |-L2.220-| " - - "L2.224[1675987200001000000,1675995221280934243] 1676066475.26s 100mb|L2.224-| " - - "L2.225[1675995221280934244,1676003242560868486] 1676066475.26s 100mb |L2.225-| " - - "L2.230[1676022715716555555,1676032446190144752] 1676066475.26s 100mb |-L2.230--| " - - "L2.231[1676032446190144753,1676039875848666666] 1676066475.26s 76mb |L2.231| " - - "L2.232[1676003242560868487,1676011412551749425] 1676066475.26s 100mb |L2.232-| " - - "L2.233[1676011412551749426,1676019582542630363] 1676066475.26s 100mb |L2.233-| " - - "L2.234[1676019582542630364,1676022715716555554] 1676066475.26s 38mb |L2.234| " - - "L2.235[1676049206274887386,1676059443970289442] 1676066475.26s 100mb |-L2.235--| " - - "L2.236[1676059443970289443,1676066212011000000] 1676066475.26s 66mb |L2.236|" + - "L2.181[1676026707767500000,1676037142262153774] 1676066475.26s 100mb |-L2.181--| " + - "L2.213[1675987200001000000,1675994138427581805] 1676066475.26s 100mb|L2.213| " + - "L2.214[1675994138427581806,1676001076854163610] 1676066475.26s 100mb |L2.214| " + - "L2.219[1676001076854163611,1676009960845796297] 1676066475.26s 100mb |-L2.219-| " + - "L2.220[1676009960845796298,1676018844837428983] 1676066475.26s 100mb |-L2.220-| " + - "L2.221[1676018844837428984,1676026707767499999] 1676066475.26s 89mb |L2.221| " + - "L2.222[1676037142262153775,1676047087212467550] 1676066475.26s 100mb |-L2.222--| " + - "L2.223[1676047087212467551,1676057032162781325] 1676066475.26s 100mb |-L2.223--| " + - "L2.224[1676057032162781326,1676066212011000000] 1676066475.26s 92mb |-L2.224-| " - "**** Breakdown of where bytes were written" - - 594mb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) - - 650mb written by split(VerticalSplit) - - 691mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) - - 93mb written by compact(ManySmallFiles) + - 1.5gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) + - 247mb written by split(ReduceOverlap) + - 513mb written by split(VerticalSplit) + - 808mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) + - 88mb written by compact(ManySmallFiles) "### ); } @@ -11388,313 +11397,314 @@ async fn very_big_overlapped_backlog() { - "L2.398[194000,195999] 97ns 100mb |L2.398|" - "L2.399[196000,197999] 98ns 100mb |L2.399|" - "L2.400[198000,199999] 99ns 100mb |L2.400|" - - "**** Final Output Files 
(40.21gb written)" + - "**** Final Output Files (35.55gb written)" - "L2 " - - "L2.823[0,980] 166ns 100mb|L2.823| " - - "L2.824[981,1960] 166ns 100mb|L2.824| " - - "L2.825[1961,1999] 166ns 4mb|L2.825| " - - "L2.826[2000,2980] 166ns 100mb|L2.826| " - - "L2.827[2981,3960] 166ns 100mb |L2.827| " - - "L2.828[3961,3999] 166ns 4mb |L2.828| " - - "L2.829[4000,4980] 166ns 100mb |L2.829| " - - "L2.830[4981,5960] 166ns 100mb |L2.830| " - - "L2.831[5961,5999] 166ns 4mb |L2.831| " - - "L2.832[6000,6980] 166ns 100mb |L2.832| " - - "L2.833[6981,7960] 166ns 100mb |L2.833| " - - "L2.834[7961,7999] 166ns 4mb |L2.834| " - - "L2.835[8000,8980] 166ns 100mb |L2.835| " - - "L2.836[8981,9960] 166ns 100mb |L2.836| " - - "L2.837[9961,9999] 166ns 4mb |L2.837| " - - "L2.838[10000,10980] 166ns 100mb |L2.838| " - - "L2.839[10981,11960] 166ns 100mb |L2.839| " - - "L2.840[11961,11999] 166ns 4mb |L2.840| " - - "L2.841[12000,12980] 166ns 100mb |L2.841| " - - "L2.842[12981,13960] 166ns 100mb |L2.842| " - - "L2.843[13961,13999] 166ns 4mb |L2.843| " - - "L2.844[14000,14980] 166ns 100mb |L2.844| " - - "L2.845[14981,15960] 166ns 100mb |L2.845| " - - "L2.846[15961,15999] 166ns 4mb |L2.846| " - - "L2.847[16000,16980] 166ns 100mb |L2.847| " - - "L2.848[16981,17960] 166ns 100mb |L2.848| " - - "L2.849[17961,17999] 166ns 4mb |L2.849| " - - "L2.850[18000,18980] 166ns 100mb |L2.850| " - - "L2.851[18981,19960] 166ns 100mb |L2.851| " - - "L2.852[19961,19999] 166ns 4mb |L2.852| " - - "L2.853[20000,20980] 166ns 100mb |L2.853| " - - "L2.854[20981,21960] 166ns 100mb |L2.854| " - - "L2.855[21961,21999] 166ns 4mb |L2.855| " - - "L2.856[22000,22980] 166ns 100mb |L2.856| " - - "L2.857[22981,23960] 166ns 100mb |L2.857| " - - "L2.858[23961,23999] 166ns 4mb |L2.858| " - - "L2.859[24000,24980] 166ns 100mb |L2.859| " - - "L2.860[24981,25960] 166ns 100mb |L2.860| " - - "L2.861[25961,25999] 166ns 4mb |L2.861| " - - "L2.862[26000,26980] 166ns 100mb |L2.862| " - - "L2.863[26981,27960] 166ns 100mb |L2.863| " - - "L2.864[27961,27999] 166ns 4mb |L2.864| " - - "L2.865[28000,28980] 166ns 100mb |L2.865| " - - "L2.866[28981,29960] 166ns 100mb |L2.866| " - - "L2.867[29961,29999] 166ns 4mb |L2.867| " - - "L2.868[30000,30980] 166ns 100mb |L2.868| " - - "L2.869[30981,31960] 166ns 100mb |L2.869| " - - "L2.870[31961,31999] 166ns 4mb |L2.870| " - - "L2.871[32000,32980] 166ns 100mb |L2.871| " - - "L2.872[32981,33960] 166ns 100mb |L2.872| " - - "L2.873[33961,33999] 166ns 4mb |L2.873| " - - "L2.874[34000,34980] 166ns 100mb |L2.874| " - - "L2.875[34981,35960] 166ns 100mb |L2.875| " - - "L2.876[35961,35999] 166ns 4mb |L2.876| " - - "L2.877[36000,36980] 166ns 100mb |L2.877| " - - "L2.878[36981,37960] 166ns 100mb |L2.878| " - - "L2.879[37961,37999] 166ns 4mb |L2.879| " - - "L2.880[38000,38980] 166ns 100mb |L2.880| " - - "L2.881[38981,39960] 166ns 100mb |L2.881| " - - "L2.882[39961,39999] 166ns 4mb |L2.882| " - - "L2.883[40000,40980] 166ns 100mb |L2.883| " - - "L2.884[40981,41960] 166ns 100mb |L2.884| " - - "L2.885[41961,41999] 166ns 4mb |L2.885| " - - "L2.886[42000,42980] 166ns 100mb |L2.886| " - - "L2.887[42981,43960] 166ns 100mb |L2.887| " - - "L2.888[43961,43999] 166ns 4mb |L2.888| " - - "L2.889[44000,44980] 166ns 100mb |L2.889| " - - "L2.890[44981,45960] 166ns 100mb |L2.890| " - - "L2.891[45961,45999] 166ns 4mb |L2.891| " - - "L2.892[46000,46980] 166ns 100mb |L2.892| " - - "L2.893[46981,47960] 166ns 100mb |L2.893| " - - "L2.894[47961,47999] 166ns 4mb |L2.894| " - - "L2.895[48000,48980] 166ns 100mb |L2.895| " - - "L2.896[48981,49960] 166ns 100mb |L2.896| " - - 
"L2.897[49961,49999] 166ns 4mb |L2.897| " - - "L2.898[50000,50980] 166ns 100mb |L2.898| " - - "L2.899[50981,51960] 166ns 100mb |L2.899| " - - "L2.900[51961,51999] 166ns 4mb |L2.900| " - - "L2.901[52000,52980] 166ns 100mb |L2.901| " - - "L2.902[52981,53960] 166ns 100mb |L2.902| " - - "L2.903[53961,53999] 166ns 4mb |L2.903| " - - "L2.904[54000,54980] 166ns 100mb |L2.904| " - - "L2.905[54981,55960] 166ns 100mb |L2.905| " - - "L2.906[55961,55999] 166ns 4mb |L2.906| " - - "L2.907[56000,56980] 166ns 100mb |L2.907| " - - "L2.908[56981,57960] 166ns 100mb |L2.908| " - - "L2.909[57961,57999] 166ns 4mb |L2.909| " - - "L2.910[58000,58980] 166ns 100mb |L2.910| " - - "L2.911[58981,59960] 166ns 100mb |L2.911| " - - "L2.912[59961,59999] 166ns 4mb |L2.912| " - - "L2.913[60000,60980] 166ns 100mb |L2.913| " - - "L2.914[60981,61960] 166ns 100mb |L2.914| " - - "L2.915[61961,61999] 166ns 4mb |L2.915| " - - "L2.916[62000,62980] 166ns 100mb |L2.916| " - - "L2.917[62981,63960] 166ns 100mb |L2.917| " - - "L2.918[63961,63999] 166ns 4mb |L2.918| " - - "L2.919[64000,64980] 166ns 100mb |L2.919| " - - "L2.920[64981,65960] 166ns 100mb |L2.920| " - - "L2.921[65961,65999] 166ns 4mb |L2.921| " - - "L2.922[66000,66980] 233ns 100mb |L2.922| " - - "L2.923[66981,67960] 233ns 100mb |L2.923| " - - "L2.924[67961,67999] 233ns 4mb |L2.924| " - - "L2.925[68000,68980] 233ns 100mb |L2.925| " - - "L2.926[68981,69960] 233ns 100mb |L2.926| " - - "L2.927[69961,69999] 233ns 4mb |L2.927| " - - "L2.928[70000,70980] 233ns 100mb |L2.928| " - - "L2.929[70981,71960] 233ns 100mb |L2.929| " - - "L2.930[71961,71999] 233ns 4mb |L2.930| " - - "L2.931[72000,72980] 233ns 100mb |L2.931| " - - "L2.932[72981,73960] 233ns 100mb |L2.932| " - - "L2.933[73961,73999] 233ns 4mb |L2.933| " - - "L2.934[74000,74980] 233ns 100mb |L2.934| " - - "L2.935[74981,75960] 233ns 100mb |L2.935| " - - "L2.936[75961,75999] 233ns 4mb |L2.936| " - - "L2.937[76000,76980] 233ns 100mb |L2.937| " - - "L2.938[76981,77960] 233ns 100mb |L2.938| " - - "L2.939[77961,77999] 233ns 4mb |L2.939| " - - "L2.940[78000,78980] 233ns 100mb |L2.940| " - - "L2.941[78981,79960] 233ns 100mb |L2.941| " - - "L2.942[79961,79999] 233ns 4mb |L2.942| " - - "L2.943[80000,80980] 233ns 100mb |L2.943| " - - "L2.944[80981,81960] 233ns 100mb |L2.944| " - - "L2.945[81961,81999] 233ns 4mb |L2.945| " - - "L2.946[82000,82980] 233ns 100mb |L2.946| " - - "L2.947[82981,83960] 233ns 100mb |L2.947| " - - "L2.948[83961,83999] 233ns 4mb |L2.948| " - - "L2.949[84000,84980] 233ns 100mb |L2.949| " - - "L2.950[84981,85960] 233ns 100mb |L2.950| " - - "L2.951[85961,85999] 233ns 4mb |L2.951| " - - "L2.952[86000,86980] 233ns 100mb |L2.952| " - - "L2.953[86981,87960] 233ns 100mb |L2.953| " - - "L2.954[87961,87999] 233ns 4mb |L2.954| " - - "L2.955[88000,88980] 233ns 100mb |L2.955| " - - "L2.956[88981,89960] 233ns 100mb |L2.956| " - - "L2.957[89961,89999] 233ns 4mb |L2.957| " - - "L2.958[90000,90980] 233ns 100mb |L2.958| " - - "L2.959[90981,91960] 233ns 100mb |L2.959| " - - "L2.960[91961,91999] 233ns 4mb |L2.960| " - - "L2.961[92000,92980] 233ns 100mb |L2.961| " - - "L2.962[92981,93960] 233ns 100mb |L2.962| " - - "L2.963[93961,93999] 233ns 4mb |L2.963| " - - "L2.964[94000,94980] 233ns 100mb |L2.964| " - - "L2.965[94981,95960] 233ns 100mb |L2.965| " - - "L2.966[95961,95999] 233ns 4mb |L2.966| " - - "L2.967[96000,96980] 233ns 100mb |L2.967| " - - "L2.968[96981,97960] 233ns 100mb |L2.968| " - - "L2.969[97961,97999] 233ns 4mb |L2.969| " - - "L2.970[98000,98980] 233ns 100mb |L2.970| " - - "L2.971[98981,99960] 233ns 100mb |L2.971| " - - 
"L2.972[99961,99999] 233ns 4mb |L2.972| " - - "L2.973[100000,100980] 233ns 100mb |L2.973| " - - "L2.974[100981,101960] 233ns 100mb |L2.974| " - - "L2.975[101961,101999] 233ns 4mb |L2.975| " - - "L2.976[102000,102980] 233ns 100mb |L2.976| " - - "L2.977[102981,103960] 233ns 100mb |L2.977| " - - "L2.978[103961,103999] 233ns 4mb |L2.978| " - - "L2.979[104000,104980] 233ns 100mb |L2.979| " - - "L2.980[104981,105960] 233ns 100mb |L2.980| " - - "L2.981[105961,105999] 233ns 4mb |L2.981| " - - "L2.982[106000,106980] 233ns 100mb |L2.982| " - - "L2.983[106981,107960] 233ns 100mb |L2.983| " - - "L2.984[107961,107999] 233ns 4mb |L2.984| " - - "L2.985[108000,108980] 233ns 100mb |L2.985| " - - "L2.986[108981,109960] 233ns 100mb |L2.986| " - - "L2.987[109961,109999] 233ns 4mb |L2.987| " - - "L2.988[110000,110980] 233ns 100mb |L2.988| " - - "L2.989[110981,111960] 233ns 100mb |L2.989| " - - "L2.990[111961,111999] 233ns 4mb |L2.990| " - - "L2.991[112000,112980] 233ns 100mb |L2.991| " - - "L2.992[112981,113960] 233ns 100mb |L2.992| " - - "L2.993[113961,113999] 233ns 4mb |L2.993| " - - "L2.994[114000,114980] 233ns 100mb |L2.994| " - - "L2.995[114981,115960] 233ns 100mb |L2.995| " - - "L2.996[115961,115999] 233ns 4mb |L2.996| " - - "L2.997[116000,116980] 233ns 100mb |L2.997| " - - "L2.998[116981,117960] 233ns 100mb |L2.998| " - - "L2.999[117961,117999] 233ns 4mb |L2.999| " - - "L2.1000[118000,118980] 233ns 100mb |L2.1000| " - - "L2.1001[118981,119960] 233ns 100mb |L2.1001| " - - "L2.1002[119961,119999] 233ns 4mb |L2.1002| " - - "L2.1003[120000,120980] 233ns 100mb |L2.1003| " - - "L2.1004[120981,121960] 233ns 100mb |L2.1004| " - - "L2.1005[121961,121999] 233ns 4mb |L2.1005| " - - "L2.1006[122000,122980] 233ns 100mb |L2.1006| " - - "L2.1007[122981,123960] 233ns 100mb |L2.1007| " - - "L2.1008[123961,123999] 233ns 4mb |L2.1008| " - - "L2.1009[124000,124980] 233ns 100mb |L2.1009| " - - "L2.1010[124981,125960] 233ns 100mb |L2.1010| " - - "L2.1011[125961,125999] 233ns 4mb |L2.1011| " - - "L2.1012[126000,126980] 233ns 100mb |L2.1012| " - - "L2.1013[126981,127960] 233ns 100mb |L2.1013| " - - "L2.1014[127961,127999] 233ns 4mb |L2.1014| " - - "L2.1015[128000,128980] 233ns 100mb |L2.1015| " - - "L2.1016[128981,129960] 233ns 100mb |L2.1016| " - - "L2.1017[129961,129999] 233ns 4mb |L2.1017| " - - "L2.1018[130000,130980] 233ns 100mb |L2.1018| " - - "L2.1019[130981,131960] 233ns 100mb |L2.1019| " - - "L2.1020[131961,131999] 233ns 4mb |L2.1020| " - - "L2.1021[132000,132974] 292ns 100mb |L2.1021| " - - "L2.1022[132975,133948] 292ns 100mb |L2.1022| " - - "L2.1023[133949,133999] 292ns 5mb |L2.1023| " - - "L2.1024[134000,134962] 292ns 100mb |L2.1024| " - - "L2.1025[134963,135924] 292ns 100mb |L2.1025| " - - "L2.1026[135925,135999] 292ns 8mb |L2.1026| " - - "L2.1027[136000,136962] 292ns 100mb |L2.1027| " - - "L2.1028[136963,137924] 292ns 100mb |L2.1028| " - - "L2.1029[137925,137999] 292ns 8mb |L2.1029| " - - "L2.1030[138000,138962] 292ns 100mb |L2.1030| " - - "L2.1031[138963,139924] 292ns 100mb |L2.1031| " - - "L2.1032[139925,139999] 292ns 8mb |L2.1032| " - - "L2.1033[140000,140962] 292ns 100mb |L2.1033| " - - "L2.1034[140963,141924] 292ns 100mb |L2.1034| " - - "L2.1035[141925,141999] 292ns 8mb |L2.1035| " - - "L2.1036[142000,142962] 292ns 100mb |L2.1036| " - - "L2.1037[142963,143924] 292ns 100mb |L2.1037| " - - "L2.1038[143925,143999] 292ns 8mb |L2.1038| " - - "L2.1039[144000,144962] 292ns 100mb |L2.1039| " - - "L2.1040[144963,145924] 292ns 100mb |L2.1040| " - - "L2.1041[145925,145999] 292ns 8mb |L2.1041| " - - 
"L2.1042[146000,146962] 292ns 100mb |L2.1042| " - - "L2.1043[146963,147924] 292ns 100mb |L2.1043| " - - "L2.1044[147925,147999] 292ns 8mb |L2.1044| " - - "L2.1045[148000,148962] 292ns 100mb |L2.1045| " - - "L2.1046[148963,149924] 292ns 100mb |L2.1046| " - - "L2.1047[149925,149999] 292ns 8mb |L2.1047| " - - "L2.1048[150000,150962] 292ns 100mb |L2.1048| " - - "L2.1049[150963,151924] 292ns 100mb |L2.1049| " - - "L2.1050[151925,151999] 292ns 8mb |L2.1050| " - - "L2.1051[152000,152962] 292ns 100mb |L2.1051| " - - "L2.1052[152963,153924] 292ns 100mb |L2.1052| " - - "L2.1053[153925,153999] 292ns 8mb |L2.1053| " - - "L2.1054[154000,154962] 292ns 100mb |L2.1054| " - - "L2.1055[154963,155924] 292ns 100mb |L2.1055| " - - "L2.1056[155925,155999] 292ns 8mb |L2.1056| " - - "L2.1057[156000,156962] 292ns 100mb |L2.1057| " - - "L2.1058[156963,157924] 292ns 100mb |L2.1058| " - - "L2.1059[157925,157999] 292ns 8mb |L2.1059| " - - "L2.1060[158000,158962] 292ns 100mb |L2.1060| " - - "L2.1061[158963,159924] 292ns 100mb |L2.1061| " - - "L2.1062[159925,159999] 292ns 8mb |L2.1062| " - - "L2.1063[160000,160962] 292ns 100mb |L2.1063| " - - "L2.1064[160963,161924] 292ns 100mb |L2.1064| " - - "L2.1065[161925,161999] 292ns 8mb |L2.1065| " - - "L2.1066[162000,162962] 292ns 100mb |L2.1066| " - - "L2.1067[162963,163924] 292ns 100mb |L2.1067| " - - "L2.1068[163925,163999] 292ns 8mb |L2.1068| " - - "L2.1069[164000,164962] 292ns 100mb |L2.1069| " - - "L2.1070[164963,165924] 292ns 100mb |L2.1070| " - - "L2.1071[165925,165999] 292ns 8mb |L2.1071| " - - "L2.1072[166000,166962] 292ns 100mb |L2.1072| " - - "L2.1073[166963,167924] 292ns 100mb |L2.1073| " - - "L2.1074[167925,167999] 292ns 8mb |L2.1074| " - - "L2.1075[168000,168962] 292ns 100mb |L2.1075| " - - "L2.1076[168963,169924] 292ns 100mb |L2.1076| " - - "L2.1077[169925,169999] 292ns 8mb |L2.1077| " - - "L2.1078[170000,170962] 292ns 100mb |L2.1078| " - - "L2.1079[170963,171924] 292ns 100mb |L2.1079| " - - "L2.1080[171925,171999] 292ns 8mb |L2.1080| " - - "L2.1081[172000,172962] 292ns 100mb |L2.1081| " - - "L2.1082[172963,173924] 292ns 100mb |L2.1082| " - - "L2.1083[173925,173999] 292ns 8mb |L2.1083| " - - "L2.1084[174000,174962] 292ns 100mb |L2.1084| " - - "L2.1085[174963,175924] 292ns 100mb |L2.1085| " - - "L2.1086[175925,175999] 292ns 8mb |L2.1086| " - - "L2.1087[176000,176962] 292ns 100mb |L2.1087| " - - "L2.1088[176963,177924] 292ns 100mb |L2.1088| " - - "L2.1089[177925,177999] 292ns 8mb |L2.1089| " - - "L2.1090[178000,178962] 292ns 100mb |L2.1090| " - - "L2.1091[178963,179924] 292ns 100mb |L2.1091| " - - "L2.1092[179925,179999] 292ns 8mb |L2.1092| " - - "L2.1093[180000,180962] 292ns 100mb |L2.1093|" - - "L2.1094[180963,181924] 292ns 100mb |L2.1094|" - - "L2.1095[181925,181999] 292ns 8mb |L2.1095|" - - "L2.1096[182000,183599] 292ns 86mb |L2.1096|" - - "L2.1097[183600,183999] 292ns 22mb |L2.1097|" - - "L2.1098[184000,184980] 292ns 100mb |L2.1098|" - - "L2.1099[184981,185960] 292ns 100mb |L2.1099|" - - "L2.1100[185961,185999] 292ns 4mb |L2.1100|" - - "L2.1101[186000,186980] 292ns 100mb |L2.1101|" - - "L2.1102[186981,187960] 292ns 100mb |L2.1102|" - - "L2.1103[187961,187999] 292ns 4mb |L2.1103|" - - "L2.1104[188000,188980] 292ns 100mb |L2.1104|" - - "L2.1105[188981,189960] 292ns 100mb |L2.1105|" - - "L2.1106[189961,189999] 292ns 4mb |L2.1106|" - - "L2.1107[190000,190980] 292ns 100mb |L2.1107|" - - "L2.1108[190981,191960] 292ns 100mb |L2.1108|" - - "L2.1109[191961,191999] 292ns 4mb |L2.1109|" - - "L2.1110[192000,192980] 295ns 100mb |L2.1110|" - - "L2.1111[192981,193960] 295ns 
100mb |L2.1111|" - - "L2.1112[193961,193999] 295ns 4mb |L2.1112|" - - "L2.1113[194000,194980] 295ns 100mb |L2.1113|" - - "L2.1114[194981,195960] 295ns 100mb |L2.1114|" - - "L2.1115[195961,195999] 295ns 4mb |L2.1115|" - - "L2.1116[196000,196981] 299ns 100mb |L2.1116|" - - "L2.1117[196982,197962] 299ns 100mb |L2.1117|" - - "L2.1118[197963,197999] 299ns 4mb |L2.1118|" - - "L2.1119[198000,198981] 299ns 100mb |L2.1119|" - - "L2.1120[198982,199962] 299ns 100mb |L2.1120|" - - "L2.1121[199963,200000] 299ns 4mb |L2.1121|" + - "L2.774[0,980] 119ns 100mb|L2.774| " + - "L2.775[981,1960] 119ns 100mb|L2.775| " + - "L2.776[1961,1999] 119ns 4mb|L2.776| " + - "L2.777[2000,2980] 119ns 100mb|L2.777| " + - "L2.778[2981,3960] 119ns 100mb |L2.778| " + - "L2.779[3961,3999] 119ns 4mb |L2.779| " + - "L2.780[4000,4980] 119ns 100mb |L2.780| " + - "L2.781[4981,5960] 119ns 100mb |L2.781| " + - "L2.782[5961,5999] 119ns 4mb |L2.782| " + - "L2.783[6000,6980] 119ns 100mb |L2.783| " + - "L2.784[6981,7960] 119ns 100mb |L2.784| " + - "L2.785[7961,7999] 119ns 4mb |L2.785| " + - "L2.786[8000,8980] 119ns 100mb |L2.786| " + - "L2.787[8981,9960] 119ns 100mb |L2.787| " + - "L2.788[9961,9999] 119ns 4mb |L2.788| " + - "L2.789[10000,10980] 119ns 100mb |L2.789| " + - "L2.790[10981,11960] 119ns 100mb |L2.790| " + - "L2.791[11961,11999] 119ns 4mb |L2.791| " + - "L2.792[12000,12980] 119ns 100mb |L2.792| " + - "L2.793[12981,13960] 119ns 100mb |L2.793| " + - "L2.794[13961,13999] 119ns 4mb |L2.794| " + - "L2.795[14000,14980] 119ns 100mb |L2.795| " + - "L2.796[14981,15960] 119ns 100mb |L2.796| " + - "L2.797[15961,15999] 119ns 4mb |L2.797| " + - "L2.798[16000,16980] 119ns 100mb |L2.798| " + - "L2.799[16981,17960] 119ns 100mb |L2.799| " + - "L2.800[17961,17999] 119ns 4mb |L2.800| " + - "L2.801[18000,18980] 119ns 100mb |L2.801| " + - "L2.802[18981,19960] 119ns 100mb |L2.802| " + - "L2.803[19961,19999] 119ns 4mb |L2.803| " + - "L2.804[20000,20980] 133ns 100mb |L2.804| " + - "L2.805[20981,21960] 133ns 100mb |L2.805| " + - "L2.806[21961,21999] 133ns 4mb |L2.806| " + - "L2.807[22000,22980] 133ns 100mb |L2.807| " + - "L2.808[22981,23960] 133ns 100mb |L2.808| " + - "L2.809[23961,23999] 133ns 4mb |L2.809| " + - "L2.810[24000,24980] 133ns 100mb |L2.810| " + - "L2.811[24981,25960] 133ns 100mb |L2.811| " + - "L2.812[25961,25999] 133ns 4mb |L2.812| " + - "L2.813[26000,26980] 133ns 100mb |L2.813| " + - "L2.814[26981,27960] 133ns 100mb |L2.814| " + - "L2.815[27961,27999] 133ns 4mb |L2.815| " + - "L2.816[28000,28980] 133ns 100mb |L2.816| " + - "L2.817[28981,29960] 133ns 100mb |L2.817| " + - "L2.818[29961,29999] 133ns 4mb |L2.818| " + - "L2.819[30000,30980] 133ns 100mb |L2.819| " + - "L2.820[30981,31960] 133ns 100mb |L2.820| " + - "L2.821[31961,31999] 133ns 4mb |L2.821| " + - "L2.822[32000,32980] 133ns 100mb |L2.822| " + - "L2.823[32981,33960] 133ns 100mb |L2.823| " + - "L2.824[33961,33999] 133ns 4mb |L2.824| " + - "L2.825[34000,34980] 152ns 100mb |L2.825| " + - "L2.826[34981,35960] 152ns 100mb |L2.826| " + - "L2.827[35961,35999] 152ns 4mb |L2.827| " + - "L2.828[36000,36980] 152ns 100mb |L2.828| " + - "L2.829[36981,37960] 152ns 100mb |L2.829| " + - "L2.830[37961,37999] 152ns 4mb |L2.830| " + - "L2.831[38000,38980] 152ns 100mb |L2.831| " + - "L2.832[38981,39960] 152ns 100mb |L2.832| " + - "L2.833[39961,39999] 152ns 4mb |L2.833| " + - "L2.834[40000,40980] 152ns 100mb |L2.834| " + - "L2.835[40981,41960] 152ns 100mb |L2.835| " + - "L2.836[41961,41999] 152ns 4mb |L2.836| " + - "L2.837[42000,42980] 152ns 100mb |L2.837| " + - "L2.838[42981,43960] 152ns 100mb 
|L2.838| " + - "L2.839[43961,43999] 152ns 4mb |L2.839| " + - "L2.840[44000,44980] 152ns 100mb |L2.840| " + - "L2.841[44981,45960] 152ns 100mb |L2.841| " + - "L2.842[45961,45999] 152ns 4mb |L2.842| " + - "L2.843[46000,46980] 152ns 100mb |L2.843| " + - "L2.844[46981,47960] 152ns 100mb |L2.844| " + - "L2.845[47961,47999] 152ns 4mb |L2.845| " + - "L2.846[48000,48980] 152ns 100mb |L2.846| " + - "L2.847[48981,49960] 152ns 100mb |L2.847| " + - "L2.848[49961,49999] 152ns 4mb |L2.848| " + - "L2.849[50000,50980] 167ns 100mb |L2.849| " + - "L2.850[50981,51960] 167ns 100mb |L2.850| " + - "L2.851[51961,51999] 167ns 4mb |L2.851| " + - "L2.852[52000,52980] 167ns 100mb |L2.852| " + - "L2.853[52981,53960] 167ns 100mb |L2.853| " + - "L2.854[53961,53999] 167ns 4mb |L2.854| " + - "L2.855[54000,54980] 167ns 100mb |L2.855| " + - "L2.856[54981,55960] 167ns 100mb |L2.856| " + - "L2.857[55961,55999] 167ns 4mb |L2.857| " + - "L2.858[56000,56980] 167ns 100mb |L2.858| " + - "L2.859[56981,57960] 167ns 100mb |L2.859| " + - "L2.860[57961,57999] 167ns 4mb |L2.860| " + - "L2.861[58000,58980] 167ns 100mb |L2.861| " + - "L2.862[58981,59960] 167ns 100mb |L2.862| " + - "L2.863[59961,59999] 167ns 4mb |L2.863| " + - "L2.864[60000,60980] 167ns 100mb |L2.864| " + - "L2.865[60981,61960] 167ns 100mb |L2.865| " + - "L2.866[61961,61999] 167ns 4mb |L2.866| " + - "L2.867[62000,62980] 167ns 100mb |L2.867| " + - "L2.868[62981,63960] 167ns 100mb |L2.868| " + - "L2.869[63961,63999] 167ns 4mb |L2.869| " + - "L2.870[64000,64980] 167ns 100mb |L2.870| " + - "L2.871[64981,65960] 167ns 100mb |L2.871| " + - "L2.872[65961,65999] 167ns 4mb |L2.872| " + - "L2.873[66000,66980] 167ns 100mb |L2.873| " + - "L2.874[66981,67960] 167ns 100mb |L2.874| " + - "L2.875[67961,67999] 167ns 4mb |L2.875| " + - "L2.876[68000,68980] 186ns 100mb |L2.876| " + - "L2.877[68981,69960] 186ns 100mb |L2.877| " + - "L2.878[69961,69999] 186ns 4mb |L2.878| " + - "L2.879[70000,70980] 186ns 100mb |L2.879| " + - "L2.880[70981,71960] 186ns 100mb |L2.880| " + - "L2.881[71961,71999] 186ns 4mb |L2.881| " + - "L2.882[72000,72980] 186ns 100mb |L2.882| " + - "L2.883[72981,73960] 186ns 100mb |L2.883| " + - "L2.884[73961,73999] 186ns 4mb |L2.884| " + - "L2.885[74000,74980] 186ns 100mb |L2.885| " + - "L2.886[74981,75960] 186ns 100mb |L2.886| " + - "L2.887[75961,75999] 186ns 4mb |L2.887| " + - "L2.888[76000,76980] 186ns 100mb |L2.888| " + - "L2.889[76981,77960] 186ns 100mb |L2.889| " + - "L2.890[77961,77999] 186ns 4mb |L2.890| " + - "L2.891[78000,78980] 186ns 100mb |L2.891| " + - "L2.892[78981,79960] 186ns 100mb |L2.892| " + - "L2.893[79961,79999] 186ns 4mb |L2.893| " + - "L2.894[80000,80980] 186ns 100mb |L2.894| " + - "L2.895[80981,81960] 186ns 100mb |L2.895| " + - "L2.896[81961,81999] 186ns 4mb |L2.896| " + - "L2.897[82000,82980] 186ns 100mb |L2.897| " + - "L2.898[82981,83960] 186ns 100mb |L2.898| " + - "L2.899[83961,83999] 186ns 4mb |L2.899| " + - "L2.900[84000,84980] 201ns 100mb |L2.900| " + - "L2.901[84981,85960] 201ns 100mb |L2.901| " + - "L2.902[85961,85999] 201ns 4mb |L2.902| " + - "L2.903[86000,86980] 201ns 100mb |L2.903| " + - "L2.904[86981,87960] 201ns 100mb |L2.904| " + - "L2.905[87961,87999] 201ns 4mb |L2.905| " + - "L2.906[88000,88980] 201ns 100mb |L2.906| " + - "L2.907[88981,89960] 201ns 100mb |L2.907| " + - "L2.908[89961,89999] 201ns 4mb |L2.908| " + - "L2.909[90000,90980] 201ns 100mb |L2.909| " + - "L2.910[90981,91960] 201ns 100mb |L2.910| " + - "L2.911[91961,91999] 201ns 4mb |L2.911| " + - "L2.912[92000,92980] 201ns 100mb |L2.912| " + - "L2.913[92981,93960] 201ns 100mb 
|L2.913| " + - "L2.914[93961,93999] 201ns 4mb |L2.914| " + - "L2.915[94000,94980] 201ns 100mb |L2.915| " + - "L2.916[94981,95960] 201ns 100mb |L2.916| " + - "L2.917[95961,95999] 201ns 4mb |L2.917| " + - "L2.918[96000,96980] 201ns 100mb |L2.918| " + - "L2.919[96981,97960] 201ns 100mb |L2.919| " + - "L2.920[97961,97999] 201ns 4mb |L2.920| " + - "L2.921[98000,98980] 201ns 100mb |L2.921| " + - "L2.922[98981,99960] 201ns 100mb |L2.922| " + - "L2.923[99961,99999] 201ns 4mb |L2.923| " + - "L2.924[100000,100980] 201ns 100mb |L2.924| " + - "L2.925[100981,101960] 201ns 100mb |L2.925| " + - "L2.926[101961,101999] 201ns 4mb |L2.926| " + - "L2.927[102000,102980] 220ns 100mb |L2.927| " + - "L2.928[102981,103960] 220ns 100mb |L2.928| " + - "L2.929[103961,103999] 220ns 4mb |L2.929| " + - "L2.930[104000,104980] 220ns 100mb |L2.930| " + - "L2.931[104981,105960] 220ns 100mb |L2.931| " + - "L2.932[105961,105999] 220ns 4mb |L2.932| " + - "L2.933[106000,106980] 220ns 100mb |L2.933| " + - "L2.934[106981,107960] 220ns 100mb |L2.934| " + - "L2.935[107961,107999] 220ns 4mb |L2.935| " + - "L2.936[108000,108980] 220ns 100mb |L2.936| " + - "L2.937[108981,109960] 220ns 100mb |L2.937| " + - "L2.938[109961,109999] 220ns 4mb |L2.938| " + - "L2.939[110000,110980] 220ns 100mb |L2.939| " + - "L2.940[110981,111960] 220ns 100mb |L2.940| " + - "L2.941[111961,111999] 220ns 4mb |L2.941| " + - "L2.942[112000,112980] 220ns 100mb |L2.942| " + - "L2.943[112981,113960] 220ns 100mb |L2.943| " + - "L2.944[113961,113999] 220ns 4mb |L2.944| " + - "L2.945[114000,114980] 220ns 100mb |L2.945| " + - "L2.946[114981,115960] 220ns 100mb |L2.946| " + - "L2.947[115961,115999] 220ns 4mb |L2.947| " + - "L2.948[116000,116980] 220ns 100mb |L2.948| " + - "L2.949[116981,117960] 220ns 100mb |L2.949| " + - "L2.950[117961,117999] 220ns 4mb |L2.950| " + - "L2.951[118000,118980] 240ns 100mb |L2.951| " + - "L2.952[118981,119960] 240ns 100mb |L2.952| " + - "L2.953[119961,119999] 240ns 4mb |L2.953| " + - "L2.954[120000,120980] 240ns 100mb |L2.954| " + - "L2.955[120981,121960] 240ns 100mb |L2.955| " + - "L2.956[121961,121999] 240ns 4mb |L2.956| " + - "L2.957[122000,122980] 240ns 100mb |L2.957| " + - "L2.958[122981,123960] 240ns 100mb |L2.958| " + - "L2.959[123961,123999] 240ns 4mb |L2.959| " + - "L2.960[124000,124980] 240ns 100mb |L2.960| " + - "L2.961[124981,125960] 240ns 100mb |L2.961| " + - "L2.962[125961,125999] 240ns 4mb |L2.962| " + - "L2.963[126000,126980] 240ns 100mb |L2.963| " + - "L2.964[126981,127960] 240ns 100mb |L2.964| " + - "L2.965[127961,127999] 240ns 4mb |L2.965| " + - "L2.966[128000,128980] 240ns 100mb |L2.966| " + - "L2.967[128981,129960] 240ns 100mb |L2.967| " + - "L2.968[129961,129999] 240ns 4mb |L2.968| " + - "L2.969[130000,130980] 240ns 100mb |L2.969| " + - "L2.970[130981,131960] 240ns 100mb |L2.970| " + - "L2.971[131961,131999] 240ns 4mb |L2.971| " + - "L2.972[132000,132980] 240ns 100mb |L2.972| " + - "L2.973[132981,133960] 240ns 100mb |L2.973| " + - "L2.974[133961,133999] 240ns 4mb |L2.974| " + - "L2.975[134000,134980] 240ns 100mb |L2.975| " + - "L2.976[134981,135960] 240ns 100mb |L2.976| " + - "L2.977[135961,135999] 240ns 4mb |L2.977| " + - "L2.978[136000,136980] 240ns 100mb |L2.978| " + - "L2.979[136981,137960] 240ns 100mb |L2.979| " + - "L2.980[137961,137999] 240ns 4mb |L2.980| " + - "L2.981[138000,138980] 260ns 100mb |L2.981| " + - "L2.982[138981,139960] 260ns 100mb |L2.982| " + - "L2.983[139961,139999] 260ns 4mb |L2.983| " + - "L2.984[140000,140980] 260ns 100mb |L2.984| " + - "L2.985[140981,141960] 260ns 100mb |L2.985| " + - 
"L2.986[141961,141999] 260ns 4mb |L2.986| " + - "L2.987[142000,142980] 260ns 100mb |L2.987| " + - "L2.988[142981,143960] 260ns 100mb |L2.988| " + - "L2.989[143961,143999] 260ns 4mb |L2.989| " + - "L2.990[144000,144980] 260ns 100mb |L2.990| " + - "L2.991[144981,145960] 260ns 100mb |L2.991| " + - "L2.992[145961,145999] 260ns 4mb |L2.992| " + - "L2.993[146000,146980] 260ns 100mb |L2.993| " + - "L2.994[146981,147960] 260ns 100mb |L2.994| " + - "L2.995[147961,147999] 260ns 4mb |L2.995| " + - "L2.996[148000,148980] 260ns 100mb |L2.996| " + - "L2.997[148981,149960] 260ns 100mb |L2.997| " + - "L2.998[149961,149999] 260ns 4mb |L2.998| " + - "L2.999[150000,150980] 260ns 100mb |L2.999| " + - "L2.1000[150981,151960] 260ns 100mb |L2.1000| " + - "L2.1001[151961,151999] 260ns 4mb |L2.1001| " + - "L2.1002[152000,152980] 260ns 100mb |L2.1002| " + - "L2.1003[152981,153960] 260ns 100mb |L2.1003| " + - "L2.1004[153961,153999] 260ns 4mb |L2.1004| " + - "L2.1005[154000,154980] 260ns 100mb |L2.1005| " + - "L2.1006[154981,155960] 260ns 100mb |L2.1006| " + - "L2.1007[155961,155999] 260ns 4mb |L2.1007| " + - "L2.1008[156000,156980] 260ns 100mb |L2.1008| " + - "L2.1009[156981,157960] 260ns 100mb |L2.1009| " + - "L2.1010[157961,157999] 260ns 4mb |L2.1010| " + - "L2.1011[158000,158980] 280ns 100mb |L2.1011| " + - "L2.1012[158981,159960] 280ns 100mb |L2.1012| " + - "L2.1013[159961,159999] 280ns 4mb |L2.1013| " + - "L2.1014[160000,160980] 280ns 100mb |L2.1014| " + - "L2.1015[160981,161960] 280ns 100mb |L2.1015| " + - "L2.1016[161961,161999] 280ns 4mb |L2.1016| " + - "L2.1017[162000,162980] 280ns 100mb |L2.1017| " + - "L2.1018[162981,163960] 280ns 100mb |L2.1018| " + - "L2.1019[163961,163999] 280ns 4mb |L2.1019| " + - "L2.1020[164000,164980] 280ns 100mb |L2.1020| " + - "L2.1021[164981,165960] 280ns 100mb |L2.1021| " + - "L2.1022[165961,165999] 280ns 4mb |L2.1022| " + - "L2.1023[166000,166980] 280ns 100mb |L2.1023| " + - "L2.1024[166981,167960] 280ns 100mb |L2.1024| " + - "L2.1025[167961,167999] 280ns 4mb |L2.1025| " + - "L2.1026[168000,168980] 280ns 100mb |L2.1026| " + - "L2.1027[168981,169960] 280ns 100mb |L2.1027| " + - "L2.1028[169961,169999] 280ns 4mb |L2.1028| " + - "L2.1029[170000,170980] 280ns 100mb |L2.1029| " + - "L2.1030[170981,171960] 280ns 100mb |L2.1030| " + - "L2.1031[171961,171999] 280ns 4mb |L2.1031| " + - "L2.1032[172000,172980] 280ns 100mb |L2.1032| " + - "L2.1033[172981,173960] 280ns 100mb |L2.1033| " + - "L2.1034[173961,173999] 280ns 4mb |L2.1034| " + - "L2.1035[174000,174980] 280ns 100mb |L2.1035| " + - "L2.1036[174981,175960] 280ns 100mb |L2.1036| " + - "L2.1037[175961,175999] 280ns 4mb |L2.1037| " + - "L2.1038[176000,176980] 280ns 100mb |L2.1038| " + - "L2.1039[176981,177960] 280ns 100mb |L2.1039| " + - "L2.1040[177961,177999] 280ns 4mb |L2.1040| " + - "L2.1041[178000,178980] 299ns 100mb |L2.1041| " + - "L2.1042[178981,179960] 299ns 100mb |L2.1042| " + - "L2.1043[179961,179999] 299ns 4mb |L2.1043| " + - "L2.1044[180000,180980] 299ns 100mb |L2.1044|" + - "L2.1045[180981,181960] 299ns 100mb |L2.1045|" + - "L2.1046[181961,181999] 299ns 4mb |L2.1046|" + - "L2.1047[182000,182980] 299ns 100mb |L2.1047|" + - "L2.1048[182981,183960] 299ns 100mb |L2.1048|" + - "L2.1049[183961,183999] 299ns 4mb |L2.1049|" + - "L2.1050[184000,184980] 299ns 100mb |L2.1050|" + - "L2.1051[184981,185960] 299ns 100mb |L2.1051|" + - "L2.1052[185961,185999] 299ns 4mb |L2.1052|" + - "L2.1053[186000,186980] 299ns 100mb |L2.1053|" + - "L2.1054[186981,187960] 299ns 100mb |L2.1054|" + - "L2.1055[187961,187999] 299ns 4mb |L2.1055|" + - 
"L2.1056[188000,188980] 299ns 100mb |L2.1056|" + - "L2.1057[188981,189960] 299ns 100mb |L2.1057|" + - "L2.1058[189961,189999] 299ns 4mb |L2.1058|" + - "L2.1059[190000,190980] 299ns 100mb |L2.1059|" + - "L2.1060[190981,191960] 299ns 100mb |L2.1060|" + - "L2.1061[191961,191999] 299ns 4mb |L2.1061|" + - "L2.1062[192000,192980] 299ns 100mb |L2.1062|" + - "L2.1063[192981,193960] 299ns 100mb |L2.1063|" + - "L2.1064[193961,193999] 299ns 4mb |L2.1064|" + - "L2.1065[194000,194980] 299ns 100mb |L2.1065|" + - "L2.1066[194981,195960] 299ns 100mb |L2.1066|" + - "L2.1067[195961,195999] 299ns 4mb |L2.1067|" + - "L2.1068[196000,196980] 299ns 100mb |L2.1068|" + - "L2.1069[196981,197960] 299ns 100mb |L2.1069|" + - "L2.1070[197961,197999] 299ns 4mb |L2.1070|" + - "L2.1071[198000,198981] 299ns 100mb |L2.1071|" + - "L2.1072[198982,199962] 299ns 100mb |L2.1072|" + - "L2.1073[199963,200000] 299ns 4mb |L2.1073|" - "**** Breakdown of where bytes were written" - - 358mb written by compact(ManySmallFiles) - - 38.3gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) - - 4mb written by split(VerticalSplit) - - 679mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) - - 913mb written by split(ReduceOverlap) + - 29.47gb written by split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize)) + - 400mb written by compact(ManySmallFiles) + - 5.08gb written by split(ReduceOverlap) + - 620mb written by split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize)) + - 6mb written by split(VerticalSplit) "### ); }