Commit Hash: stringlengths 40-40
Author: stringclasses (38 values)
Date: stringlengths 19-19
Description: stringlengths 8-113
Body: stringlengths 10-22.2k
Footers: stringclasses (56 values)
Commit Message: stringlengths 28-22.3k
Git Diff: stringlengths 140-3.61M
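Each row of the dataset holds one commit described by the columns above. As a minimal sketch only (the field names and the use of plain strings are assumptions for illustration, not tied to any particular loader), a Rust struct mirroring these columns could look like:

```rust
// Illustrative record type for one row of the commits dataset; all names and
// the Option<String> treatment of "null" footers are assumptions.
#[derive(Debug, Clone)]
struct CommitRecord {
    commit_hash: String,     // 40-character SHA-1
    author: String,          // one of 38 distinct authors
    date: String,            // "YYYY-MM-DD HH:MM:SS", always 19 characters
    description: String,     // conventional-commit subject without the type prefix
    body: String,            // free-form commit body
    footers: Option<String>, // e.g. "Co-authored-by: ...", None when the column is "null"
    commit_message: String,  // full message: "<type>: <description> <body>"
    git_diff: String,        // unified diff, up to several megabytes
}
```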
Commit Hash: 64c9d87b9b600e1624ecf21227c512b5b00f9e73
Author: Dom Dwyer
Date: 2022-11-21 14:57:37
Description: move DmlSink
Body: Extracts the DmlSink trait into its own module - it is independent of the Kafka handler and will be reused.
Footers: null
Commit Message: refactor: move DmlSink Extracts the DmlSink trait into its own module - it is independent of the Kafka handler and will be reused.
Git Diff:
diff --git a/ingester/src/stream_handler/mock_sink.rs b/ingester/src/dml_sink/mock_sink.rs similarity index 100% rename from ingester/src/stream_handler/mock_sink.rs rename to ingester/src/dml_sink/mock_sink.rs diff --git a/ingester/src/dml_sink/mod.rs b/ingester/src/dml_sink/mod.rs new file mode 100644 index 0000000000..7aed0b7a52 --- /dev/null +++ b/ingester/src/dml_sink/mod.rs @@ -0,0 +1,5 @@ +mod r#trait; +pub(crate) use r#trait::*; + +#[cfg(test)] +pub(crate) mod mock_sink; diff --git a/ingester/src/stream_handler/sink.rs b/ingester/src/dml_sink/trait.rs similarity index 100% rename from ingester/src/stream_handler/sink.rs rename to ingester/src/dml_sink/trait.rs diff --git a/ingester/src/lib.rs b/ingester/src/lib.rs index 30e3fbc005..9dd8ae0111 100644 --- a/ingester/src/lib.rs +++ b/ingester/src/lib.rs @@ -20,6 +20,8 @@ mod arcmap; pub(crate) mod compact; pub mod data; +mod deferred_load; +mod dml_sink; pub mod handler; mod job; pub mod lifecycle; @@ -28,7 +30,5 @@ pub mod querier_handler; pub(crate) mod query_adaptor; pub mod server; pub(crate) mod stream_handler; - -mod deferred_load; #[cfg(test)] pub(crate) mod test_util; diff --git a/ingester/src/server/grpc/rpc_write.rs b/ingester/src/server/grpc/rpc_write.rs index 5d6f60b3b9..26956d6464 100644 --- a/ingester/src/server/grpc/rpc_write.rs +++ b/ingester/src/server/grpc/rpc_write.rs @@ -9,7 +9,7 @@ use observability_deps::tracing::*; use thiserror::Error; use tonic::{Request, Response}; -use crate::{data::DmlApplyAction, stream_handler::DmlSink}; +use crate::{data::DmlApplyAction, dml_sink::DmlSink}; // A list of error states when handling an RPC write request. // @@ -177,7 +177,7 @@ mod tests { Column, DatabaseBatch, TableBatch, }; - use crate::stream_handler::mock_sink::MockDmlSink; + use crate::dml_sink::mock_sink::MockDmlSink; use super::*; diff --git a/ingester/src/stream_handler/handler.rs b/ingester/src/stream_handler/handler.rs index 94489d34e4..ed46e42a78 100644 --- a/ingester/src/stream_handler/handler.rs +++ b/ingester/src/stream_handler/handler.rs @@ -1,8 +1,8 @@ //! A handler of streamed ops from a write buffer. -use super::DmlSink; use crate::{ data::DmlApplyAction, + dml_sink::DmlSink, lifecycle::{LifecycleHandle, LifecycleHandleImpl}, }; use data_types::{SequenceNumber, ShardId, ShardIndex}; @@ -510,8 +510,8 @@ fn metric_attrs( mod tests { use super::*; use crate::{ + dml_sink::mock_sink::MockDmlSink, lifecycle::{LifecycleConfig, LifecycleManager}, - stream_handler::mock_sink::MockDmlSink, }; use assert_matches::assert_matches; use async_trait::async_trait; diff --git a/ingester/src/stream_handler/mod.rs b/ingester/src/stream_handler/mod.rs index 5e9a351fe4..1a7f828c39 100644 --- a/ingester/src/stream_handler/mod.rs +++ b/ingester/src/stream_handler/mod.rs @@ -16,17 +16,13 @@ //! [`WriteBufferReading`]: write_buffer::core::WriteBufferReading //! [`LifecycleManager`]: crate::lifecycle::LifecycleManager //! [`LifecycleHandle::can_resume_ingest()`]: crate::lifecycle::LifecycleHandle::can_resume_ingest() +//! 
[`DmlSink`]: crate::dml_sink::DmlSink pub(crate) mod handler; mod periodic_watermark_fetcher; -mod sink; -#[cfg(test)] -pub mod mock_sink; #[cfg(test)] pub mod mock_watermark_fetcher; pub(crate) mod sink_adaptor; pub(crate) mod sink_instrumentation; - pub(crate) use periodic_watermark_fetcher::*; -pub(crate) use sink::*; diff --git a/ingester/src/stream_handler/sink_adaptor.rs b/ingester/src/stream_handler/sink_adaptor.rs index 3780f3da5b..30490ccf75 100644 --- a/ingester/src/stream_handler/sink_adaptor.rs +++ b/ingester/src/stream_handler/sink_adaptor.rs @@ -6,9 +6,9 @@ use async_trait::async_trait; use data_types::ShardId; use dml::DmlOperation; -use super::DmlSink; use crate::{ data::{DmlApplyAction, IngesterData}, + dml_sink::DmlSink, lifecycle::LifecycleHandleImpl, }; diff --git a/ingester/src/stream_handler/sink_instrumentation.rs b/ingester/src/stream_handler/sink_instrumentation.rs index ab74edd895..b7548bf583 100644 --- a/ingester/src/stream_handler/sink_instrumentation.rs +++ b/ingester/src/stream_handler/sink_instrumentation.rs @@ -9,9 +9,7 @@ use iox_time::{SystemProvider, TimeProvider}; use metric::{Attributes, DurationHistogram, U64Counter, U64Gauge}; use trace::span::{SpanExt, SpanRecorder}; -use crate::data::DmlApplyAction; - -use super::DmlSink; +use crate::{data::DmlApplyAction, dml_sink::DmlSink}; /// A [`WatermarkFetcher`] abstracts a source of the write buffer high watermark /// (max known offset). @@ -237,8 +235,9 @@ where #[cfg(test)] mod tests { use super::*; - use crate::stream_handler::{ - mock_sink::MockDmlSink, mock_watermark_fetcher::MockWatermarkFetcher, + use crate::{ + dml_sink::mock_sink::MockDmlSink, + stream_handler::mock_watermark_fetcher::MockWatermarkFetcher, }; use assert_matches::assert_matches; use data_types::{NamespaceId, Sequence, SequenceNumber, ShardId, TableId};
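The diff above is mostly file renames plus a small re-export shim (`mod r#trait; pub(crate) use r#trait::*;` with a `#[cfg(test)]` mock module). As a rough, hypothetical sketch of that layout (the trait body and the MockDmlSink internals here are invented for illustration and are not the real DmlSink), the pattern looks like:

```rust
// Hypothetical sketch of the extracted module layout; only the module
// structure mirrors the diff, the trait signature is invented.
mod dml_sink {
    mod r#trait {
        /// An operation sink, independent of any Kafka/write-buffer handler.
        pub(crate) trait DmlSink {
            type Error: std::error::Error;
            /// Apply one operation (simplified here to a string payload).
            fn apply(&self, op: &str) -> Result<(), Self::Error>;
        }
    }
    pub(crate) use r#trait::*;

    #[cfg(test)]
    pub(crate) mod mock_sink {
        use super::DmlSink;
        use std::convert::Infallible;

        /// Records every payload it receives, for assertions in tests.
        #[derive(Default)]
        pub(crate) struct MockDmlSink {
            pub(crate) calls: std::cell::RefCell<Vec<String>>,
        }

        impl DmlSink for MockDmlSink {
            type Error = Infallible;
            fn apply(&self, op: &str) -> Result<(), Self::Error> {
                self.calls.borrow_mut().push(op.to_string());
                Ok(())
            }
        }
    }
}
```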
Commit Hash: d6cbae16ac83df3164d82dd43371f661c6617dff
Author: Marco Neumann
Date: 2022-11-01 06:39:26
Description: update rskafka (#5998)
Body: Includes additional logging to debug https://github.com/influxdata/idpe/issues/16278
Footers: null
Commit Message: chore: update rskafka (#5998) Includes additional logging to debug https://github.com/influxdata/idpe/issues/16278
Git Diff:
diff --git a/Cargo.lock b/Cargo.lock index 7105e47d62..9493cd0b3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4271,7 +4271,7 @@ dependencies = [ [[package]] name = "rskafka" version = "0.3.0" -source = "git+https://github.com/influxdata/rskafka.git?rev=8c98c56b5d4b06206ce40e21404a75e6bb7bf7af#8c98c56b5d4b06206ce40e21404a75e6bb7bf7af" +source = "git+https://github.com/influxdata/rskafka.git?rev=4f05f3b42d144f75ddb684a10c746f8afddbf338#4f05f3b42d144f75ddb684a10c746f8afddbf338" dependencies = [ "async-socks5", "async-trait", diff --git a/write_buffer/Cargo.toml b/write_buffer/Cargo.toml index 213375dc1a..645cd10386 100644 --- a/write_buffer/Cargo.toml +++ b/write_buffer/Cargo.toml @@ -24,7 +24,7 @@ observability_deps = { path = "../observability_deps" } parking_lot = "0.12" pin-project = "1.0" prost = "0.11" -rskafka = { git = "https://github.com/influxdata/rskafka.git", rev="8c98c56b5d4b06206ce40e21404a75e6bb7bf7af", default-features = false, features = ["compression-snappy", "transport-socks5"] } +rskafka = { git = "https://github.com/influxdata/rskafka.git", rev="4f05f3b42d144f75ddb684a10c746f8afddbf338", default-features = false, features = ["compression-snappy", "transport-socks5"] } schema = { path = "../schema" } tokio = { version = "1.21", features = ["fs", "macros", "parking_lot", "rt", "sync", "time"] } tokio-util = "0.7.4"
Commit Hash: 9bf8840a63707bb8a621e69eea1c45cc6026e521
Author: Jeffrey Smith II
Date: 2022-11-21 10:39:30
Description: update me and users routes to match cloud/documentation (#23837)
Body: * fix: update me and users routes to match cloud/documentation * fix: handle errors in user routes properly
Footers: null
Commit Message: fix: update me and users routes to match cloud/documentation (#23837) * fix: update me and users routes to match cloud/documentation * fix: handle errors in user routes properly
Git Diff:
diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index e03e514144..0f965c1885 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -787,6 +787,7 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) { } userHTTPServer := ts.NewUserHTTPHandler(m.log) + meHTTPServer := ts.NewMeHTTPHandler(m.log) onboardHTTPServer := tenant.NewHTTPOnboardHandler(m.log, onboardSvc) // feature flagging for new labels service @@ -897,8 +898,8 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) { http.WithResourceHandler(labelHandler), http.WithResourceHandler(sessionHTTPServer.SignInResourceHandler()), http.WithResourceHandler(sessionHTTPServer.SignOutResourceHandler()), - http.WithResourceHandler(userHTTPServer.MeResourceHandler()), - http.WithResourceHandler(userHTTPServer.UserResourceHandler()), + http.WithResourceHandler(userHTTPServer), + http.WithResourceHandler(meHTTPServer), http.WithResourceHandler(orgHTTPServer), http.WithResourceHandler(bucketHTTPServer), http.WithResourceHandler(v1AuthHTTPServer), diff --git a/tenant/http_server_user.go b/tenant/http_server_user.go index 5e124f8c8f..ca45e3b096 100644 --- a/tenant/http_server_user.go +++ b/tenant/http_server_user.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "net/http" - "strings" "github.com/go-chi/chi" "github.com/go-chi/chi/middleware" @@ -67,20 +66,8 @@ func NewHTTPUserHandler(log *zap.Logger, userService influxdb.UserService, passw return svr } -type resourceHandler struct { - prefix string - *UserHandler -} - -func (h *resourceHandler) Prefix() string { - return h.prefix -} -func (h *UserHandler) MeResourceHandler() *resourceHandler { - return &resourceHandler{prefix: prefixMe, UserHandler: h} -} - -func (h *UserHandler) UserResourceHandler() *resourceHandler { - return &resourceHandler{prefix: prefixUsers, UserHandler: h} +func (h *UserHandler) Prefix() string { + return prefixUsers } type passwordSetRequest struct { @@ -117,10 +104,15 @@ func (h *UserHandler) handlePostUserPassword(w http.ResponseWriter, r *http.Requ w.WriteHeader(http.StatusNoContent) } -func (h *UserHandler) putPassword(ctx context.Context, w http.ResponseWriter, r *http.Request) (username string, err error) { +// handlePutPassword is the HTTP handler for the PUT /api/v2/users/:id/password +func (h *UserHandler) handlePutUserPassword(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() req, err := decodePasswordResetRequest(r) if err != nil { - return "", err + h.api.Err(w, r, &errors.Error{ + Msg: fmt.Sprintf("error decoding password reset request: %s", err), + }) + return } param := chi.URLParam(r, "id") @@ -131,18 +123,7 @@ func (h *UserHandler) putPassword(ctx context.Context, w http.ResponseWriter, r }) return } - err = h.passwordSvc.CompareAndSetPassword(ctx, *userID, req.PasswordOld, req.PasswordNew) - if err != nil { - return "", err - } - return req.Username, nil -} - -// handlePutPassword is the HTTP handler for the PUT /api/v2/users/:id/password -func (h *UserHandler) handlePutUserPassword(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - _, err := h.putPassword(ctx, w, r) if err != nil { h.api.Err(w, r, err) return @@ -220,27 +201,6 @@ func decodePostUserRequest(ctx context.Context, r *http.Request) (*postUserReque }, nil } -// handleGetMe is the HTTP handler for the GET /api/v2/me. 
-func (h *UserHandler) handleGetMe(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - a, err := icontext.GetAuthorizer(ctx) - if err != nil { - h.api.Err(w, r, err) - return - } - - id := a.GetUserID() - user, err := h.userSvc.FindUserByID(ctx, id) - - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, newUserResponse(user)) -} - // handleGetUser is the HTTP handler for the GET /api/v2/users/:id route. func (h *UserHandler) handleGetUser(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -388,13 +348,6 @@ func newUserResponse(u *influxdb.User) *influxdb.UserResponse { // handleGetUsers is the HTTP handler for the GET /api/v2/users route. func (h *UserHandler) handleGetUsers(w http.ResponseWriter, r *http.Request) { - // because this is a mounted path in both the /users and the /me route - // we can get a me request through this handler - if strings.Contains(r.URL.Path, prefixMe) { - h.handleGetMe(w, r) - return - } - ctx := r.Context() req, err := decodeGetUsersRequest(ctx, r) if err != nil { @@ -495,3 +448,90 @@ func decodePatchUserRequest(ctx context.Context, r *http.Request) (*patchUserReq UserID: i, }, nil } + +// MeHandler represents an HTTP API handler for /me routes. +type MeHandler struct { + chi.Router + api *kithttp.API + log *zap.Logger + userSvc influxdb.UserService + passwordSvc influxdb.PasswordsService +} + +func (h *MeHandler) Prefix() string { + return prefixMe +} + +func NewHTTPMeHandler(log *zap.Logger, userService influxdb.UserService, passwordService influxdb.PasswordsService) *MeHandler { + svr := &MeHandler{ + api: kithttp.NewAPI(kithttp.WithLog(log)), + log: log, + userSvc: userService, + passwordSvc: passwordService, + } + + r := chi.NewRouter() + r.Use( + middleware.Recoverer, + middleware.RequestID, + middleware.RealIP, + ) + + // RESTy routes for "articles" resource + r.Route("/", func(r chi.Router) { + r.Get("/", svr.handleMe) + r.Put("/password", svr.handlePutMePassword) + }) + + svr.Router = r + return svr +} + +func (h *MeHandler) getUserID(ctx context.Context) (*platform.ID, error) { + a, err := icontext.GetAuthorizer(ctx) + if err != nil { + return nil, err + } + + id := a.GetUserID() + return &id, nil +} + +func (h *MeHandler) handleMe(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userID, err := h.getUserID(ctx) + if err != nil { + h.api.Err(w, r, err) + return + } + user, err := h.userSvc.FindUserByID(ctx, *userID) + if err != nil { + h.api.Err(w, r, err) + return + } + h.api.Respond(w, r, http.StatusOK, newUserResponse(user)) +} + +func (h *MeHandler) handlePutMePassword(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userID, err := h.getUserID(ctx) + if err != nil { + h.api.Err(w, r, err) + return + } + + req, err := decodePasswordResetRequest(r) + if err != nil { + h.api.Err(w, r, &errors.Error{ + Msg: fmt.Sprintf("error decoding password reset request: %s", err), + }) + } + + err = h.passwordSvc.CompareAndSetPassword(ctx, *userID, req.PasswordOld, req.PasswordNew) + if err != nil { + h.api.Err(w, r, err) + return + } + h.log.Debug("User password updated") + w.WriteHeader(http.StatusNoContent) +} diff --git a/tenant/middleware_user_auth.go b/tenant/middleware_user_auth.go index 2f7c7cc83d..47f6d3c1ee 100644 --- a/tenant/middleware_user_auth.go +++ b/tenant/middleware_user_auth.go @@ -108,11 +108,17 @@ func (s *AuthedPasswordService) SetPassword(ctx context.Context, userID platform // ComparePassword checks if the password matches the password 
recorded. // Passwords that do not match return errors. func (s *AuthedPasswordService) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - panic("not implemented") + if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { + return err + } + return s.s.ComparePassword(ctx, userID, password) } // CompareAndSetPassword checks the password and if they match // updates to the new password. func (s *AuthedPasswordService) CompareAndSetPassword(ctx context.Context, userID platform.ID, old string, new string) error { - panic("not implemented") + if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { + return err + } + return s.s.CompareAndSetPassword(ctx, userID, old, new) } diff --git a/tenant/service.go b/tenant/service.go index 01e9aed043..3bb0d2f99f 100644 --- a/tenant/service.go +++ b/tenant/service.go @@ -83,3 +83,7 @@ func (ts *Service) NewBucketHTTPHandler(log *zap.Logger, labelSvc influxdb.Label func (ts *Service) NewUserHTTPHandler(log *zap.Logger) *UserHandler { return NewHTTPUserHandler(log.With(zap.String("handler", "user")), NewAuthedUserService(ts.UserService), NewAuthedPasswordService(ts.PasswordsService)) } + +func (ts *Service) NewMeHTTPHandler(log *zap.Logger) *MeHandler { + return NewHTTPMeHandler(log.With(zap.String("handler", "user")), NewAuthedUserService(ts.UserService), NewAuthedPasswordService(ts.PasswordsService)) +}
Commit Hash: 178483c1a0de928b47653c091b71838e3f1c025d
Author: Marco Neumann
Date: 2023-06-23 10:05:50
Description: basic non-aggregates w/ InfluxQL selector functions (#8016)
Body: * test: ensure that selectors check arg count * feat: basic non-aggregates w/ InfluxQL selector functions See #7533. * refactor: clean up code * feat: get more advanced cases to work * docs: remove stale comments ----------
Footers: Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
Commit Message: feat: basic non-aggregates w/ InfluxQL selector functions (#8016) * test: ensure that selectors check arg count * feat: basic non-aggregates w/ InfluxQL selector functions See #7533. * refactor: clean up code * feat: get more advanced cases to work * docs: remove stale comments --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
Git Diff:
diff --git a/influxdb_iox/tests/query_tests/cases/in/influxql_metadata.influxql.expected b/influxdb_iox/tests/query_tests/cases/in/influxql_metadata.influxql.expected index 015ee6c829..d3a4ecdc94 100644 --- a/influxdb_iox/tests/query_tests/cases/in/influxql_metadata.influxql.expected +++ b/influxdb_iox/tests/query_tests/cases/in/influxql_metadata.influxql.expected @@ -1,19 +1,21 @@ -- Test Setup: InfluxQLSelectSupport -- InfluxQL: SHOW MEASUREMENTS; name: measurements -+-------------+ -| name | -+-------------+ -| cpu | -| disk | -| m0 | -| m1 | -| m2 | -| m3 | -| m4 | -| select_test | -| time_test | -+-------------+ ++-----------------+ +| name | ++-----------------+ +| cpu | +| disk | +| m0 | +| m1 | +| m2 | +| m3 | +| m4 | +| select_test | +| selector_test_1 | +| selector_test_2 | +| time_test | ++-----------------+ -- InfluxQL: SHOW MEASUREMENTS LIMIT 2; name: measurements +------+ @@ -24,18 +26,20 @@ name: measurements +------+ -- InfluxQL: SHOW MEASUREMENTS OFFSET 1; name: measurements -+-------------+ -| name | -+-------------+ -| disk | -| m0 | -| m1 | -| m2 | -| m3 | -| m4 | -| select_test | -| time_test | -+-------------+ ++-----------------+ +| name | ++-----------------+ +| disk | +| m0 | +| m1 | +| m2 | +| m3 | +| m4 | +| select_test | +| selector_test_1 | +| selector_test_2 | +| time_test | ++-----------------+ -- InfluxQL: SHOW MEASUREMENTS LIMIT 1 OFFSET 2; name: measurements +------+ @@ -174,6 +178,20 @@ name: select_test | st_field_ba | float | | st_field_bb | float | +-------------+-----------+ +name: selector_test_1 ++----------+-----------+ +| fieldKey | fieldType | ++----------+-----------+ +| field1 | float | +| field2 | float | +| field3 | float | ++----------+-----------+ +name: selector_test_2 ++----------+-----------+ +| fieldKey | fieldType | ++----------+-----------+ +| f | float | ++----------+-----------+ name: time_test +--------------------------------+-----------+ | fieldKey | fieldType | @@ -237,6 +255,19 @@ name: select_test | st_field | string | | st_field_aa | float | +-------------+-----------+ +name: selector_test_1 ++----------+-----------+ +| fieldKey | fieldType | ++----------+-----------+ +| field1 | float | +| field2 | float | ++----------+-----------+ +name: selector_test_2 ++----------+-----------+ +| fieldKey | fieldType | ++----------+-----------+ +| f | float | ++----------+-----------+ name: time_test +----------------------------+-----------+ | fieldKey | fieldType | @@ -280,6 +311,13 @@ name: select_test | st_field_ba | float | | st_field_bb | float | +-------------+-----------+ +name: selector_test_1 ++----------+-----------+ +| fieldKey | fieldType | ++----------+-----------+ +| field2 | float | +| field3 | float | ++----------+-----------+ name: time_test +--------------------------------+-----------+ | fieldKey | fieldType | @@ -307,6 +345,12 @@ name: select_test +-------------+-----------+ | st_field_ab | float | +-------------+-----------+ +name: selector_test_1 ++----------+-----------+ +| fieldKey | fieldType | ++----------+-----------+ +| field3 | float | ++----------+-----------+ name: time_test +--------------------------------+-----------+ | fieldKey | fieldType | @@ -1052,6 +1096,20 @@ name: select_test | tag0 | | tag1 | +-----------+ +name: selector_test_1 ++--------+ +| tagKey | ++--------+ +| tag1 | +| tag2 | +| tag3 | ++--------+ +name: selector_test_2 ++--------+ +| tagKey | ++--------+ +| first | ++--------+ name: time_test +------------------------------+ | tagKey | @@ -1110,6 +1168,18 @@ name: select_test +--------+ 
| st_tag | +--------+ +name: selector_test_1 ++--------+ +| tagKey | ++--------+ +| tag1 | ++--------+ +name: selector_test_2 ++--------+ +| tagKey | ++--------+ +| first | ++--------+ name: time_test +--------+ | tagKey | @@ -1154,6 +1224,13 @@ name: select_test | tag0 | | tag1 | +-----------+ +name: selector_test_1 ++--------+ +| tagKey | ++--------+ +| tag2 | +| tag3 | ++--------+ name: time_test +------------------------------+ | tagKey | @@ -1193,6 +1270,12 @@ name: select_test +-----------+ | st_tag_aa | +-----------+ +name: selector_test_1 ++--------+ +| tagKey | ++--------+ +| tag2 | ++--------+ name: time_test +--------------------------+ | tagKey | diff --git a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql index 31d6f39da7..56bf1d097b 100644 --- a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql +++ b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql @@ -490,12 +490,13 @@ SELECT FIRST(usage_idle), COUNT(usage_idle) FROM cpu WHERE time >= '2022-10-31T0 -- FILL(0) SELECT FIRST(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time < '2022-10-31T02:02:00Z' GROUP BY TIME(30s), cpu FILL(0); --- --- Not implemented: Selector queries with tag and field projections --- See: https://github.com/influxdata/influxdb_iox/issues/7533 --- - +-- Selector queries with tag and field projections SELECT FIRST(usage_idle), usage_user, usage_system FROM cpu; +SELECT field1, FIRST(field2), field3 FROM selector_test_1 WHERE time >=0 AND time < 10 GROUP BY TIME(30s); +SELECT field1, FIRST(field2), field3 FROM selector_test_1 WHERE time >=0 AND time < 10 GROUP BY TIME(30s) FILL(0); + +-- Selector queries with tag and field projections and name clashes +SELECT FIRST(f), first FROM selector_test_2 WHERE time >=0 AND time < 10 GROUP BY TIME(30s); -- -- DISTINCT @@ -638,4 +639,4 @@ SELECT min, max, max - min FROM (SELECT MIN(usage_idle), MAX(usage_system) FROM SELECT min, max, max - min FROM (SELECT MIN(usage_idle), MAX(usage_system) FROM cpu GROUP BY TIME(10s)); -- the predicate in the outer-most query is the narrowest, and therefore pushed through all the children -SELECT * FROM (SELECT * FROM (SELECT FIRST(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:10Z'; \ No newline at end of file +SELECT * FROM (SELECT * FROM (SELECT FIRST(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:00Z') WHERE time >= '2022-10-31T02:00:10Z'; diff --git a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected index b7c0add9e8..20a0ab2f69 100644 --- a/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests/cases/in/issue_6112.influxql.expected @@ -2505,7 +2505,33 @@ tags: cpu=cpu1 | 2022-10-31T02:01:30 | 0.0 | +---------------------+-------+ -- InfluxQL: SELECT FIRST(usage_idle), usage_user, usage_system FROM cpu; -Error while planning query: This feature is not implemented: projections with a single selector and fields: See https://github.com/influxdata/influxdb_iox/issues/7533 +name: cpu ++---------------------+-------+------------+--------------+ +| time | first | usage_user | usage_system | ++---------------------+-------+------------+--------------+ +| 2022-10-31T02:00:00 | 2.98 | | 2.2 | ++---------------------+-------+------------+--------------+ +-- InfluxQL: 
SELECT field1, FIRST(field2), field3 FROM selector_test_1 WHERE time >=0 AND time < 10 GROUP BY TIME(30s); +name: selector_test_1 ++---------------------+--------+-------+--------+ +| time | field1 | first | field3 | ++---------------------+--------+-------+--------+ +| 1970-01-01T00:00:00 | | 2.0 | | ++---------------------+--------+-------+--------+ +-- InfluxQL: SELECT field1, FIRST(field2), field3 FROM selector_test_1 WHERE time >=0 AND time < 10 GROUP BY TIME(30s) FILL(0); +name: selector_test_1 ++---------------------+--------+-------+--------+ +| time | field1 | first | field3 | ++---------------------+--------+-------+--------+ +| 1970-01-01T00:00:00 | 0.0 | 2.0 | 0.0 | ++---------------------+--------+-------+--------+ +-- InfluxQL: SELECT FIRST(f), first FROM selector_test_2 WHERE time >=0 AND time < 10 GROUP BY TIME(30s); +name: selector_test_2 ++---------------------+-------+---------+ +| time | first | first_1 | ++---------------------+-------+---------+ +| 1970-01-01T00:00:00 | 1.0 | a | ++---------------------+-------+---------+ -- InfluxQL: SELECT COUNT(DISTINCT str) FROM m0; name: m0 +---------------------+-------+ diff --git a/influxdb_iox/tests/query_tests/setups.rs b/influxdb_iox/tests/query_tests/setups.rs index c1169be18b..dc865f0306 100644 --- a/influxdb_iox/tests/query_tests/setups.rs +++ b/influxdb_iox/tests/query_tests/setups.rs @@ -1351,6 +1351,10 @@ pub static SETUPS: Lazy<HashMap<SetupName, SetupSteps>> = Lazy::new(|| { select_test,tag0=a,tag1=b,st_tag=ab,st_tag_ab=x st_field="ab",st_field_ab=1 1667181600000000000 select_test,tag0=b,tag1=a,st_tag=ba,st_tag_ba=x st_field="ba",st_field_ba=1 1667181600000000000 select_test,tag0=b,tag1=b,st_tag=bb,st_tag_bb=x st_field="bb",st_field_bb=1 1667181600000000000 + selector_test_1,tag1=a field1=1 1 + selector_test_1,tag2=b field2=2 2 + selector_test_1,tag3=c field3=3 3 + selector_test_2,first=a f=1 1 "# .to_string(), ), diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs index b6ad6cf0ce..c735fdf20f 100644 --- a/iox_query_influxql/src/plan/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -85,6 +85,8 @@ use std::sync::Arc; use super::ir::DataSourceSchema; use super::parse_regex; +use super::util::contains_expr; +use super::util_copy::clone_with_replacement; /// The column index of the measurement column. const MEASUREMENT_COLUMN_INDEX: u32 = 0; @@ -860,15 +862,80 @@ impl<'a> InfluxQLToLogicalPlan<'a> { // It is possible this vector is empty, when all the fields in the // projection refer to columns that do not exist in the current // table. 
- let aggr_exprs = find_aggregate_exprs(&select_exprs); + let mut aggr_exprs = find_aggregate_exprs(&select_exprs); + + // gather some time-related metadata + let Some(time_column_index) = find_time_column_index(fields) else { + return error::internal("unable to find time column") + }; + + // if there's only a single selector, wrap non-aggregated fields into that selector + let mut should_fill_expr = fields.iter().map(is_aggregate_field).collect::<Vec<_>>(); + if aggr_exprs.len() == 1 { + let selector = aggr_exprs[0].clone(); + + if let Expr::AggregateUDF(mut udf) = selector.clone() { + if udf.fun.name.starts_with("selector_") { + let selector_index = select_exprs + .iter() + .enumerate() + .find(|(_i, expr)| contains_expr(expr, &selector)) + .map(|(i, _expr)| i) + .ok_or_else(|| error::map::internal("cannot find selector expression"))?; + + let group_by_tag_set = group_by_tag_set.iter().copied().collect::<HashSet<_>>(); + + let mut additional_args = vec![]; + let mut fields_to_extract = vec![]; + for (idx, expr) in select_exprs.iter().enumerate() { + if (idx == time_column_index) || (idx == selector_index) { + continue; + } + let (expr, out_name) = match expr.clone() { + Expr::Alias(expr, out_name) => (*expr, out_name), + _ => { + return error::internal("other field is not aliased"); + } + }; + if group_by_tag_set.contains(&out_name.as_str()) { + continue; + } + additional_args.push(expr); + fields_to_extract.push(( + idx, + format!("other_{}", additional_args.len()), + out_name, + )); + } + + udf.args.append(&mut additional_args); + let selector_new = Expr::AggregateUDF(udf); + select_exprs[selector_index] = + clone_with_replacement(&select_exprs[selector_index], &|expr| { + if expr == &selector { + Ok(Some(selector_new.clone())) + } else { + Ok(None) + } + }) + .expect("cannot fail"); + aggr_exprs[0] = selector_new.clone(); + + for (idx, struct_name, out_alias) in fields_to_extract { + select_exprs[idx] = Expr::GetIndexedField(GetIndexedField { + expr: Box::new(selector_new.clone()), + key: ScalarValue::Utf8(Some(struct_name)), + }) + .alias(out_alias); + should_fill_expr[idx] = true; + } + } + } + } // This block identifies the time column index and updates the time expression // based on the semantics of the projection. let time_column = { - let Some(time_column_index) = find_time_column_index(fields) else { - return error::internal("unable to find time column") - }; - // Take ownership of the alias, so we don't reallocate, and temporarily place a literal // `NULL` in its place. let Expr::Alias(_, alias) = std::mem::replace(&mut select_exprs[time_column_index], lit(ScalarValue::Null)) else { @@ -883,25 +950,21 @@ impl<'a> InfluxQLToLogicalPlan<'a> { // 3. 
otherwise, project the Unix epoch (0) select_exprs[time_column_index] = if let Some(i) = ctx.interval { let stride = lit(ScalarValue::new_interval_mdn(0, 0, i.duration)); - let offset = i.offset.map_or(0, |v|v); + let offset = i.offset.unwrap_or_default(); date_bin( stride, "time".as_expr(), lit(ScalarValue::TimestampNanosecond(Some(offset), None)), ) - } else if let ProjectionType::Selector { has_fields } = - ctx.projection_type - { - if has_fields { - return error::not_implemented("projections with a single selector and fields: See https://github.com/influxdata/influxdb_iox/issues/7533"); - } - + } else if let ProjectionType::Selector { has_fields: _ } = ctx.projection_type { let selector = match aggr_exprs.len() { 1 => aggr_exprs[0].clone(), len => { // Should have been validated by `select_statement_info` - return error::internal(format!("internal: expected 1 selector expression, got {len}")); + return error::internal(format!( + "internal: expected 1 selector expression, got {len}" + )); } }; @@ -912,7 +975,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> { } else { lit_timestamp_nano(0) } - .alias(alias); + .alias(alias); &select_exprs[time_column_index] }; @@ -991,11 +1054,11 @@ impl<'a> InfluxQLToLogicalPlan<'a> { // refer to the columns from the aggregate projection let select_exprs_post_aggr = select_exprs .iter() - .zip(fields) - .map(|(expr, f)| { + .zip(should_fill_expr) + .map(|(expr, should_fill)| { // This implements the `FILL(<value>)` strategy, by coalescing any aggregate // expressions to `<value>` when they are `NULL`. - let fill_if_null = if fill_if_null.is_some() && is_aggregate_field(f) { + let fill_if_null = if fill_if_null.is_some() && should_fill { fill_if_null } else { None @@ -2706,6 +2769,14 @@ mod test { .influx_field("u64_field", InfluxFieldType::UInteger) .build() .unwrap(), + // table w/ name clashes + SchemaBuilder::new() + .measurement("name_clash") + .timestamp() + .tag("first") + .influx_field("f", InfluxFieldType::Float) + .build() + .unwrap(), ]); let iox_ctx = IOxSessionContext::with_testing(); @@ -2866,6 +2937,10 @@ mod test { Limit: skip=0, fetch=1 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] Filter: merge_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] + Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("name_clash")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)] + Limit: skip=0, fetch=1 [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + Filter: name_clash.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + TableScan: name_clash [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("temp_01")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)] Limit: skip=0, fetch=1 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, 
time:Timestamp(Nanosecond, None)] Filter: temp_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] @@ -2910,6 +2985,10 @@ mod test { Limit: skip=0, fetch=1 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] Filter: merge_01.time >= TimestampNanosecond(1338, None) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] + Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("name_clash")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)] + Limit: skip=0, fetch=1 [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + Filter: name_clash.time >= TimestampNanosecond(1338, None) [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + TableScan: name_clash [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] Projection: Dictionary(Int32, Utf8("measurements")) AS iox::measurement, Dictionary(Int32, Utf8("temp_01")) AS name [iox::measurement:Dictionary(Int32, Utf8), name:Dictionary(Int32, Utf8)] Limit: skip=0, fetch=1 [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] Filter: temp_01.time >= TimestampNanosecond(1338, None) [field_f64:Float64;N, field_i64:Int64;N, field_str:Utf8;N, field_u64:UInt64;N, shared_field0:Float64;N, shared_tag0:Dictionary(Int32, Utf8);N, shared_tag1:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] @@ -2991,6 +3070,13 @@ mod test { Aggregate: groupBy=[[]], aggr=[[SUM(CAST(merge_01.col1 IS NOT NULL AS UInt64)) AS col1]] [col1:UInt64;N] Filter: merge_01.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] + Projection: Dictionary(Int32, Utf8("name_clash")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N] + Filter: tagKey IS NOT NULL [tagKey:Utf8;N] + Unnest: tagKey [tagKey:Utf8;N] + Projection: make_array(CASE WHEN first > Int32(0) THEN Utf8("first") END) AS tagKey [tagKey:List(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} });N] + Aggregate: groupBy=[[]], aggr=[[SUM(CAST(name_clash.first IS NOT NULL AS UInt64)) AS first]] [first:UInt64;N] + Filter: name_clash.time >= TimestampNanosecond(1672444800000000000, None) AND Boolean(false) [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + TableScan: name_clash [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] Projection: Dictionary(Int32, Utf8("temp_01")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N] Filter: tagKey IS NOT NULL [tagKey:Utf8;N] Unnest: tagKey 
[tagKey:Utf8;N] @@ -3069,6 +3155,13 @@ mod test { Aggregate: groupBy=[[]], aggr=[[SUM(CAST(merge_01.col1 IS NOT NULL AS UInt64)) AS col1]] [col1:UInt64;N] Filter: merge_01.time >= TimestampNanosecond(1338, None) [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] TableScan: merge_01 [col0:Float64;N, col1:Dictionary(Int32, Utf8);N, col2:Utf8;N, col3:Boolean;N, time:Timestamp(Nanosecond, None)] + Projection: Dictionary(Int32, Utf8("name_clash")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N] + Filter: tagKey IS NOT NULL [tagKey:Utf8;N] + Unnest: tagKey [tagKey:Utf8;N] + Projection: make_array(CASE WHEN first > Int32(0) THEN Utf8("first") END) AS tagKey [tagKey:List(Field { name: "item", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} });N] + Aggregate: groupBy=[[]], aggr=[[SUM(CAST(name_clash.first IS NOT NULL AS UInt64)) AS first]] [first:UInt64;N] + Filter: name_clash.time >= TimestampNanosecond(1338, None) [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + TableScan: name_clash [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] Projection: Dictionary(Int32, Utf8("temp_01")) AS iox::measurement, tagKey [iox::measurement:Dictionary(Int32, Utf8), tagKey:Utf8;N] Filter: tagKey IS NOT NULL [tagKey:Utf8;N] Unnest: tagKey [tagKey:Utf8;N] @@ -3494,9 +3587,35 @@ mod test { TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); - // not implemented - // See: https://github.com/influxdata/influxdb_iox/issues/7533 - assert_snapshot!(plan("SELECT LAST(usage_idle), usage_system FROM cpu"), @"This feature is not implemented: projections with a single selector and fields: See https://github.com/influxdata/influxdb_iox/issues/7533"); + // additional fields + assert_snapshot!(plan("SELECT LAST(usage_idle), usage_system FROM cpu"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, last:Float64;N, usage_system:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, (selector_last(cpu.usage_idle,cpu.time,cpu.usage_system))[time] AS time, (selector_last(cpu.usage_idle,cpu.time,cpu.usage_system))[value] AS last, (selector_last(cpu.usage_idle,cpu.time,cpu.usage_system))[other_1] AS usage_system [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, last:Float64;N, usage_system:Float64;N] + Aggregate: groupBy=[[]], aggr=[[selector_last(cpu.usage_idle, cpu.time, cpu.usage_system)]] [selector_last(cpu.usage_idle,cpu.time,cpu.usage_system):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "other_1", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + assert_snapshot!(plan("SELECT LAST(usage_idle), usage_system FROM cpu GROUP BY TIME(5s)"), @r###" + Sort: time ASC NULLS LAST 
[iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, last:Float64;N, usage_system:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, (selector_last(cpu.usage_idle,cpu.time,cpu.usage_system))[value] AS last, (selector_last(cpu.usage_idle,cpu.time,cpu.usage_system))[other_1] AS usage_system [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, last:Float64;N, usage_system:Float64;N] + GapFill: groupBy=[[time]], aggr=[[selector_last(cpu.usage_idle,cpu.time,cpu.usage_system)]], time_column=time, stride=IntervalMonthDayNano("5000000000"), range=Unbounded..Included(TimestampNanosecond(1672531200000000000, None)) [time:Timestamp(Nanosecond, None);N, selector_last(cpu.usage_idle,cpu.time,cpu.usage_system):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "other_1", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] + Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("5000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[selector_last(cpu.usage_idle, cpu.time, cpu.usage_system)]] [time:Timestamp(Nanosecond, None);N, selector_last(cpu.usage_idle,cpu.time,cpu.usage_system):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "other_1", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] + Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + assert_snapshot!(plan("SELECT LAST(usage_idle), usage_system FROM cpu GROUP BY TIME(5s) FILL(0)"), @r###" + Sort: time ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, last:Float64;N, usage_system:Float64;N] + Projection: Dictionary(Int32, Utf8("cpu")) AS iox::measurement, time, (coalesce_struct(selector_last(cpu.usage_idle,cpu.time,cpu.usage_system), Struct({value:Float64(0),time:TimestampNanosecond(0, None),other_1:Float64(0)})))[value] AS last, (coalesce_struct(selector_last(cpu.usage_idle,cpu.time,cpu.usage_system), Struct({value:Float64(0),time:TimestampNanosecond(0, None),other_1:Float64(0)})))[other_1] AS usage_system [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, last:Float64;N, usage_system:Float64;N] + GapFill: groupBy=[[time]], aggr=[[selector_last(cpu.usage_idle,cpu.time,cpu.usage_system)]], time_column=time, stride=IntervalMonthDayNano("5000000000"), range=Unbounded..Included(TimestampNanosecond(1672531200000000000, None)) [time:Timestamp(Nanosecond, None);N, selector_last(cpu.usage_idle,cpu.time,cpu.usage_system):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: 
"time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "other_1", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] + Aggregate: groupBy=[[date_bin(IntervalMonthDayNano("5000000000"), cpu.time, TimestampNanosecond(0, None)) AS time]], aggr=[[selector_last(cpu.usage_idle, cpu.time, cpu.usage_system)]] [time:Timestamp(Nanosecond, None);N, selector_last(cpu.usage_idle,cpu.time,cpu.usage_system):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "other_1", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] + Filter: cpu.time <= TimestampNanosecond(1672531200000000000, None) [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] + "###); + assert_snapshot!(plan("SELECT FIRST(f), first FROM name_clash"), @r###" + Sort: time ASC NULLS LAST, first_1 ASC NULLS LAST [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, first:Float64;N, first_1:Dictionary(Int32, Utf8);N] + Projection: Dictionary(Int32, Utf8("name_clash")) AS iox::measurement, (selector_first(name_clash.f,name_clash.time,name_clash.first))[time] AS time, (selector_first(name_clash.f,name_clash.time,name_clash.first))[value] AS first, (selector_first(name_clash.f,name_clash.time,name_clash.first))[other_1] AS first_1 [iox::measurement:Dictionary(Int32, Utf8), time:Timestamp(Nanosecond, None);N, first:Float64;N, first_1:Dictionary(Int32, Utf8);N] + Aggregate: groupBy=[[]], aggr=[[selector_first(name_clash.f, name_clash.time, name_clash.first)]] [selector_first(name_clash.f,name_clash.time,name_clash.first):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "other_1", data_type: Dictionary(Int32, Utf8), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] + TableScan: name_clash [f:Float64;N, first:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None)] + "###); // Validate we can call the remaining supported selector functions assert_snapshot!(plan("SELECT FIRST(usage_idle) FROM cpu"), @r###" @@ -3517,6 +3636,9 @@ mod test { Aggregate: groupBy=[[]], aggr=[[selector_min(cpu.usage_idle, cpu.time)]] [selector_min(cpu.usage_idle,cpu.time):Struct([Field { name: "value", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "time", data_type: Timestamp(Nanosecond, None), nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }]);N] TableScan: cpu [cpu:Dictionary(Int32, Utf8);N, host:Dictionary(Int32, Utf8);N, region:Dictionary(Int32, Utf8);N, time:Timestamp(Nanosecond, None), usage_idle:Float64;N, usage_system:Float64;N, usage_user:Float64;N] "###); + + // Invalid number of arguments + 
assert_snapshot!(plan("SELECT MIN(usage_idle, usage_idle) FROM cpu"), @"Error during planning: invalid number of arguments for min, expected 1, got 2"); } } diff --git a/iox_query_influxql/src/plan/util.rs b/iox_query_influxql/src/plan/util.rs index 4c2eb3c8f6..72dac59207 100644 --- a/iox_query_influxql/src/plan/util.rs +++ b/iox_query_influxql/src/plan/util.rs @@ -1,5 +1,6 @@ use crate::plan::{error, util_copy}; use arrow::datatypes::{DataType, TimeUnit}; +use datafusion::common::tree_node::{TreeNode, VisitRecursion}; use datafusion::common::{DFSchemaRef, Result}; use datafusion::logical_expr::utils::expr_as_column_expr; use datafusion::logical_expr::{lit, Expr, ExprSchemable, LogicalPlan, Operator}; @@ -121,3 +122,17 @@ pub(crate) fn rebase_expr( }) } } + +pub(crate) fn contains_expr(expr: &Expr, needle: &Expr) -> bool { + let mut found = false; + expr.apply(&mut |expr| { + if expr == needle { + found = true; + Ok(VisitRecursion::Stop) + } else { + Ok(VisitRecursion::Continue) + } + }) + .expect("cannot fail"); + found +}
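The planner change above handles queries with exactly one selector (e.g. FIRST or LAST) plus bare field projections: the extra fields are appended as arguments to the selector UDF, and each one is later read back out of the selector's struct-valued result under a synthetic "other_N" key. The following toy sketch uses a hand-rolled Expr type (not DataFusion's) purely to show that append-then-extract idea under those assumptions:

```rust
// Toy illustration of the rewrite: extra non-aggregated fields become extra
// selector arguments, then each is projected back out of the selector's
// struct result under "other_1", "other_2", ...
#[derive(Clone, Debug, PartialEq)]
enum Expr {
    Column(String),
    Selector { name: String, args: Vec<Expr> },
    GetField { input: Box<Expr>, key: String },
}

fn wrap_fields_into_selector(selector: Expr, extra_fields: Vec<Expr>) -> (Expr, Vec<Expr>) {
    let Expr::Selector { name, mut args } = selector else {
        // The real planner validates earlier that exactly one selector exists.
        panic!("expected a selector expression");
    };
    // Append the extra fields as additional selector arguments.
    args.extend(extra_fields.iter().cloned());
    let selector = Expr::Selector { name, args };

    // Project each extra field back out of the selector's result struct.
    let projections = (1..=extra_fields.len())
        .map(|i| Expr::GetField {
            input: Box::new(selector.clone()),
            key: format!("other_{i}"),
        })
        .collect();
    (selector, projections)
}

fn main() {
    let selector = Expr::Selector {
        name: "selector_first".into(),
        args: vec![Expr::Column("usage_idle".into()), Expr::Column("time".into())],
    };
    let extra = vec![Expr::Column("usage_system".into())];
    let (rewritten, projections) = wrap_fields_into_selector(selector, extra);
    println!("selector: {rewritten:?}");
    println!("extra-field projections: {projections:?}");
}
```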
Commit Hash: b7bd66195fade40f0693864bfed8b7958b1cd270
Author: Phil Bracikowski
Date: 2023-04-28 09:37:08
Description: improve logging in lister (#7695)
Body: * follow up to #7689
Footers: null
Commit Message: chore(garbage collector): improve logging in lister (#7695) * follow up to #7689
Git Diff:
diff --git a/garbage_collector/src/objectstore/lister.rs b/garbage_collector/src/objectstore/lister.rs index 593a39910f..21b251eb56 100644 --- a/garbage_collector/src/objectstore/lister.rs +++ b/garbage_collector/src/objectstore/lister.rs @@ -19,6 +19,8 @@ pub(crate) async fn perform( checker: mpsc::Sender<ObjectMeta>, sleep_interval_minutes: u64, ) -> Result<()> { + info!("beginning object store listing"); + loop { let mut backoff = Backoff::new(&BackoffConfig::default()); @@ -29,7 +31,7 @@ pub(crate) async fn perform( .await .expect("backoff retries forever"); - let mut chunked_items = items.chunks(MAX_ITEMS_PROCESSED_PER_LOOP).boxed(); + let mut chunked_items = items.chunks(MAX_ITEMS_PROCESSED_PER_LOOP); while let Some(v) = chunked_items.next().await { // relist and sleep on an error to allow time for transient errors to dissipate @@ -40,8 +42,10 @@ pub(crate) async fn perform( break; } sleep(Duration::from_secs(60 * sleep_interval_minutes)).await; + info!("starting next chunk of listed files"); // next chunk } + info!("end of object store item list: will relist in {sleep_interval_minutes} minutes"); sleep(Duration::from_secs(60 * sleep_interval_minutes)).await; } } @@ -50,12 +54,14 @@ async fn process_item_list( items: Vec<object_store::Result<ObjectMeta>>, checker: &mpsc::Sender<ObjectMeta>, ) -> Result<()> { + let mut i = 0; for item in items { let item = item.context(MalformedSnafu)?; debug!(location = %item.location, "Object store item"); checker.send(item).await?; + i += 1; } - debug!("end of object store item list"); + info!("processed {i} files of listed chunk"); Ok(()) }
Commit Hash: 4ad21e1ecaa328d2f979069cdeb4539d6a4396e9
Author: Marco Neumann
Date: 2023-09-14 11:15:11
Description: decode time portion of the partition key (#8725)
Body: * refactor: make partition key parsing more flexible * feat: decode time portion of the partition key Helpful for #8705 because we can prune partitions earlier during the query planning w/o having to consider their parquet files at all.
Footers: null
Commit Message: feat: decode time portion of the partition key (#8725) * refactor: make partition key parsing more flexible * feat: decode time portion of the partition key Helpful for #8705 because we can prune partitions earlier during the query planning w/o having to consider their parquet files at all.
Git Diff:
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs index e076d1388c..48d82e48ca 100644 --- a/data_types/src/partition_template.rs +++ b/data_types/src/partition_template.rs @@ -150,9 +150,14 @@ //! //! [percent encoded]: https://url.spec.whatwg.org/#percent-encoded-bytes +use chrono::{ + format::{Numeric, StrftimeItems}, + DateTime, Days, Months, Utc, +}; use generated_types::influxdata::iox::partition_template::v1 as proto; use once_cell::sync::Lazy; use percent_encoding::{percent_decode_str, AsciiSet, CONTROLS}; +use schema::TIME_COLUMN_NAME; use std::{borrow::Cow, sync::Arc}; use thiserror::Error; @@ -544,6 +549,15 @@ pub enum ColumnValue<'a> { /// be false - use [`ColumnValue::is_prefix_match_of()`] to prefix match /// instead. Prefix(Cow<'a, str>), + + /// Datetime. + Datetime { + /// Inclusive begin of the datatime partition range. + begin: DateTime<Utc>, + + /// Exclusive end of the datatime partition range. + end: DateTime<Utc>, + }, } impl<'a> ColumnValue<'a> { @@ -558,6 +572,9 @@ impl<'a> ColumnValue<'a> { let this = match self { ColumnValue::Identity(v) => v.as_bytes(), ColumnValue::Prefix(v) => v.as_bytes(), + ColumnValue::Datetime { .. } => { + return false; + } }; other.as_ref().starts_with(this) @@ -572,6 +589,7 @@ where match self { ColumnValue::Identity(v) => other.as_ref().eq(v.as_ref()), ColumnValue::Prefix(_) => false, + ColumnValue::Datetime { .. } => false, } } } @@ -611,54 +629,160 @@ pub fn build_column_values<'a>( // Produce an iterator of (template_part, template_value) template_parts .zip(key_parts) - .filter_map(|(template, mut value)| { - // Perform re-mapping of sentinel values. - match value { - PARTITION_KEY_VALUE_NULL_STR => { - // Skip null or empty partition key parts, indicated by the - // presence of a single "!" character as the part value. - return None; - } - PARTITION_KEY_VALUE_EMPTY_STR => { - // Re-map the empty string sentinel "^"" to an empty string - // value. - value = ""; - } - _ => {} + .filter_map(|(template, value)| match template { + TemplatePart::TagValue(col_name) => Some((col_name, parse_part_tag_value(value)?)), + TemplatePart::TimeFormat(format) => { + Some((TIME_COLUMN_NAME, parse_part_time_format(value, format)?)) } + }) +} + +fn parse_part_tag_value(value: &str) -> Option<ColumnValue<'_>> { + // Perform re-mapping of sentinel values. + let value = match value { + PARTITION_KEY_VALUE_NULL_STR => { + // Skip null or empty partition key parts, indicated by the + // presence of a single "!" character as the part value. + return None; + } + PARTITION_KEY_VALUE_EMPTY_STR => { + // Re-map the empty string sentinel "^"" to an empty string + // value. + "" + } + _ => value, + }; + + // Reverse the urlencoding of all value parts + let decoded = percent_decode_str(value) + .decode_utf8() + .expect("invalid partition key part encoding"); + + // Inspect the final character in the string, pre-decoding, to + // determine if it has been truncated. + if value + .as_bytes() + .last() + .map(|v| *v == PARTITION_KEY_PART_TRUNCATED as u8) + .unwrap_or_default() + { + // Remove the truncation marker. + let len = decoded.len() - 1; + + // Only allocate if needed; re-borrow a subslice of `Cow::Borrowed` if not. 
+ let column_cow = match decoded { + Cow::Borrowed(s) => Cow::Borrowed(&s[..len]), + Cow::Owned(s) => Cow::Owned(s[..len].to_string()), + }; + Some(ColumnValue::Prefix(column_cow)) + } else { + Some(ColumnValue::Identity(decoded)) + } +} + +fn parse_part_time_format(value: &str, format: &str) -> Option<ColumnValue<'static>> { + use chrono::format::{parse, Item, Parsed}; + + let items = StrftimeItems::new(format); + + let mut parsed = Parsed::new(); + parse(&mut parsed, value, items.clone()).ok()?; + + // fill in defaults + let parsed = parsed_implicit_defaults(parsed)?; + + let begin = parsed.to_datetime_with_timezone(&Utc).ok()?; - match template { - TemplatePart::TagValue(col_name) => Some((col_name, value)), - TemplatePart::TimeFormat(_) => None, + let mut end: Option<DateTime<Utc>> = None; + for item in items { + let item_end = match item { + Item::Literal(_) | Item::OwnedLiteral(_) | Item::Space(_) | Item::OwnedSpace(_) => None, + Item::Error => { + return None; } - }) - // Reverse the urlencoding of all value parts - .map(|(name, value)| { - let decoded = percent_decode_str(value) - .decode_utf8() - .expect("invalid partition key part encoding"); - - // Inspect the final character in the string, pre-decoding, to - // determine if it has been truncated. - if value - .as_bytes() - .last() - .map(|v| *v == PARTITION_KEY_PART_TRUNCATED as u8) - .unwrap_or_default() - { - // Remove the truncation marker. - let len = decoded.len() - 1; - - // Only allocate if needed; re-borrow a subslice of `Cow::Borrowed` if not. - let column_cow = match decoded { - Cow::Borrowed(s) => Cow::Borrowed(&s[..len]), - Cow::Owned(s) => Cow::Owned(s[..len].to_string()), - }; - return (name, ColumnValue::Prefix(column_cow)); + Item::Numeric(numeric, _pad) => { + match numeric { + Numeric::Year => Some(begin + Months::new(12)), + Numeric::Month => Some(begin + Months::new(1)), + Numeric::Day => Some(begin + Days::new(1)), + _ => { + // not supported + return None; + } + } } + Item::Fixed(_) => { + // not implemented + return None; + } + }; - (name, ColumnValue::Identity(decoded)) - }) + end = match (end, item_end) { + (Some(a), Some(b)) => { + let a_d = a - begin; + let b_d = b - begin; + if a_d < b_d { + Some(a) + } else { + Some(b) + } + } + (None, Some(dt)) => Some(dt), + (Some(dt), None) => Some(dt), + (None, None) => None, + }; + } + + end.map(|end| ColumnValue::Datetime { begin, end }) +} + +fn parsed_implicit_defaults(mut parsed: chrono::format::Parsed) -> Option<chrono::format::Parsed> { + parsed.year?; + + if parsed.month.is_none() { + if parsed.day.is_some() { + return None; + } + + parsed.set_month(1).ok()?; + } + + if parsed.day.is_none() { + if parsed.hour_div_12.is_some() || parsed.hour_mod_12.is_some() { + return None; + } + + parsed.set_day(1).ok()?; + } + + if parsed.hour_div_12.is_none() || parsed.hour_mod_12.is_none() { + // consistency check + if parsed.hour_div_12.is_some() { + return None; + } + if parsed.hour_mod_12.is_some() { + return None; + } + + if parsed.minute.is_some() { + return None; + } + + parsed.set_hour(0).ok()?; + } + + if parsed.minute.is_none() { + if parsed.second.is_some() { + return None; + } + if parsed.nanosecond.is_some() { + return None; + } + + parsed.set_minute(0).ok()?; + } + + Some(parsed) } /// In production code, the template should come from protobuf that is either from the database or @@ -692,6 +816,7 @@ pub fn test_table_partition_override( mod tests { use super::*; use assert_matches::assert_matches; + use chrono::TimeZone; use sqlx::Encode; use 
test_helpers::assert_error; @@ -833,6 +958,13 @@ mod tests { ColumnValue::Prefix(s.into()) } + fn year(y: i32) -> ColumnValue<'static> { + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(y, 1, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(y + 1, 1, 1, 0, 0, 0).unwrap(), + } + } + /// Generate a test that asserts "partition_key" is reversible, yielding /// "want" assuming the partition "template" was used. macro_rules! test_build_column_values { @@ -871,7 +1003,11 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|bananas|plátanos", - want = [("a", identity("bananas")), ("b", identity("plátanos"))] + want = [ + (TIME_COLUMN_NAME, year(2023)), + ("a", identity("bananas")), + ("b", identity("plátanos")), + ] ); test_build_column_values!( @@ -882,7 +1018,7 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|!|plátanos", - want = [("b", identity("plátanos"))] + want = [(TIME_COLUMN_NAME, year(2023)), ("b", identity("plátanos")),] ); test_build_column_values!( @@ -893,7 +1029,7 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|!|!", - want = [] + want = [(TIME_COLUMN_NAME, year(2023)),] ); test_build_column_values!( @@ -904,7 +1040,11 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|cat%7Cdog|%21", - want = [("a", identity("cat|dog")), ("b", identity("!"))] + want = [ + (TIME_COLUMN_NAME, year(2023)), + ("a", identity("cat|dog")), + ("b", identity("!")), + ] ); test_build_column_values!( @@ -915,7 +1055,7 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|%2550|!", - want = [("a", identity("%50"))] + want = [(TIME_COLUMN_NAME, year(2023)), ("a", identity("%50")),] ); test_build_column_values!( @@ -926,7 +1066,7 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|BANANAS#|!", - want = [("a", prefix("BANANAS"))] + want = [(TIME_COLUMN_NAME, year(2023)), ("a", prefix("BANANAS")),] ); test_build_column_values!( @@ -937,7 +1077,10 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|%28%E3%83%8E%E0%B2%A0%E7%9B%8A%E0%B2%A0%29%E3%83%8E%E5%BD%A1%E2%94%BB%E2%94%81%E2%94%BB#|!", - want = [("a", prefix("(ノಠ益ಠ)ノ彡┻━┻"))] + want = [ + (TIME_COLUMN_NAME, year(2023)), + ("a", prefix("(ノಠ益ಠ)ノ彡┻━┻")), + ] ); test_build_column_values!( @@ -948,7 +1091,7 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|%E0%AE%A8%E0%AE%BF#|!", - want = [("a", prefix("நி"))] + want = [(TIME_COLUMN_NAME, year(2023)), ("a", prefix("நி")),] ); test_build_column_values!( @@ -959,7 +1102,150 @@ mod tests { TemplatePart::TagValue("b"), ], partition_key = "2023|is%7Cnot%21ambiguous%2510%23|!", - want = [("a", identity("is|not!ambiguous%10#"))] + want = [ + (TIME_COLUMN_NAME, year(2023)), + ("a", identity("is|not!ambiguous%10#")), + ] + ); + + test_build_column_values!( + datetime_fixed, + template = [TemplatePart::TimeFormat("foo"),], + partition_key = "foo", + want = [] + ); + + test_build_column_values!( + datetime_range_y, + template = [TemplatePart::TimeFormat("%Y"),], + partition_key = "2023", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 1, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_range_y_m, + template = [TemplatePart::TimeFormat("%Y-%m"),], + partition_key = "2023-09", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 9, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2023, 10, 1, 0, 0, 
0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_range_y_m_overflow_year, + template = [TemplatePart::TimeFormat("%Y-%m"),], + partition_key = "2023-12", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 12, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_range_y_m_d, + template = [TemplatePart::TimeFormat("%Y-%m-%d"),], + partition_key = "2023-09-01", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 9, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2023, 9, 2, 0, 0, 0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_range_y_m_d_overflow_month, + template = [TemplatePart::TimeFormat("%Y-%m-%d"),], + partition_key = "2023-09-30", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 9, 30, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2023, 10, 1, 0, 0, 0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_range_y_m_d_overflow_year, + template = [TemplatePart::TimeFormat("%Y-%m-%d"),], + partition_key = "2023-12-31", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 12, 31, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_range_d_m_y, + template = [TemplatePart::TimeFormat("%d-%m-%Y"),], + partition_key = "01-09-2023", + want = [( + TIME_COLUMN_NAME, + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(2023, 9, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(2023, 9, 2, 0, 0, 0).unwrap(), + }, + )] + ); + + test_build_column_values!( + datetime_not_compact_y_d, + template = [TemplatePart::TimeFormat("%Y-%d"),], + partition_key = "2023-01", + want = [] + ); + + test_build_column_values!( + datetime_not_compact_m, + template = [TemplatePart::TimeFormat("%m"),], + partition_key = "01", + want = [] + ); + + test_build_column_values!( + datetime_not_compact_d, + template = [TemplatePart::TimeFormat("%d"),], + partition_key = "01", + want = [] + ); + + test_build_column_values!( + datetime_range_unimplemented_y_m_d_h, + template = [TemplatePart::TimeFormat("%Y-%m-%dT%H"),], + partition_key = "2023-12-31T00", + want = [] + ); + + test_build_column_values!( + datetime_range_unimplemented_y_m_d_h_m, + template = [TemplatePart::TimeFormat("%Y-%m-%dT%H:%M"),], + partition_key = "2023-12-31T00:00", + want = [] + ); + + test_build_column_values!( + datetime_range_unimplemented_y_m_d_h_m_s, + template = [TemplatePart::TimeFormat("%Y-%m-%dT%H:%M:%S"),], + partition_key = "2023-12-31T00:00:00", + want = [] ); test_build_column_values!( diff --git a/ingester/src/buffer_tree/table.rs b/ingester/src/buffer_tree/table.rs index 1cc05d6feb..e59fe1faf5 100644 --- a/ingester/src/buffer_tree/table.rs +++ b/ingester/src/buffer_tree/table.rs @@ -321,6 +321,10 @@ fn keep_after_pruning_partition_key( max_value, } } + ColumnValue::Datetime { .. 
} => { + // not yet supported + return None; + } }; Some((Arc::from(col), range)) diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs index 84dc167e06..db367242a8 100644 --- a/mutable_batch/src/payload/partition.rs +++ b/mutable_batch/src/payload/partition.rs @@ -338,7 +338,7 @@ mod tests { use crate::writer::Writer; use assert_matches::assert_matches; - use chrono::{format::StrftimeItems, TimeZone, Utc}; + use chrono::{format::StrftimeItems, DateTime, Datelike, Days, TimeZone, Utc}; use data_types::partition_template::{ build_column_values, test_table_partition_override, ColumnValue, }; @@ -624,6 +624,13 @@ mod tests { ColumnValue::Prefix(s.into()) } + fn year(y: i32) -> ColumnValue<'static> { + ColumnValue::Datetime { + begin: Utc.with_ymd_and_hms(y, 1, 1, 0, 0, 0).unwrap(), + end: Utc.with_ymd_and_hms(y + 1, 1, 1, 0, 0, 0).unwrap(), + } + } + // Generate a test that asserts the derived partition key matches // "want_key", when using the provided "template" parts and set of "tags". // @@ -692,7 +699,11 @@ mod tests { ], tags = [("a", "bananas"), ("b", "are_good")], want_key = "2023|bananas|are_good", - want_reversed_tags = [("a", identity("bananas")), ("b", identity("are_good"))] + want_reversed_tags = [ + (TIME_COLUMN_NAME, year(2023)), + ("a", identity("bananas")), + ("b", identity("are_good")), + ] ); test_partition_key!( @@ -704,7 +715,11 @@ mod tests { ], tags = [("a", "bananas"), ("b", "plátanos")], want_key = "2023|bananas|pl%C3%A1tanos", - want_reversed_tags = [("a", identity("bananas")), ("b", identity("plátanos"))] + want_reversed_tags = [ + (TIME_COLUMN_NAME, year(2023)), + ("a", identity("bananas")), + ("b", identity("plátanos")), + ] ); test_partition_key!( @@ -744,6 +759,7 @@ mod tests { tags = [("a", "|"), ("b", "!"), ("d", "%7C%21%257C"), ("e", "^")], want_key = "2023|%7C|%21|!|%257C%2521%25257C|%5E", want_reversed_tags = [ + (TIME_COLUMN_NAME, year(2023)), ("a", identity("|")), ("b", identity("!")), ("d", identity("%7C%21%257C")), @@ -1104,6 +1120,27 @@ mod tests { .prop_shuffle() } + enum StringOrTSRange { + String(String), + TSRange(DateTime<Utc>, DateTime<Utc>), + } + + impl StringOrTSRange { + fn expect_string(&self) -> &String { + match self { + Self::String(s) => s, + Self::TSRange(_, _) => panic!("expected string, got TS range"), + } + } + + fn expect_ts_range(&self) -> (DateTime<Utc>, DateTime<Utc>) { + match self { + Self::String(_) => panic!("expected TS range, got string"), + Self::TSRange(b, e) => (*b, *e), + } + } + } + proptest! { /// A property test that asserts a write comprised of an arbitrary /// subset of [`TEST_TAG_NAME_SET`] with randomised values, that is @@ -1113,7 +1150,8 @@ mod tests { #[test] fn prop_reversible_mapping( template in arbitrary_template_parts(), - tag_values in arbitrary_tag_value_map() + tag_values in arbitrary_tag_value_map(), + ts in 0_i64..i64::MAX, ) { let mut batch = MutableBatch::new(); let mut writer = Writer::new(&mut batch, 1); @@ -1121,9 +1159,8 @@ mod tests { let template = template.clone().into_iter().collect::<Vec<_>>(); let template = test_table_partition_override(template); - // Timestamp: 2023-05-29T13:03:16Z writer - .write_time("time", vec![1685365396931384064].into_iter()) + .write_time("time", vec![ts].into_iter()) .unwrap(); for (col, value) in &tag_values { @@ -1142,38 +1179,58 @@ mod tests { // Build the expected set of reversed tags by filtering out any // NULL tags (preserving empty string values). 
- let want_reversed: Vec<(&str, String)> = template.parts().filter_map(|v| match v { + let ts = Utc.timestamp_nanos(ts); + let want_reversed: Vec<(&str, StringOrTSRange)> = template.parts().filter_map(|v| match v { TemplatePart::TagValue(col_name) if tag_values.contains_key(col_name) => { // This tag had a (potentially empty) value wrote and should // appear in the reversed output. - Some((col_name, tag_values.get(col_name).unwrap().to_string())) + Some((col_name, StringOrTSRange::String(tag_values.get(col_name).unwrap().to_string()))) + } + TemplatePart::TimeFormat("%Y/%m/%d" | "%Y-%m-%d") => { + let begin = Utc.with_ymd_and_hms(ts.year(), ts.month(), ts.day(), 0, 0, 0).unwrap(); + let end = begin + Days::new(1); + Some((TIME_COLUMN_NAME, StringOrTSRange::TSRange(begin, end))) } _ => None, }).collect(); assert_eq!(want_reversed.len(), reversed.len()); - for (want, got) in want_reversed.iter().zip(reversed.iter()) { - assert_eq!(got.0, want.0, "column names differ"); + for ((want_col, want_val), (got_col, got_val)) in want_reversed.iter().zip(reversed.iter()) { + assert_eq!(got_col, want_col, "column names differ"); - match got.1 { + match got_val { ColumnValue::Identity(_) => { // An identity is both equal to, and a prefix of, the // original value. - assert_eq!(got.1, want.1, "identity values differ"); + let want_val = want_val.expect_string(); + assert_eq!(got_val, &want_val, "identity values differ"); + assert!( + got_val.is_prefix_match_of(want_val), + "prefix mismatch; {:?} is not a prefix of {:?}", + got_val, + want_val, + ); + }, + ColumnValue::Prefix(_) => { + let want_val = want_val.expect_string(); assert!( - got.1.is_prefix_match_of(&want.1), + got_val.is_prefix_match_of(want_val), "prefix mismatch; {:?} is not a prefix of {:?}", - got.1, - want.1 + got_val, + want_val, ); }, - ColumnValue::Prefix(_) => assert!( - got.1.is_prefix_match_of(&want.1), - "prefix mismatch; {:?} is not a prefix of {:?}", - got.1, - want.1 - ), + ColumnValue::Datetime{..} => { + let (got_begin, got_end) = want_val.expect_ts_range(); + match got_val { + ColumnValue::Datetime{begin, end} => { + assert_eq!(got_begin, *begin); + assert_eq!(got_end, *end); + } + _ => panic!("expected datatime column value but got: {:?}", got_val) + } + }, }; } } diff --git a/querier/src/cache/partition.rs b/querier/src/cache/partition.rs index c9dc6f497b..0eab232ca3 100644 --- a/querier/src/cache/partition.rs +++ b/querier/src/cache/partition.rs @@ -323,6 +323,10 @@ impl CachedPartition { max_value, } } + ColumnValue::Datetime { .. } => { + // not yet supported + return None; + } }; Some((col, range))
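The half-open range arithmetic introduced above can be illustrated in isolation with a minimal `chrono` sketch (the values are illustrative only, not taken from the diff): for a `%Y-%m` partition key such as `2023-09`, `begin` is the parsed instant with day and time filled in by the implicit defaults, and `end` is `begin` plus one month.

```rust
use chrono::{Months, TimeZone, Utc};

fn main() {
    // Mirrors the Numeric::Month arm: the smallest parsed unit decides how far
    // `end` sits past `begin`; the resulting range is half-open, [begin, end).
    let begin = Utc.with_ymd_and_hms(2023, 9, 1, 0, 0, 0).unwrap();
    let end = begin + Months::new(1);
    assert_eq!(end, Utc.with_ymd_and_hms(2023, 10, 1, 0, 0, 0).unwrap());

    // A "%Y" key would add Months::new(12) instead, and "%Y-%m-%d" adds Days::new(1).
}
```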
667240471196b70e4ed6828e9e01a09d37411e8e
Nga Tran
2022-10-18 09:07:48
part 2 of compactor documents with best practice and guidelines (#5880)
* docs: part 2 of compactor * fix: typos * chore: Apply suggestions from code review Co-authored-by: Andrew Lamb <[email protected]> * docs: address review comments
Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
docs: part 2 of compactor documents with best practice and guidelines (#5880) * docs: part 2 of compactor * fix: typos * chore: Apply suggestions from code review Co-authored-by: Andrew Lamb <[email protected]> * docs: address review comments Co-authored-by: Andrew Lamb <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/docs/compactor.md b/docs/compactor.md
index 2aa197c325..4ba5676b1c 100644
--- a/docs/compactor.md
+++ b/docs/compactor.md
@@ -14,7 +14,8 @@ There are 3 kinds of `compaction_level` files in IOx: level-0, level-1 and level
- Level-0 files are small files ingested by the Ingesters
- Level-1 files are files created by a Compactor as a result of compacting one or many level-0 files with their overlapped level-1 files.
- Level-2 files are files created by a Compactor as a result of compacting one or many level-1 files with their overlapped level-2 files.
-Regarless of level, a file in IOx must belong to a partition which represents data of a time range which is usually a day. Two files of different partitions never overlap in time and hence the Compactor only needs to compact files that belong to the same partition.
+
+Regardless of level, a file in IOx must belong to a partition which represents data of a time range which is usually a day. Two files of different partitions never overlap in time and hence the Compactor only needs to compact files that belong to the same partition.
A level-0 file may overlap with other level-0, level-1 and level-2 files. A level-1 file does not overlap with any other level-1 files but may overlap with level-2 files. A level-2 file does not overlap with any other level-2 files.
@@ -38,12 +39,182 @@ If increasing memory a lot does not help, consider changing one or a combination
- INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES: reduce this value in half or more. This puts a hard cap on the maximum number of files of a partition it can compact, even if its memory budget estimate would allow for more.
- INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES: reduce this value in half or more. This tells the compactor its total budget is less, so it will reduce the number of partitions it can compact concurrently or reduce the number of files to be compacted for a partition.
-# Avoid and deal with files in skipped_compactions
-todo
+# Compactor Config Parameters
+These are the [up-to-date configurable parameters](https://github.com/influxdata/influxdb_iox/blob/main/clap_blocks/src/compactor.rs). Here are a few key parameters you may want to tune for your needs:
+
+ - **Size of the files:** The compactor cannot control the sizes of level-0 files, but they are usually small and can be adjusted by config params of the Ingesters. The compactor decides the max desired size of level-1 and level-2 files, which is around `INFLUXDB_IOX_COMPACTION_MAX_DESIRED_FILE_SIZE_BYTES * (100 + INFLUXDB_IOX_COMPACTION_PERCENTAGE_MAX_FILE_SIZE) / 100` (see the worked example after this list).
+ - **Map a compactor to several shards**: Depending on your Ingester setup, there may be several shards. A compactor can be set up to compact all or a fraction of the shards. Use the range `[INFLUXDB_IOX_SHARD_INDEX_RANGE_START, INFLUXDB_IOX_SHARD_INDEX_RANGE_END]` to map them.
+- **Number of partitions considered to compact per shard**: If there is enough memory, which is usually the case, the compactor will compact many partitions of the same or different shards concurrently. Depending on how many shards a compactor handles and how much memory that compactor is configured with, you can increase/reduce the concurrent compaction level by increasing/reducing the number of partitions per shard, adjusting `INFLUXDB_IOX_COMPACTION_MAX_NUMBER_PARTITIONS_PER_SHARD`.
+- **Concurrency capacity:** To configure this based on your available memory, you need to understand how IOx estimates memory to compact files, described in the next section.
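As a rough worked example of that output-size cap, assuming the recommended values quoted further down (100 MiB desired file size, 5 percent allowance), level-1 and level-2 output files top out at about 105 MiB:

```rust
fn main() {
    // Assumed example values: the recommended settings quoted later in this document.
    let max_desired_file_size_bytes: u64 = 100 * 1024 * 1024; // INFLUXDB_IOX_COMPACTION_MAX_DESIRED_FILE_SIZE_BYTES
    let percentage_max_file_size: u64 = 5; // INFLUXDB_IOX_COMPACTION_PERCENTAGE_MAX_FILE_SIZE

    // Upper bound on the size of a level-1 / level-2 output file.
    let cap = max_desired_file_size_bytes * (100 + percentage_max_file_size) / 100;
    assert_eq!(cap, 110_100_480); // 105 MiB
}
```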
# Memory Estimation -todo -# Usual SQL to verify compaction status -todo +The idea of a single compaction is to compact as many small input files as possible into one or few larger output files as follows + +```text + ┌────────┐ ┌────────┐ + │ │ │ │ + │ Output │ ..... │ Output │ + │ File 1 │ │ File m │ + │ │ │ │ + └────────┘ └────────┘ + ▲ ▲ + │ │ + │ │ + .─────────────────────────. + _.──' `───. +( Compact ) + `────. _.───' + `───────────────────────' + ▲ ▲ ▲ + │ │ │ + │ │ │ + ┌───────┐┌──────┐ ┌──────┐ + │ ││ │ │ │ + │ Input ││Input │ │Input │ + │File 1 ││File 2│ ..... │File n│ + │ ││ │ │ │ + └───────┘└──────┘ └──────┘ +Figure 1: Compact a Partition +``` + + +Currently, in order to avoid over committing memory (and OOMing), the compactor computes an estimate of the memory needed for loading full input files into memory, memory for streaming input files in parallel, and memory for output streams. The boxes in the diagram below illustrates the memory needed to run the query plan above. IOx picks number of input files to compact based on their sizes, number of columns and columns types of the file's table, max desired output files, and the memory budget the compactor is provided. Details: + + +```text + ┌───────────┐ ┌────────────┐ + │Memory for │ │ Memory for │ + │ Output │ ..... │ Output │ + │ Stream 1 │ │ Stream m │ + │ │ │ │ + └───────────┘ └────────────┘ + + ▲ ▲ + │ │ + │ │ + │ │ + .─────────────────────. + _.────' `─────. + ,─' '─. + ╱ Run Compaction Plan ╲ + ( ) + `. ,' + '─. ,─' + `─────. _.────' + `───────────────────' + ▲ ▲ ▲ + │ │ │ + │ │ │ + │ │ │ +┌────────────┐ ┌────────────┐ ┌────────────┐ +│ Memory for │ │ Memory for │ │ Memory for │ +│ Streaming │ │ Streaming │ │ Streaming │ +│ File 1 │ │ File 2 │ ..... │ File n │ +│ │ │ │ │ │ +└────────────┘ └────────────┘ └────────────┘ +┌────────────┐ ┌────────────┐ ┌────────────┐ +│ Memory for │ │ Memory for │ │ Memory for │ +│Loading Full│ │Loading Full│ ..... │Loading Full│ +│ File 1 │ │ File 2 │ │ File n │ +│ │ │ │ │ │ +└────────────┘ └────────────┘ └────────────┘ +Figure 2: Memory Estimation for a Compaction Plan +``` + +- Memory for loading a full file : Twice the file size. +- Memory for streaming an input file: See [estimate_arrow_bytes_for_file](https://github.com/influxdata/influxdb_iox/blob/bb7df22aa1783e040ea165153876f1fe36838d4e/compactor/src/compact.rs#L504) for the details but, briefly, each column of the file will need `size_of_a_row_of_a_column * INFLUXDB_IOX_COMPACTION_MIN_ROWS_PER_RECORD_BATCH_TO_PLAN` in which `size_of_a_row_of_a_column` depends on column type and the file cardinality. +- Memory for an output stream is similar to memory for streaming an input file. Number of output streams is estimated based on the sizes of the input files and max desired file size `INFLUXDB_IOX_COMPACTION_MAX_DESIRED_FILE_SIZE_BYTES`. + +IOx limits the number of input files using the budget provided in `INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES`. However, IOx also caps number of input files based on `INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES` even if more could fit under the memory budget. + +Since the compactor keeps running to look for new files to compact, number of input files are usually small (fewer then 10) and, thus, memory needed to run such a plan is usually small enough for a compactor to be able to run many of them concurrently. + +**Best Pratice Recommendation for your configuration:** +- INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES: 1/3 (or at most 1/2) your total actual memory. 
+- INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES: 20
+- INFLUXDB_IOX_COMPACTION_MIN_ROWS_PER_RECORD_BATCH_TO_PLAN: 32 * 1024
+- INFLUXDB_IOX_COMPACTION_MAX_DESIRED_FILE_SIZE_BYTES: 100 * 1024 * 1024
+- INFLUXDB_IOX_COMPACTION_PERCENTAGE_MAX_FILE_SIZE: 5
+
+# Avoid and deal with files in skipped_compactions
+To deduplicate data correctly, the Compactor must compact level-0 files in ascending order of their sequence numbers and together with their overlapped level-1 files. If the first level-0 file and its overlapped level-1 files are too large and their memory estimation in Figure 2 is over the budget defined in INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES, the compactor won't be able to compact that partition. To avoid considering that same partition again and again, the compactor will put that partition into the catalog table `skipped_compactions`.
+
+If you find that queries on data of a partition listed in `skipped_compactions` are slow, you may want to force the Compactor to compact that partition by increasing your memory budget and then removing that partition from `skipped_compactions`. If you remove the partition without adjusting your config params, the Compactor will put it back in again without compacting it. We are working on a gRPC API to let you see the content of `skipped_compactions` and remove a specific partition from it. In the meantime, you can do this by running a DELETE SQL statement directly against your Catalog (see the SQL section for this statement), but we do not recommend modifying the Catalog unless you know your partitions and their data files very well.
+
+Increasing the memory budget can be as simple as increasing your actual memory size and then increasing the value of INFLUXDB_IOX_COMPACTION_MEMORY_BUDGET_BYTES accordingly. You can also try to reduce the value of INFLUXDB_IOX_COMPACTION_MIN_ROWS_PER_RECORD_BATCH_TO_PLAN to a minimum of 8 * 1024, but you may hit OOMs if you do so. This depends on the column types and number of columns of your file.
+
+If your partition is put into `skipped_compactions` with the reason `over limit of num files`, you have to increase INFLUXDB_IOX_COMPACTION_MAX_COMPACTING_FILES accordingly, but you may hit OOMs if you do not increase your actual memory.
+
+# Avoid Deduplication in Querier
+
+Deduplication is known to be expensive. To avoid deduplication work at query time in the Queriers, your files should not overlap in time range. This can be achieved by having all files of a partition in either level-1 or level-2. With the current design, if your compactor catches up well, partitions with recent level-0 files within the last 4 hours should have at most two level-2 files. Partitions without new level-0 files in the last 8 hours should have all level-2 files. Depending on the performance in the Querier, we can adjust the Compactor (a future feature) to have all files in level-1 or level-2.
+
+# Common SQL to verify compaction status
+
+If your Compactors catch up well with your Ingesters and do not hit memory issues, you should see:
+1. Table `skipped_compactions` is empty
+2. Most partitions have at most 2 level-0 files. If a partition has a lot of level-0 files, it signals either that your compactor is behind and does not compact it, or that the partition has been put in `skipped_compactions`.
+3. Most partitions without new level-0 files in the last 8 hours should have all level-2 files.
+4. 
Most unused files (files with `to_delete is not null`) are removed by the garbage collector
+
+Here are SQL statements to verify them:
+
+
+```sql
+-- Content of skipped_compactions
+SELECT * FROM skipped_compactions;
+
+-- remove partitions from the skipped_compactions
+DELETE FROM skipped_compactions WHERE partition_id in ([your_ids]);
+
+-- Content of skipped_compactions with their shard index, partition key and table id
+SELECT shard_index, table_id, partition_id, partition_key, left(reason, 25),
+  num_files, limit_num_files, estimated_bytes, limit_bytes, to_timestamp(skipped_at) skipped_at
+FROM skipped_compactions, partition, shard
+WHERE partition.id = skipped_compactions.partition_id and partition.shard_id = shard.id
+ORDER BY shard_index, table_id, partition_key, skipped_at;
+
+-- Number of files per level for top 50 partitions with most files of a specified day
+SELECT s.shard_index, pf.table_id, partition_id, partition_key,
+  count(case when to_delete is null then 1 end) total_not_deleted,
+  count(case when compaction_level=0 and to_delete is null then 1 end) num_l0,
+  count(case when compaction_level=1 and to_delete is null then 1 end) num_l1,
+  count(case when compaction_level=2 and to_delete is null then 1 end) num_l2,
+  count(case when compaction_level=0 and to_delete is not null then 1 end) deleted_num_l0,
+  count(case when compaction_level=1 and to_delete is not null then 1 end) deleted_num_l1,
+  count(case when compaction_level=2 and to_delete is not null then 1 end) deleted_num_l2
+FROM parquet_file pf, partition p, shard s
+WHERE pf.partition_id = p.id AND pf.shard_id = s.id
+  AND partition_key = '2022-10-11'
+GROUP BY s.shard_index, pf.table_id, partition_id, partition_key
+ORDER BY count(case when to_delete is null then 1 end) DESC
+LIMIT 50;
+
+-- Partitions with level-0 files ingested within the last 4 hours
+SELECT partition_key, id as partition_id
+FROM partition p, (
+  SELECT partition_id, max(created_at)
+  FROM parquet_file
+  WHERE compaction_level = 0 AND to_delete IS NULL
+  GROUP BY partition_id
+  HAVING to_timestamp(max(created_at)/1000000000) > now() - '(4 hour)'::interval
+) sq
+WHERE sq.partition_id = p.id;
+```
+
+Here are other SQL statements you may find useful:
+
+```sql
+-- Number of columns in a table
+select left(t.name, 50), c.table_id, count(1) num_cols
+from table_name t, column_name c
+where t.id = c.table_id
+group by 1, 2
+order by 3 desc;
+```
+
+
+
+
377d2a8215f74fa654a91234f520cd99f9ba13aa
Marco Neumann
2023-09-06 11:07:17
network layer for i->q V2 client (#8640)
Adds the actual network IO layer for #8349. This is a rather simple layer for now, but we may want to tune some connection settings in the future.
null
feat: network layer for i->q V2 client (#8640) Adds the actual network IO layer for #8349. This is a rather simple layer for now, but we may want to tune some connection settings in the future.
diff --git a/Cargo.lock b/Cargo.lock index 2f9e48fcb7..abfb9eb5db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2762,6 +2762,7 @@ version = "0.1.0" dependencies = [ "arrow", "async-trait", + "client_util", "data_types", "datafusion", "futures", @@ -2771,6 +2772,7 @@ dependencies = [ "snafu", "test_helpers", "tokio", + "tonic", "trace", "trace_http", "uuid", diff --git a/ingester_query_client/Cargo.toml b/ingester_query_client/Cargo.toml index 78af982461..6860fd0b8a 100644 --- a/ingester_query_client/Cargo.toml +++ b/ingester_query_client/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] # In alphabetical order arrow = { workspace = true, features = ["prettyprint", "dyn_cmp_dict"] } async-trait = "0.1" +client_util = { path = "../client_util" } data_types = { path = "../data_types" } datafusion = { workspace = true } futures = "0.3" @@ -15,6 +16,7 @@ http = "0.2.9" ingester_query_grpc = { path = "../ingester_query_grpc" } observability_deps = { path = "../observability_deps" } snafu = "0.7" +tonic = { workspace = true } trace = { path = "../trace" } trace_http = { path = "../trace_http" } uuid = "1" diff --git a/ingester_query_client/src/layers/mod.rs b/ingester_query_client/src/layers/mod.rs index 79f243cb54..0ebe11791b 100644 --- a/ingester_query_client/src/layers/mod.rs +++ b/ingester_query_client/src/layers/mod.rs @@ -1,5 +1,6 @@ //! Layers. pub mod logging; +pub mod network; pub mod serialize; pub mod testing; diff --git a/ingester_query_client/src/layers/network.rs b/ingester_query_client/src/layers/network.rs new file mode 100644 index 0000000000..54cf9876ba --- /dev/null +++ b/ingester_query_client/src/layers/network.rs @@ -0,0 +1,59 @@ +//! Network layer. +use std::fmt::Debug; + +use async_trait::async_trait; +use client_util::tower::SetRequestHeadersService; +use futures::{StreamExt, TryStreamExt}; +use http::{HeaderName, HeaderValue, Uri}; +use ingester_query_grpc::influxdata::iox::ingester::v2 as proto; +use tonic::transport::Channel; + +use crate::{ + error::DynError, + layer::{Layer, QueryResponse}, +}; + +/// Network layer. +#[derive(Debug)] +pub struct NetworkLayer { + /// Lazy-connect network channel. + /// + /// This can be cloned, all clones share the same connection pool. + channel: Channel, +} + +impl NetworkLayer { + /// Create new network layer + pub fn new(uri: Uri) -> Self { + // connect lazy / on-demand to avoid thunder herding during start-up + let channel = Channel::builder(uri).connect_lazy(); + + Self { channel } + } +} + +#[async_trait] +impl Layer for NetworkLayer { + type Request = (proto::QueryRequest, Vec<(HeaderName, HeaderValue)>); + type ResponseMetadata = (); + type ResponsePayload = proto::QueryResponse; + + async fn query( + &self, + request: Self::Request, + ) -> Result<QueryResponse<Self::ResponseMetadata, Self::ResponsePayload>, DynError> { + let (request, headers) = request; + + let mut client = proto::ingester_query_service_client::IngesterQueryServiceClient::new( + SetRequestHeadersService::new(self.channel.clone(), headers), + ); + client + .query(request) + .await + .map(|resp| QueryResponse { + metadata: (), + payload: resp.into_inner().map_err(DynError::new).boxed(), + }) + .map_err(DynError::new) + } +}
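A small sketch of the lazy-connection behaviour this layer relies on (the address is a made-up placeholder, not part of the change): `connect_lazy()` performs no I/O when the channel is built, so constructing the client at start-up stays cheap and the underlying HTTP/2 connection is only opened on the first query.

```rust
use http::Uri;
use tonic::transport::Channel;

fn make_channel(uri: Uri) -> Channel {
    // No network I/O happens here; the connection is established on the first
    // request, which avoids a thundering herd of connects during start-up.
    Channel::builder(uri).connect_lazy()
}

fn main() {
    // Placeholder ingester address purely for illustration.
    let uri: Uri = "http://ingester-0:8082".parse().expect("valid uri");
    let _channel = make_channel(uri);
}
```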
01d33f69b5d2ef91923df0b27f4c50db69398b26
Paul Dix
2024-03-12 09:47:32
wire up query from parquet files (#24749)
* feat: wire up query from parquet files This adds the functionality to query from Parquet files that have been persisted in object storage. Any segments that are loaded up on boot up will be included (limit of 1k segments at the time of this PR). In a follow on PR we should add a good end-to-end test that has persistence and query through the main API (might be tricky). * Move BufferChunk and ParquetChunk into chunk module * Add object_store_url to Persister * Register object_store on server startup * Add loaded persisted_segments to SegmentState * refactor: PR feedback
null
feat: wire up query from parquet files (#24749) * feat: wire up query from parquet files This adds the functionality to query from Parquet files that have been persisted in object storage. Any segments that are loaded up on boot up will be included (limit of 1k segments at the time of this PR). In a follow on PR we should add a good end-to-end test that has persistence and query through the main API (might be tricky). * Move BufferChunk and ParquetChunk into chunk module * Add object_store_url to Persister * Register object_store on server startup * Add loaded persisted_segments to SegmentState * refactor: PR feedback
diff --git a/Cargo.lock b/Cargo.lock index e151b47e35..7645f8f423 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2475,6 +2475,7 @@ dependencies = [ "clap", "clap_blocks", "console-subscriber", + "datafusion_util", "dotenvy", "futures", "hex", @@ -2616,6 +2617,7 @@ dependencies = [ "observability_deps", "parking_lot", "parquet", + "parquet_file", "pretty_assertions", "schema", "serde", @@ -2624,6 +2626,7 @@ dependencies = [ "test_helpers", "thiserror", "tokio", + "url", ] [[package]] diff --git a/influxdb3/Cargo.toml b/influxdb3/Cargo.toml index e62302c45f..3426db7e34 100644 --- a/influxdb3/Cargo.toml +++ b/influxdb3/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true # Core Crates authz.workspace = true clap_blocks.workspace = true +datafusion_util.workspace = true iox_query.workspace = true iox_time.workspace = true ioxd_common.workspace = true diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index 7c2bfa47fe..d7b50fd481 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -7,6 +7,7 @@ use clap_blocks::{ object_store::{make_object_store, ObjectStoreConfig}, socket_addr::SocketAddr, }; +use datafusion_util::config::register_iox_object_store; use influxdb3_server::{ auth::AllOrNothingAuthorizer, builder::ServerBuilder, query_executor::QueryExecutorImpl, serve, CommonServerState, @@ -15,7 +16,7 @@ use influxdb3_write::persister::PersisterImpl; use influxdb3_write::wal::WalImpl; use influxdb3_write::write_buffer::WriteBufferImpl; use influxdb3_write::SegmentDuration; -use iox_query::exec::{Executor, ExecutorConfig}; +use iox_query::exec::{Executor, ExecutorConfig, ExecutorType}; use iox_time::SystemProvider; use ioxd_common::reexport::trace_http::ctx::TraceHeaderParser; use object_store::DynObjectStore; @@ -242,6 +243,8 @@ pub async fn command(config: Config) -> Result<()> { metric_registry: Arc::clone(&metrics), mem_pool_size: config.exec_mem_pool_bytes.bytes(), })); + let runtime_env = exec.new_context(ExecutorType::Query).inner().runtime_env(); + register_iox_object_store(runtime_env, parquet_store.id(), Arc::clone(&object_store)); let trace_header_parser = TraceHeaderParser::new() .with_jaeger_trace_context_header_name( diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index 44cc5afe58..eb3e36c285 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -5,12 +5,10 @@ use arrow::record_batch::RecordBatch; use arrow_schema::ArrowError; use async_trait::async_trait; use data_types::NamespaceId; -use data_types::{ChunkId, ChunkOrder, TransitionPartitionId}; use datafusion::catalog::schema::SchemaProvider; use datafusion::catalog::CatalogProvider; use datafusion::common::arrow::array::StringArray; use datafusion::common::arrow::datatypes::{DataType, Field, Schema as DatafusionSchema}; -use datafusion::common::Statistics; use datafusion::datasource::{TableProvider, TableType}; use datafusion::error::DataFusionError; use datafusion::execution::context::SessionState; @@ -31,12 +29,11 @@ use iox_query::query_log::QueryLog; use iox_query::query_log::QueryText; use iox_query::query_log::StateReceived; use iox_query::QueryNamespaceProvider; -use iox_query::{QueryChunk, QueryChunkData, QueryNamespace}; +use iox_query::{QueryChunk, QueryNamespace}; use iox_query_influxql::frontend::planner::InfluxQLQueryPlanner; use iox_query_params::StatementParams; use metric::Registry; use observability_deps::tracing::{debug, info, trace}; -use schema::sort::SortKey; use 
schema::Schema; use serde::{Deserialize, Serialize}; use serde_arrow::schema::SchemaLike; @@ -495,48 +492,3 @@ impl<B: WriteBuffer> TableProvider for QueryTable<B> { provider.scan(ctx, projection, &filters, limit).await } } - -#[derive(Debug)] -pub struct ParquetChunk {} - -impl QueryChunk for ParquetChunk { - fn stats(&self) -> Arc<Statistics> { - todo!() - } - - fn schema(&self) -> &Schema { - todo!() - } - - fn partition_id(&self) -> &TransitionPartitionId { - todo!() - } - - fn sort_key(&self) -> Option<&SortKey> { - todo!() - } - - fn id(&self) -> ChunkId { - todo!() - } - - fn may_contain_pk_duplicates(&self) -> bool { - todo!() - } - - fn data(&self) -> QueryChunkData { - todo!() - } - - fn chunk_type(&self) -> &str { - todo!() - } - - fn order(&self) -> ChunkOrder { - todo!() - } - - fn as_any(&self) -> &dyn Any { - todo!() - } -} diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml index dc8ea20d82..36ba81b643 100644 --- a/influxdb3_write/Cargo.toml +++ b/influxdb3_write/Cargo.toml @@ -13,6 +13,7 @@ influxdb-line-protocol.workspace = true iox_catalog.workspace = true iox_query.workspace = true iox_time.workspace = true +parquet_file.workspace = true observability_deps.workspace = true schema.workspace = true @@ -34,6 +35,7 @@ serde_json.workspace = true snap.workspace = true bytes.workspace = true futures-util.workspace = true +url.workspace = true [dev-dependencies] # Core Crates diff --git a/influxdb3_write/src/chunk.rs b/influxdb3_write/src/chunk.rs new file mode 100644 index 0000000000..fbc9f83cea --- /dev/null +++ b/influxdb3_write/src/chunk.rs @@ -0,0 +1,115 @@ +use arrow::array::RecordBatch; +use data_types::{ChunkId, ChunkOrder, TransitionPartitionId}; +use datafusion::common::Statistics; +use iox_query::{QueryChunk, QueryChunkData}; +use parquet_file::storage::ParquetExecInput; +use schema::sort::SortKey; +use schema::Schema; +use std::any::Any; +use std::sync::Arc; + +#[derive(Debug)] +pub struct BufferChunk { + pub(crate) batches: Vec<RecordBatch>, + pub(crate) schema: Schema, + pub(crate) stats: Arc<Statistics>, + pub(crate) partition_id: data_types::partition::TransitionPartitionId, + pub(crate) sort_key: Option<SortKey>, + pub(crate) id: data_types::ChunkId, + pub(crate) chunk_order: data_types::ChunkOrder, +} + +impl QueryChunk for BufferChunk { + fn stats(&self) -> Arc<Statistics> { + Arc::clone(&self.stats) + } + + fn schema(&self) -> &Schema { + &self.schema + } + + fn partition_id(&self) -> &data_types::partition::TransitionPartitionId { + &self.partition_id + } + + fn sort_key(&self) -> Option<&SortKey> { + self.sort_key.as_ref() + } + + fn id(&self) -> data_types::ChunkId { + self.id + } + + fn may_contain_pk_duplicates(&self) -> bool { + false + } + + fn data(&self) -> QueryChunkData { + QueryChunkData::in_mem(self.batches.clone(), Arc::clone(self.schema.inner())) + } + + fn chunk_type(&self) -> &str { + "BufferChunk" + } + + fn order(&self) -> data_types::ChunkOrder { + self.chunk_order + } + + fn as_any(&self) -> &dyn Any { + self + } +} + +#[derive(Debug)] +pub struct ParquetChunk { + pub(crate) schema: Schema, + pub(crate) stats: Arc<Statistics>, + pub(crate) partition_id: TransitionPartitionId, + pub(crate) sort_key: Option<SortKey>, + pub(crate) id: ChunkId, + pub(crate) chunk_order: ChunkOrder, + pub(crate) parquet_exec: ParquetExecInput, +} + +impl QueryChunk for ParquetChunk { + fn stats(&self) -> Arc<Statistics> { + Arc::clone(&self.stats) + } + + fn schema(&self) -> &Schema { + &self.schema + } + + fn partition_id(&self) -> 
&TransitionPartitionId { + &self.partition_id + } + + fn sort_key(&self) -> Option<&SortKey> { + self.sort_key.as_ref() + } + + fn id(&self) -> ChunkId { + self.id + } + + fn may_contain_pk_duplicates(&self) -> bool { + false + } + + fn data(&self) -> QueryChunkData { + QueryChunkData::Parquet(self.parquet_exec.clone()) + } + + fn chunk_type(&self) -> &str { + "Parquet" + } + + fn order(&self) -> ChunkOrder { + self.chunk_order + } + + fn as_any(&self) -> &dyn Any { + self + } +} diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs index 4189f30c63..8ecad36c62 100644 --- a/influxdb3_write/src/lib.rs +++ b/influxdb3_write/src/lib.rs @@ -7,6 +7,7 @@ //! to be persisted. A new open segment will be created and new writes will be written to that segment. pub mod catalog; +mod chunk; pub mod paths; pub mod persister; pub mod wal; @@ -16,7 +17,8 @@ use crate::catalog::Catalog; use crate::paths::{ParquetFilePath, SegmentWalFilePath}; use async_trait::async_trait; use bytes::Bytes; -use data_types::NamespaceName; +use data_types::{NamespaceName, TimestampMinMax}; +use datafusion::datasource::object_store::ObjectStoreUrl; use datafusion::error::DataFusionError; use datafusion::execution::context::SessionState; use datafusion::physical_plan::SendableRecordBatchStream; @@ -306,6 +308,8 @@ impl SequenceNumber { } } +pub const DEFAULT_OBJECT_STORE_URL: &str = "iox://influxdb3/"; + #[async_trait] pub trait Persister: Debug + Send + Sync + 'static { type Error; @@ -348,6 +352,14 @@ pub trait Persister: Debug + Send + Sync + 'static { /// Returns the configured `ObjectStore` that data is loaded from and persisted to. fn object_store(&self) -> Arc<dyn object_store::ObjectStore>; + // This is used by the query engine to know where to read parquet files from. This assumes + // that there is a `ParquetStorage` with an id of `influxdb3` and that this url has been + // registered with the query execution context. Kind of ugly here, but not sure where else + // to keep this. + fn object_store_url(&self) -> ObjectStoreUrl { + ObjectStoreUrl::parse(DEFAULT_OBJECT_STORE_URL).unwrap() + } + fn as_any(&self) -> &dyn Any; } @@ -503,6 +515,15 @@ pub struct ParquetFile { pub max_time: i64, } +impl ParquetFile { + pub fn timestamp_min_max(&self) -> TimestampMinMax { + TimestampMinMax { + min: self.min_time, + max: self.max_time, + } + } +} + /// The summary data for a persisted parquet file in a segment. 
#[derive(Clone, Copy, Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] diff --git a/influxdb3_write/src/write_buffer/buffer_segment.rs b/influxdb3_write/src/write_buffer/buffer_segment.rs index ed4e0080fc..b73bd0f2e9 100644 --- a/influxdb3_write/src/write_buffer/buffer_segment.rs +++ b/influxdb3_write/src/write_buffer/buffer_segment.rs @@ -190,7 +190,6 @@ pub(crate) fn load_buffer_from_segment( segment_reader.path().to_string(), )); } - let segment_data = validated_write.valid_segmented_data.pop().unwrap(); for (table_name, table_batch) in segment_data.table_batches { @@ -523,6 +522,13 @@ impl ClosedBufferSegment { Ok(persisted_segment) } + + pub fn table_buffer(&self, db_name: &str, table_name: &str) -> Option<TableBuffer> { + self.buffered_data + .database_buffers + .get(db_name) + .and_then(|db_buffer| db_buffer.table_buffers.get(table_name).cloned()) + } } #[cfg(test)] diff --git a/influxdb3_write/src/write_buffer/flusher.rs b/influxdb3_write/src/write_buffer/flusher.rs index e0fc7dcd3b..59bc2d1df1 100644 --- a/influxdb3_write/src/write_buffer/flusher.rs +++ b/influxdb3_write/src/write_buffer/flusher.rs @@ -246,6 +246,7 @@ mod tests { Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))), vec![open_segment, next_segment], vec![], + vec![], None, ))); let flusher = WriteBufferFlusher::new(Arc::clone(&segment_state)); diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 3a561f3ff7..32fffc114a 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -6,7 +6,7 @@ mod loader; mod segment_state; use crate::catalog::{Catalog, DatabaseSchema, TableDefinition, TIME_COLUMN_NAME}; -use crate::write_buffer::buffer_segment::TableBuffer; +use crate::chunk::ParquetChunk; use crate::write_buffer::flusher::WriteBufferFlusher; use crate::write_buffer::loader::load_starting_state; use crate::write_buffer::segment_state::{run_buffer_segment_persist_and_cleanup, SegmentState}; @@ -14,24 +14,22 @@ use crate::{ persister, BufferSegment, BufferedWriteRequest, Bufferer, ChunkContainer, LpWriteOp, Persister, Precision, SegmentDuration, SegmentId, Wal, WalOp, WriteBuffer, WriteLineError, }; -use arrow::record_batch::RecordBatch; use async_trait::async_trait; use data_types::{ column_type_from_field, ChunkId, ChunkOrder, ColumnType, NamespaceName, NamespaceNameError, - TableId, TransitionPartitionId, }; -use datafusion::common::{DataFusionError, Statistics}; +use datafusion::common::DataFusionError; use datafusion::execution::context::SessionState; use datafusion::logical_expr::Expr; use influxdb_line_protocol::{parse_lines, FieldValue, ParsedLine}; use iox_query::chunk_statistics::create_chunk_statistics; -use iox_query::{QueryChunk, QueryChunkData}; +use iox_query::QueryChunk; use iox_time::{Time, TimeProvider}; -use observability_deps::tracing::{debug, error, info}; +use object_store::path::Path as ObjPath; +use object_store::ObjectMeta; +use observability_deps::tracing::{debug, error}; use parking_lot::{Mutex, RwLock}; -use schema::sort::SortKey; -use schema::Schema; -use std::any::Any; +use parquet_file::storage::ParquetExecInput; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; @@ -82,9 +80,10 @@ pub struct WriteRequest<'a> { } #[derive(Debug)] -pub struct WriteBufferImpl<W, T> { +pub struct WriteBufferImpl<W, T, P> { catalog: Arc<Catalog>, segment_state: Arc<RwLock<SegmentState<T, W>>>, + persister: Arc<P>, #[allow(dead_code)] wal: Option<Arc<W>>, 
write_buffer_flusher: WriteBufferFlusher, @@ -97,8 +96,8 @@ pub struct WriteBufferImpl<W, T> { shutdown_segment_persist_tx: watch::Sender<()>, } -impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { - pub async fn new<P>( +impl<W: Wal, T: TimeProvider, P: Persister> WriteBufferImpl<W, T, P> { + pub async fn new( persister: Arc<P>, wal: Option<Arc<W>>, time_provider: Arc<T>, @@ -112,6 +111,7 @@ impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { let now = time_provider.now(); let loaded_state = load_starting_state(Arc::clone(&persister), wal.clone(), now, segment_duration).await?; + let segment_state = Arc::new(RwLock::new(SegmentState::new( segment_duration, loaded_state.last_segment_id, @@ -119,6 +119,7 @@ impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { Arc::clone(&time_provider), loaded_state.open_segments, loaded_state.persisting_buffer_segments, + loaded_state.persisted_segments, wal.clone(), ))); @@ -127,11 +128,12 @@ impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { let segment_state_persister = Arc::clone(&segment_state); let time_provider_persister = Arc::clone(&time_provider); let wal_perister = wal.clone(); + let cloned_persister = Arc::clone(&persister); let (shutdown_segment_persist_tx, shutdown_rx) = watch::channel(()); let segment_persist_handle = tokio::task::spawn(async move { run_buffer_segment_persist_and_cleanup( - persister, + cloned_persister, segment_state_persister, shutdown_rx, time_provider_persister, @@ -143,6 +145,7 @@ impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { Ok(Self { catalog: loaded_state.catalog, segment_state, + persister, wal, write_buffer_flusher, time_provider, @@ -193,66 +196,95 @@ impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { &self, database_name: &str, table_name: &str, - _filters: &[Expr], - _projection: Option<&Vec<usize>>, - _ctx: &SessionState, + filters: &[Expr], + projection: Option<&Vec<usize>>, + ctx: &SessionState, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { let db_schema = self .catalog .db_schema(database_name) .ok_or_else(|| DataFusionError::Execution(format!("db {} not found", database_name)))?; - let table = db_schema - .tables - .get(table_name) - .ok_or_else(|| DataFusionError::Execution(format!("table {} not found", table_name)))?; - let schema = table.schema.clone(); - let table_buffers = self.clone_table_buffers(database_name, table_name); - let chunks = table_buffers - .into_iter() - .map(|table_buffer| { - let batch = table_buffer.rows_to_record_batch(&schema, table.columns()); - let batch_stats = create_chunk_statistics( - Some(table_buffer.row_count()), - &schema, - Some(table_buffer.timestamp_min_max()), - None, - ); - - let chunk: Arc<dyn QueryChunk> = Arc::new(BufferChunk { - batches: vec![batch], - schema: schema.clone(), - stats: Arc::new(batch_stats), - partition_id: TransitionPartitionId::new( - TableId::new(0), - &table_buffer.segment_key, - ), - sort_key: None, - id: ChunkId::new(), - chunk_order: ChunkOrder::new(0), - }); - - chunk - }) - .collect(); + let table_schema = { + let table = db_schema.tables.get(table_name).ok_or_else(|| { + DataFusionError::Execution(format!( + "table {} not found in db {}", + table_name, database_name + )) + })?; - Ok(chunks) - } + table.schema.clone() + }; + + let segment_state = self.segment_state.read(); + let mut chunks = + segment_state.get_table_chunks(db_schema, table_name, filters, projection, ctx)?; + let parquet_files = segment_state.get_parquet_files(database_name, table_name); + + let mut chunk_order = chunks.len() as i64; + let 
object_store_url = self.persister.object_store_url(); + + for parquet_file in parquet_files { + // TODO: update persisted segments to serialize their key to use here + let partition_key = data_types::PartitionKey::from(parquet_file.path.clone()); + let partition_id = data_types::partition::TransitionPartitionId::new( + data_types::TableId::new(0), + &partition_key, + ); + + let chunk_stats = create_chunk_statistics( + Some(parquet_file.row_count as usize), + &table_schema, + Some(parquet_file.timestamp_min_max()), + None, + ); + + let location = ObjPath::from(parquet_file.path.clone()); + + let parquet_exec = ParquetExecInput { + object_store_url: object_store_url.clone(), + object_meta: ObjectMeta { + location, + last_modified: Default::default(), + size: parquet_file.size_bytes as usize, + e_tag: None, + version: None, + }, + }; - fn clone_table_buffers(&self, database_name: &str, table_name: &str) -> Vec<TableBuffer> { - let state = self.segment_state.read(); + let parquet_chunk = ParquetChunk { + schema: table_schema.clone(), + stats: Arc::new(chunk_stats), + partition_id, + sort_key: None, + id: ChunkId::new(), + chunk_order: ChunkOrder::new(chunk_order), + parquet_exec, + }; + + chunk_order += 1; + + chunks.push(Arc::new(parquet_chunk)); + } - state.clone_table_buffers(database_name, table_name) + Ok(chunks) } #[cfg(test)] - fn get_table_record_batches(&self, datbase_name: &str, table_name: &str) -> Vec<RecordBatch> { + fn get_table_record_batches( + &self, + datbase_name: &str, + table_name: &str, + ) -> Vec<arrow::record_batch::RecordBatch> { let db_schema = self.catalog.db_schema(datbase_name).unwrap(); let table = db_schema.tables.get(table_name).unwrap(); let schema = table.schema.clone(); - let table_buffer = self.clone_table_buffers(datbase_name, table_name); - table_buffer + let table_buffers = self + .segment_state + .read() + .clone_table_buffers(datbase_name, table_name); + table_buffers .into_iter() .map(|table_buffer| table_buffer.rows_to_record_batch(&schema, table.columns())) .collect() @@ -260,7 +292,7 @@ impl<W: Wal, T: TimeProvider> WriteBufferImpl<W, T> { } #[async_trait] -impl<W: Wal, T: TimeProvider> Bufferer for WriteBufferImpl<W, T> { +impl<W: Wal, T: TimeProvider, P: Persister> Bufferer for WriteBufferImpl<W, T, P> { async fn write_lp( &self, database: NamespaceName<'static>, @@ -294,7 +326,7 @@ impl<W: Wal, T: TimeProvider> Bufferer for WriteBufferImpl<W, T> { } } -impl<W: Wal, T: TimeProvider> ChunkContainer for WriteBufferImpl<W, T> { +impl<W: Wal, T: TimeProvider, P: Persister> ChunkContainer for WriteBufferImpl<W, T, P> { fn get_table_chunks( &self, database_name: &str, @@ -307,67 +339,7 @@ impl<W: Wal, T: TimeProvider> ChunkContainer for WriteBufferImpl<W, T> { } } -impl<W: Wal, T: TimeProvider> WriteBuffer for WriteBufferImpl<W, T> {} - -#[derive(Debug)] -pub struct BufferChunk { - batches: Vec<RecordBatch>, - schema: Schema, - stats: Arc<Statistics>, - partition_id: data_types::partition::TransitionPartitionId, - sort_key: Option<SortKey>, - id: data_types::ChunkId, - chunk_order: data_types::ChunkOrder, -} - -impl QueryChunk for BufferChunk { - fn stats(&self) -> Arc<Statistics> { - info!("BufferChunk stats {}", self.id); - Arc::clone(&self.stats) - } - - fn schema(&self) -> &Schema { - info!("BufferChunk schema {}", self.id); - &self.schema - } - - fn partition_id(&self) -> &data_types::partition::TransitionPartitionId { - info!("BufferChunk partition_id {}", self.id); - &self.partition_id - } - - fn sort_key(&self) -> Option<&SortKey> { - 
info!("BufferChunk sort_key {}", self.id); - self.sort_key.as_ref() - } - - fn id(&self) -> data_types::ChunkId { - info!("BufferChunk id {}", self.id); - self.id - } - - fn may_contain_pk_duplicates(&self) -> bool { - false - } - - fn data(&self) -> QueryChunkData { - info!("BufferChunk data {}", self.id); - QueryChunkData::in_mem(self.batches.clone(), Arc::clone(self.schema.inner())) - } - - fn chunk_type(&self) -> &str { - "BufferChunk" - } - - fn order(&self) -> data_types::ChunkOrder { - info!("BufferChunk order {}", self.id); - self.chunk_order - } - - fn as_any(&self) -> &dyn Any { - self - } -} +impl<W: Wal, T: TimeProvider, P: Persister> WriteBuffer for WriteBufferImpl<W, T, P> {} pub(crate) fn parse_validate_and_update_catalog( db_name: NamespaceName<'static>, @@ -750,7 +722,10 @@ mod tests { use crate::persister::PersisterImpl; use crate::wal::WalImpl; use crate::{SequenceNumber, WalOpBatch}; + use arrow::record_batch::RecordBatch; use arrow_util::assert_batches_eq; + use datafusion_util::config::register_iox_object_store; + use iox_query::exec::IOxSessionContext; use iox_time::{MockProvider, Time}; use object_store::memory::InMemory; use object_store::ObjectStore; @@ -846,4 +821,143 @@ mod tests { let actual = write_buffer.get_table_record_batches("foo", "cpu"); assert_batches_eq!(&expected, &actual); } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn returns_chunks_across_buffered_persisted_and_persisting_data() { + let dir = test_helpers::tmp_dir().unwrap().into_path(); + let wal = Some(Arc::new(WalImpl::new(dir.clone()).unwrap())); + let object_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); + let persister = Arc::new(PersisterImpl::new(Arc::clone(&object_store))); + let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let segment_duration = SegmentDuration::new_5m(); + let write_buffer = WriteBufferImpl::new( + Arc::clone(&persister), + wal.clone(), + Arc::clone(&time_provider), + segment_duration, + ) + .await + .unwrap(); + let session_context = IOxSessionContext::with_testing(); + let runtime_env = session_context.inner().runtime_env(); + register_iox_object_store(runtime_env, "influxdb3", Arc::clone(&object_store)); + + let _ = write_buffer + .write_lp( + NamespaceName::new("foo").unwrap(), + "cpu bar=1 10", + Time::from_timestamp_nanos(123), + false, + Precision::Nanosecond, + ) + .await + .unwrap(); + + let expected = vec![ + "+-----+--------------------------------+", + "| bar | time |", + "+-----+--------------------------------+", + "| 1.0 | 1970-01-01T00:00:00.000000010Z |", + "+-----+--------------------------------+", + ]; + let actual = get_table_batches(&write_buffer, "foo", "cpu", &session_context).await; + assert_batches_eq!(&expected, &actual); + + // advance the time and wait for it to persist + time_provider.set(Time::from_timestamp(800, 0).unwrap()); + loop { + let segment_state = write_buffer.segment_state.read(); + if !segment_state.persisted_segments().is_empty() { + break; + } + } + + // nothing should be open at this point + assert!(write_buffer + .segment_state + .read() + .open_segment_times() + .is_empty()); + + // verify we get the persisted data + let actual = get_table_batches(&write_buffer, "foo", "cpu", &session_context).await; + assert_batches_eq!(&expected, &actual); + + // now write some into the next segment we're in and verify we get both buffer and persisted + let _ = write_buffer + .write_lp( + NamespaceName::new("foo").unwrap(), + "cpu bar=2", + Time::from_timestamp(900, 
0).unwrap(), + false, + Precision::Nanosecond, + ) + .await + .unwrap(); + let expected = vec![ + "+-----+--------------------------------+", + "| bar | time |", + "+-----+--------------------------------+", + "| 2.0 | 1970-01-01T00:15:00Z |", + "| 1.0 | 1970-01-01T00:00:00.000000010Z |", + "+-----+--------------------------------+", + ]; + let actual = get_table_batches(&write_buffer, "foo", "cpu", &session_context).await; + assert_batches_eq!(&expected, &actual); + + // and now reload the buffer and verify that we get persisted and the buffer again + let write_buffer = WriteBufferImpl::new( + Arc::clone(&persister), + wal, + Arc::clone(&time_provider), + segment_duration, + ) + .await + .unwrap(); + let actual = get_table_batches(&write_buffer, "foo", "cpu", &session_context).await; + assert_batches_eq!(&expected, &actual); + + // and now add to the buffer and verify that we still only get two chunks + let _ = write_buffer + .write_lp( + NamespaceName::new("foo").unwrap(), + "cpu bar=3", + Time::from_timestamp(950, 0).unwrap(), + false, + Precision::Nanosecond, + ) + .await + .unwrap(); + let expected = vec![ + "+-----+--------------------------------+", + "| bar | time |", + "+-----+--------------------------------+", + "| 2.0 | 1970-01-01T00:15:00Z |", + "| 3.0 | 1970-01-01T00:15:50Z |", + "| 1.0 | 1970-01-01T00:00:00.000000010Z |", + "+-----+--------------------------------+", + ]; + let actual = get_table_batches(&write_buffer, "foo", "cpu", &session_context).await; + assert_batches_eq!(&expected, &actual); + } + + async fn get_table_batches( + write_buffer: &WriteBufferImpl<WalImpl, MockProvider, PersisterImpl>, + database_name: &str, + table_name: &str, + ctx: &IOxSessionContext, + ) -> Vec<RecordBatch> { + let chunks = write_buffer + .get_table_chunks(database_name, table_name, &[], None, &ctx.inner().state()) + .unwrap(); + let mut batches = vec![]; + for chunk in chunks { + let chunk = chunk + .data() + .read_to_batches(chunk.schema(), ctx.inner()) + .await; + batches.extend(chunk); + } + batches + } } diff --git a/influxdb3_write/src/write_buffer/segment_state.rs b/influxdb3_write/src/write_buffer/segment_state.rs index af4c8f4ffc..0501fc24d3 100644 --- a/influxdb3_write/src/write_buffer/segment_state.rs +++ b/influxdb3_write/src/write_buffer/segment_state.rs @@ -1,14 +1,21 @@ //! State for the write buffer segments. 
-use crate::catalog::Catalog; +use crate::catalog::{Catalog, DatabaseSchema}; +use crate::chunk::BufferChunk; use crate::wal::WalSegmentWriterNoopImpl; use crate::write_buffer::buffer_segment::{ ClosedBufferSegment, OpenBufferSegment, TableBuffer, WriteBatch, }; use crate::{ - persister, wal, write_buffer, PersistedSegment, Persister, SegmentDuration, SegmentId, - SegmentRange, Wal, WalOp, + persister, wal, write_buffer, ParquetFile, PersistedSegment, Persister, SegmentDuration, + SegmentId, SegmentRange, Wal, WalOp, }; +use data_types::{ChunkId, ChunkOrder, TableId, TransitionPartitionId}; +use datafusion::common::DataFusionError; +use datafusion::execution::context::SessionState; +use datafusion::logical_expr::Expr; +use iox_query::chunk_statistics::create_chunk_statistics; +use iox_query::QueryChunk; use iox_time::{Time, TimeProvider}; use observability_deps::tracing::error; use parking_lot::RwLock; @@ -36,6 +43,7 @@ pub(crate) struct SegmentState<T, W> { } impl<T: TimeProvider, W: Wal> SegmentState<T, W> { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( segment_duration: SegmentDuration, last_segment_id: SegmentId, @@ -43,6 +51,7 @@ impl<T: TimeProvider, W: Wal> SegmentState<T, W> { time_provider: Arc<T>, open_segments: Vec<OpenBufferSegment>, persisting_segments: Vec<ClosedBufferSegment>, + persisted_segments: Vec<PersistedSegment>, wal: Option<Arc<W>>, ) -> Self { let mut segments = BTreeMap::new(); @@ -55,6 +64,14 @@ impl<T: TimeProvider, W: Wal> SegmentState<T, W> { persisting_segments_map.insert(segment.segment_range.start_time, Arc::new(segment)); } + let mut persisted_segments_map = BTreeMap::new(); + for segment in persisted_segments { + persisted_segments_map.insert( + Time::from_timestamp_nanos(segment.segment_min_time), + Arc::new(segment), + ); + } + Self { segment_duration, last_segment_id, @@ -63,7 +80,7 @@ impl<T: TimeProvider, W: Wal> SegmentState<T, W> { wal, segments, persisting_segments: persisting_segments_map, - persisted_segments: BTreeMap::new(), + persisted_segments: persisted_segments_map, } } @@ -85,6 +102,81 @@ impl<T: TimeProvider, W: Wal> SegmentState<T, W> { segment.buffer_writes(write_batch) } + pub(crate) fn get_table_chunks( + &self, + db_schema: Arc<DatabaseSchema>, + table_name: &str, + _filters: &[Expr], + _projection: Option<&Vec<usize>>, + _ctx: &SessionState, + ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { + let table = db_schema + .tables + .get(table_name) + .ok_or_else(|| DataFusionError::Execution(format!("table {} not found", table_name)))?; + let schema = table.schema.clone(); + + let mut table_buffers = self.clone_table_buffers(&db_schema.name, table_name); + table_buffers.extend( + self.persisting_segments + .values() + .filter_map(|segment| segment.table_buffer(&db_schema.name, table_name)) + .collect::<Vec<_>>(), + ); + + let mut chunk_order = 0; + + let chunks = table_buffers + .into_iter() + .map(|table_buffer| { + let batch = table_buffer.rows_to_record_batch(&schema, table.columns()); + let batch_stats = create_chunk_statistics( + Some(table_buffer.row_count()), + &schema, + Some(table_buffer.timestamp_min_max()), + None, + ); + + let chunk: Arc<dyn QueryChunk> = Arc::new(BufferChunk { + batches: vec![batch], + schema: schema.clone(), + stats: Arc::new(batch_stats), + partition_id: TransitionPartitionId::new( + TableId::new(0), + &table_buffer.segment_key, + ), + sort_key: None, + id: ChunkId::new(), + chunk_order: ChunkOrder::new(chunk_order), + }); + + chunk_order += 1; + + chunk + }) + .collect(); + + 
Ok(chunks) + } + + pub(crate) fn get_parquet_files( + &self, + database_name: &str, + table_name: &str, + ) -> Vec<ParquetFile> { + let mut parquet_files = vec![]; + + for segment in self.persisted_segments.values() { + segment.databases.get(database_name).map(|db| { + db.tables.get(table_name).map(|table| { + parquet_files.extend(table.parquet_files.clone()); + }) + }); + } + + parquet_files + } + pub(crate) fn clone_table_buffers( &self, database_name: &str, @@ -96,6 +188,16 @@ impl<T: TimeProvider, W: Wal> SegmentState<T, W> { .collect::<Vec<_>>() } + #[cfg(test)] + pub(crate) fn persisted_segments(&self) -> Vec<Arc<PersistedSegment>> { + self.persisted_segments.values().cloned().collect() + } + + #[cfg(test)] + pub(crate) fn open_segment_times(&self) -> Vec<Time> { + self.segments.keys().cloned().collect() + } + #[allow(dead_code)] pub(crate) fn segment_for_time(&self, time: Time) -> Option<&OpenBufferSegment> { self.segments.get(&time) @@ -364,6 +466,7 @@ mod tests { Arc::clone(&time_provider), vec![open_segment1, open_segment2, open_segment3], vec![], + vec![], None, ); @@ -443,6 +546,7 @@ mod tests { Arc::clone(&time_provider), vec![open_segment2, open_segment3], vec![open_segment1.into_closed_segment(Arc::clone(&catalog))], + vec![], Some(Arc::clone(&wal)), ); let segment_state = Arc::new(RwLock::new(segment_state));
f026d7bdafd93fe074331b17c61c6de16c974ba7
Jeffrey Smith II
2022-11-17 14:23:10
Fixes migrating when a remote already exists (#23912)
* fix: handle migrating with already defined remotes * test: add test to verify migrating already defined remotes * fix: properly handle Up
null
fix: Fixes migrating when a remote already exists (#23912) * fix: handle migrating with already defined remotes * test: add test to verify migrating already defined remotes * fix: properly handle Up
diff --git a/replications/internal/store_test.go b/replications/internal/store_test.go index 91d0ea4bc0..bb6f95fa12 100644 --- a/replications/internal/store_test.go +++ b/replications/internal/store_test.go @@ -525,6 +525,39 @@ func TestMigrateDownFromReplicationsWithName(t *testing.T) { require.Equal(t, platform.ID(10), rs.Replications[0].ID) } +func TestMigrateUpToRemotesNullRemoteOrg(t *testing.T) { + sqlStore, clean := sqlite.NewTestStore(t) + logger := zaptest.NewLogger(t) + sqliteMigrator := sqlite.NewMigrator(sqlStore, logger) + require.NoError(t, sqliteMigrator.UpUntil(ctx, 7, migrations.AllUp)) + + // Make sure foreign-key checking is enabled. + _, err := sqlStore.DB.Exec("PRAGMA foreign_keys = ON;") + require.NoError(t, err) + + testStore := NewStore(sqlStore) + defer clean(t) + + insertRemote(t, testStore, replication.RemoteID) + + req := createReq + req.RemoteBucketID = platform.ID(100) + _, err = testStore.CreateReplication(ctx, platform.ID(10), req) + require.NoError(t, err) + + req.RemoteBucketID = platform.ID(0) + req.RemoteBucketName = "testbucket" + req.Name = "namedrepl" + _, err = testStore.CreateReplication(ctx, platform.ID(20), req) + require.NoError(t, err) + + replications, err := testStore.ListReplications(context.Background(), influxdb.ReplicationListFilter{OrgID: replication.OrgID}) + require.NoError(t, err) + require.Equal(t, 2, len(replications.Replications)) + + require.NoError(t, sqliteMigrator.UpUntil(ctx, 8, migrations.AllUp)) +} + func TestGetFullHTTPConfig(t *testing.T) { t.Parallel() diff --git a/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql b/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql index 539e085518..e74f571499 100644 --- a/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql +++ b/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql @@ -1,5 +1,6 @@ -- Removes the "NOT NULL" from remote_org_id ALTER TABLE remotes RENAME TO _remotes_old; +DROP INDEX idx_remote_url_per_org; CREATE TABLE remotes ( id VARCHAR(16) NOT NULL PRIMARY KEY, @@ -28,7 +29,6 @@ INSERT INTO remotes ( created_at, updated_at ) SELECT * FROM _remotes_old; -DROP TABLE _remotes_old; -- Create indexes on lookup patterns we expect to be common CREATE INDEX idx_remote_url_per_org ON remotes (org_id, remote_url); @@ -76,6 +76,7 @@ INSERT INTO replications ( updated_at ) SELECT * FROM _replications_old; DROP TABLE _replications_old; +DROP TABLE _remotes_old; -- Create indexes on lookup patterns we expect to be common CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); diff --git a/sqlite/migrator.go b/sqlite/migrator.go index 7f8cce0698..893b8fe60b 100644 --- a/sqlite/migrator.go +++ b/sqlite/migrator.go @@ -33,6 +33,13 @@ func (m *Migrator) SetBackupPath(path string) { } func (m *Migrator) Up(ctx context.Context, source embed.FS) error { + return m.UpUntil(ctx, -1, source) +} + +// UpUntil migrates until a specific migration. +// -1 or 0 will run all migrations, any other number will run up until that. +// Returns no error untilMigration is less than the already run migrations. 
+func (m *Migrator) UpUntil(ctx context.Context, untilMigration int, source embed.FS) error { knownMigrations, err := source.ReadDir(".") if err != nil { return err @@ -60,7 +67,16 @@ func (m *Migrator) Up(ctx context.Context, source embed.FS) error { } } - migrationsToDo := len(knownMigrations[lastMigration:]) + var migrationsToDo int + if untilMigration < 1 { + migrationsToDo = len(knownMigrations[lastMigration:]) + untilMigration = len(knownMigrations) + } else if untilMigration >= lastMigration { + migrationsToDo = len(knownMigrations[lastMigration:untilMigration]) + } else { + return nil + } + if migrationsToDo == 0 { return nil } @@ -85,7 +101,7 @@ func (m *Migrator) Up(ctx context.Context, source embed.FS) error { m.log.Info("Bringing up metadata migrations", zap.Int("migration_count", migrationsToDo)) - for _, f := range knownMigrations[lastMigration:] { + for _, f := range knownMigrations[lastMigration:untilMigration] { n := f.Name() m.log.Debug("Executing metadata migration", zap.String("migration_name", n))
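The `UpUntil` bounds handling added above (run everything for -1 or 0, run up to `untilMigration` when it is at or past the last applied migration, otherwise do nothing) is easiest to see as a small pure function over the known migration list. Below is a minimal sketch of that slicing logic, restated in Rust rather than the commit's Go; the function name, file names, and `main` driver are illustrative only.

```rust
// Illustrative restatement of Migrator.UpUntil's bounds handling; the real
// implementation is the Go code in the diff above.
fn migrations_to_run(known: &[String], last_applied: usize, until: i64) -> &[String] {
    if until < 1 {
        // -1 or 0: run every migration that has not been applied yet.
        &known[last_applied..]
    } else if until as usize >= last_applied {
        // Run only up to (and including) migration number `until`.
        &known[last_applied..until as usize]
    } else {
        // Requesting a point older than what is already applied is a no-op.
        &[]
    }
}

fn main() {
    let known: Vec<String> = (1..=8).map(|i| format!("{i:04}_migration.sql")).collect();
    // As in the test above: with 7 migrations already applied, UpUntil(8) runs one more.
    assert_eq!(migrations_to_run(&known, 7, 8), &known[7..8]);
    // Plain Up (equivalent to UpUntil(-1)) runs the remainder.
    assert_eq!(migrations_to_run(&known, 7, -1), &known[7..]);
    // Asking for an already-applied migration does nothing.
    assert!(migrations_to_run(&known, 7, 3).is_empty());
}
```

Seen this way, the test flow above is explicit: migrating up to 7 leaves migration 8 (which rebuilds the remotes table) unapplied until the replications that reference the old table have been created.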
9648207f010920f02c7e5781121955c2092ab857
Dom Dwyer
2022-11-25 15:35:44
initialise an ingester2 instance
Adds a public constructor to initialise an ingester2 instance.
null
feat(ingester2): initialise an ingester2 instance Adds a public constructor to initialise an ingester2 instance.
diff --git a/ingester2/src/init.rs b/ingester2/src/init.rs index 2e8b3dcdae..262801bd12 100644 --- a/ingester2/src/init.rs +++ b/ingester2/src/init.rs @@ -1,11 +1,24 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use arrow_flight::flight_service_server::{FlightService, FlightServiceServer}; +use backoff::BackoffConfig; use generated_types::influxdata::iox::{ catalog::v1::catalog_service_server::{CatalogService, CatalogServiceServer}, ingester::v1::write_service_server::{WriteService, WriteServiceServer}, }; use iox_catalog::interface::Catalog; +use thiserror::Error; + +use crate::{ + buffer_tree::{ + namespace::name_resolver::{NamespaceNameProvider, NamespaceNameResolver}, + partition::resolver::{CatalogPartitionResolver, PartitionCache, PartitionProvider}, + table::name_resolver::{TableNameProvider, TableNameResolver}, + BufferTree, + }, + server::grpc::GrpcDelegate, + TRANSITION_SHARD_ID, +}; /// Acquire opaque handles to the Ingester RPC service implementations. /// @@ -44,3 +57,94 @@ pub trait IngesterRpcInterface: Send + Sync + std::fmt::Debug { metrics: &metric::Registry, ) -> FlightServiceServer<Self::FlightHandler>; } + +/// Errors that occur during initialisation of an `ingester2` instance. +#[derive(Debug, Error)] +pub enum InitError { + /// A catalog error occurred while fetching the most recent partitions for + /// the internal cache. + #[error("failed to pre-warm partition cache: {0}")] + PreWarmPartitions(iox_catalog::interface::Error), +} + +/// Initialise a new `ingester2` instance, returning the gRPC service handler +/// implementations to be bound by the caller. +/// +/// # Deferred Loading for Persist Operations +/// +/// Several items within the ingester's internal state are loaded only when +/// needed at persist time; this includes string name identifiers of namespaces, +/// tables, etc that are embedded within the Parquet file metadata. +/// +/// As an optimisation, these deferred loads occur in a background task before +/// the persist action actually needs them, in order to both eliminate the +/// latency of waiting for the value to be fetched, and to avoid persistence of +/// large numbers of partitions operations causing large spike in catalog +/// requests / load. +/// +/// These values are loaded a uniformly random duration of time between +/// initialisation, and at most, `persist_background_fetch_time` duration of +/// time later. By increasing this duration value the many loads are spread +/// approximately uniformly over a longer period of time, decreasing the catalog +/// load they cause. +/// +/// If the `persist_background_fetch_time` duration is too large, they will not +/// have resolved in the background when a persist operation starts, and they +/// will require demand loading, causing an immediate catalog load spike. This +/// value should be tuned to be slightly less than the interval between persist +/// operations, but not so long that it causes catalog load spikes at persist +/// time (which can be observed by the catalog instrumentation metrics). +pub async fn new( + catalog: Arc<dyn Catalog>, + metrics: Arc<metric::Registry>, + persist_background_fetch_time: Duration, +) -> Result<impl IngesterRpcInterface, InitError> { + // Initialise the deferred namespace name resolver. + let namespace_name_provider: Arc<dyn NamespaceNameProvider> = + Arc::new(NamespaceNameResolver::new( + persist_background_fetch_time, + Arc::clone(&catalog), + BackoffConfig::default(), + )); + + // Initialise the deferred table name resolver. 
+ let table_name_provider: Arc<dyn TableNameProvider> = Arc::new(TableNameResolver::new( + persist_background_fetch_time, + Arc::clone(&catalog), + BackoffConfig::default(), + )); + + // Read the most recently created partitions for the shards this ingester + // instance will be consuming from. + // + // By caching these hot partitions overall catalog load after an ingester + // starts up is reduced, and the associated query latency is removed from + // the (blocking) ingest hot path. + let recent_partitions = catalog + .repositories() + .await + .partitions() + .most_recent_n(10_000, &[TRANSITION_SHARD_ID]) + .await + .map_err(InitError::PreWarmPartitions)?; + + // Build the partition provider, wrapped in the partition cache. + let partition_provider = CatalogPartitionResolver::new(Arc::clone(&catalog)); + let partition_provider = PartitionCache::new( + partition_provider, + recent_partitions, + persist_background_fetch_time, + Arc::clone(&catalog), + BackoffConfig::default(), + ); + let partition_provider: Arc<dyn PartitionProvider> = Arc::new(partition_provider); + + let buffer = Arc::new(BufferTree::new( + namespace_name_provider, + table_name_provider, + partition_provider, + metrics, + )); + + Ok(GrpcDelegate::new(Arc::clone(&buffer), buffer)) +}
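The deferred-loading behaviour documented in the constructor above comes down to: for each value, spawn a background task, sleep a uniformly random duration within `persist_background_fetch_time`, then resolve and cache the value; a persist that needs it earlier simply resolves it on demand. The sketch below illustrates that pattern only and is not the ingester's actual `DeferredLoad` or resolver code; the 30 second window, the `tokio`/`rand` usage, and the namespace-name stand-in are assumptions made for the example.

```rust
use std::{sync::Arc, time::Duration};

use rand::Rng;
use tokio::{sync::OnceCell, time::sleep};

// Stand-in for the real catalog query the resolvers would make.
async fn fetch_namespace_name() -> String {
    "example_namespace".to_string()
}

#[tokio::main]
async fn main() {
    // Window over which background fetches are spread; plays the role of
    // `persist_background_fetch_time`.
    let max_wait = Duration::from_secs(30);

    // One cached, lazily resolved value (e.g. a namespace name).
    let name: Arc<OnceCell<String>> = Arc::new(OnceCell::new());

    // Background pre-fetch after a uniformly random delay in [0, max_wait],
    // so many such values spread their catalog lookups across the window
    // instead of hitting the catalog all at once.
    let cell = Arc::clone(&name);
    tokio::spawn(async move {
        let delay = rand::thread_rng().gen_range(Duration::ZERO..=max_wait);
        sleep(delay).await;
        let _ = cell.get_or_init(fetch_namespace_name).await;
    });

    // At persist time: reuse the pre-fetched value if the background task has
    // already run, otherwise fall back to demand loading right now.
    let resolved = name.get_or_init(fetch_namespace_name).await;
    println!("namespace name: {resolved}");
}
```

Tuning the window trades the two failure modes described above: too small and the background loads bunch up into a startup spike, too large and persist operations end up demand loading anyway.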
41c9c0f3969e804ed6b16f53a1c21c0ee6ce8bcd
Dom Dwyer
2023-07-13 15:55:07
reusable FSM / RecordBatch schemas
Cache the merged Schema of all the RecordBatch within a buffer at snapshot generation time. To be useful, this cached schema is made available to the PartitionData for re-use, allowing the schema of "hot" data within a partition's mutable buffer to be read without generating a RecordBatch first.
null
perf(ingester): reusable FSM / RecordBatch schemas Cache the merged Schema of all the RecordBatch within a buffer at snapshot generation time. To be useful, this cached schema is made available to the PartitionData for re-use, allowing the schema of "hot" data within a partition's mutable buffer to be read without generating a RecordBatch first.
diff --git a/ingester/src/buffer_tree/partition/buffer.rs b/ingester/src/buffer_tree/partition/buffer.rs index ad72df4fb3..3ce9182916 100644 --- a/ingester/src/buffer_tree/partition/buffer.rs +++ b/ingester/src/buffer_tree/partition/buffer.rs @@ -7,6 +7,7 @@ mod mutable_buffer; mod state_machine; pub(crate) mod traits; +use schema::Schema; pub(crate) use state_machine::*; use crate::query::projection::OwnedProjection; @@ -88,6 +89,12 @@ impl DataBuffer { } } + pub(crate) fn schema(&self) -> Option<Schema> { + match self.0.get() { + FsmState::Buffering(v) => v.schema(), + } + } + // Deconstruct the [`DataBuffer`] into the underlying FSM in a // [`Persisting`] state, if the buffer contains any data. pub(crate) fn into_persisting(self) -> Option<BufferState<Persisting>> { diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine.rs b/ingester/src/buffer_tree/partition/buffer/state_machine.rs index 451edc2eca..59beed08c3 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine.rs @@ -133,6 +133,10 @@ where fn timestamp_stats(&self) -> Option<TimestampMinMax> { self.state.timestamp_stats() } + + fn schema(&self) -> Option<schema::Schema> { + self.state.schema() + } } #[cfg(test)] diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs index af3daa2468..61d4ff2b2c 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/buffering.rs @@ -3,7 +3,7 @@ use arrow::record_batch::RecordBatch; use data_types::{StatValues, TimestampMinMax}; use mutable_batch::{column::ColumnData, MutableBatch}; -use schema::TIME_COLUMN_NAME; +use schema::{Projection, TIME_COLUMN_NAME}; use super::{snapshot::Snapshot, BufferState, Transition}; use crate::{ @@ -59,6 +59,13 @@ impl Queryable for Buffering { max: v.max.unwrap(), }) } + + fn schema(&self) -> Option<schema::Schema> { + self.buffer.buffer().map(|v| { + v.schema(Projection::All) + .expect("failed to construct batch schema") + }) + } } impl Writeable for Buffering { diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs index 5e3c35935b..d5cd0e6bf6 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs @@ -3,6 +3,7 @@ use arrow::record_batch::RecordBatch; use data_types::{sequence_number_set::SequenceNumberSet, TimestampMinMax}; use iox_query::util::compute_timenanosecond_min_max; +use schema::{merge::merge_record_batch_schemas, Schema}; use super::BufferState; use crate::{ @@ -20,6 +21,7 @@ pub(crate) struct Persisting { /// Statistics describing the data in snapshots. row_count: usize, timestamp_stats: TimestampMinMax, + schema: Schema, } impl Persisting { @@ -27,6 +29,7 @@ impl Persisting { snapshots: Vec<RecordBatch>, row_count: usize, timestamp_stats: TimestampMinMax, + schema: Schema, ) -> Self { // Invariant: the summary statistics provided must match the actual // data. 
@@ -38,11 +41,13 @@ impl Persisting { timestamp_stats, compute_timenanosecond_min_max(snapshots.iter()).unwrap() ); + debug_assert_eq!(schema, merge_record_batch_schemas(&snapshots)); Self { snapshots, row_count, timestamp_stats, + schema, } } } @@ -59,6 +64,10 @@ impl Queryable for Persisting { fn timestamp_stats(&self) -> Option<TimestampMinMax> { Some(self.timestamp_stats) } + + fn schema(&self) -> Option<schema::Schema> { + Some(self.schema.clone()) + } } impl BufferState<Persisting> { diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs index 20858afbe7..db1fea6d1b 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs @@ -3,6 +3,7 @@ use arrow::record_batch::RecordBatch; use data_types::TimestampMinMax; use iox_query::util::compute_timenanosecond_min_max; +use schema::{merge::merge_record_batch_schemas, Schema}; use super::BufferState; use crate::{ @@ -21,6 +22,7 @@ pub(crate) struct Snapshot { /// Statistics describing the data in snapshots. row_count: usize, timestamp_stats: TimestampMinMax, + schema: Schema, } impl Snapshot { @@ -32,10 +34,13 @@ impl Snapshot { let timestamp_stats = compute_timenanosecond_min_max(snapshots.iter()) .expect("non-empty batch must contain timestamps"); + let schema = merge_record_batch_schemas(&snapshots); + Self { snapshots, row_count, timestamp_stats, + schema, } } } @@ -52,6 +57,10 @@ impl Queryable for Snapshot { fn timestamp_stats(&self) -> Option<TimestampMinMax> { Some(self.timestamp_stats) } + + fn schema(&self) -> Option<schema::Schema> { + Some(self.schema.clone()) + } } impl BufferState<Snapshot> { @@ -62,6 +71,7 @@ impl BufferState<Snapshot> { self.state.snapshots, self.state.row_count, self.state.timestamp_stats, + self.state.schema, ), sequence_numbers: self.sequence_numbers, } diff --git a/ingester/src/buffer_tree/partition/buffer/traits.rs b/ingester/src/buffer_tree/partition/buffer/traits.rs index 62390ddb4b..a73dc50793 100644 --- a/ingester/src/buffer_tree/partition/buffer/traits.rs +++ b/ingester/src/buffer_tree/partition/buffer/traits.rs @@ -5,6 +5,7 @@ use std::fmt::Debug; use arrow::record_batch::RecordBatch; use data_types::TimestampMinMax; use mutable_batch::MutableBatch; +use schema::Schema; use crate::query::projection::OwnedProjection; @@ -20,6 +21,8 @@ pub(crate) trait Queryable: Debug { fn timestamp_stats(&self) -> Option<TimestampMinMax>; + fn schema(&self) -> Option<Schema>; + /// Return the set of [`RecordBatch`] containing ONLY the projected columns. fn get_query_data(&self, projection: &OwnedProjection) -> Vec<RecordBatch>; }
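The caching above happens when the buffer transitions into the `Snapshot` (and later `Persisting`) state: the per-batch schemas are merged once at snapshot generation time and then handed out on every `schema()` call, while data still in the mutable buffer has its schema read directly from the buffer rather than by generating a `RecordBatch` first. Below is a reduced sketch of the idea; it substitutes plain Arrow's `Schema::try_merge` for the IOx `merge_record_batch_schemas()` helper, and the struct and method names are illustrative only.

```rust
use std::sync::Arc;

use arrow::datatypes::{Schema, SchemaRef};
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;

/// Sketch: the merged schema is computed exactly once, when the snapshot is
/// built, and cached next to the batches it describes.
struct Snapshot {
    batches: Vec<RecordBatch>,
    schema: SchemaRef,
}

impl Snapshot {
    fn new(batches: Vec<RecordBatch>) -> Result<Self, ArrowError> {
        // Merge the per-batch schemas up front, at snapshot generation time.
        let merged = Schema::try_merge(batches.iter().map(|b| b.schema().as_ref().clone()))?;
        Ok(Self {
            batches,
            schema: Arc::new(merged),
        })
    }

    /// Cheap schema read: nothing is rebuilt or re-merged to answer it.
    fn schema(&self) -> SchemaRef {
        Arc::clone(&self.schema)
    }

    fn batches(&self) -> &[RecordBatch] {
        &self.batches
    }
}
```

The `debug_assert_eq!` in the `Persisting` constructor above keeps the cached value honest: in debug builds it re-merges the batch schemas and checks they still match the schema carried forward from the snapshot.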
1827866d00f0c77c05967552ff1c55fccd7a896c
Paul Dix
2024-03-25 08:26:24
initial load generator implementation (#24808)
* feat: initial load generator implementation This adds a load generator as a new crate. Initially it only generates write load, but the scaffolding is there to add a query load generator to complement the write load tool. This could have been added as a subcommand to the influxdb3 program, but I thought it best to have it separate for now. It's fairly light on tests and error handling given it's an internal tooling CLI. I've added only something very basic to test the line protocol generation and run the actual write command by hand. I included pretty detailed instructions and some runnable examples. * refactor: address PR feedback
null
feat: initial load generator implementation (#24808) * feat: initial load generator implementation This adds a load generator as a new crate. Initially it only generates write load, but the scaffolding is there to add a query load generator to complement the write load tool. This could have been added as a subcommand to the influxdb3 program, but I thought it best to have it separate for now. It's fairly light on tests and error handling given it's an internal tooling CLI. I've added only something very basic to test the line protocol generation and run the actual write command by hand. I included pretty detailed instructions and some runnable examples. * refactor: address PR feedback
diff --git a/Cargo.lock b/Cargo.lock index 655a7795b3..ecca5f06b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2544,6 +2544,29 @@ dependencies = [ "url", ] +[[package]] +name = "influxdb3_load_generator" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "clap", + "csv", + "dotenvy", + "humantime", + "influxdb3_client", + "observability_deps", + "parking_lot", + "rand", + "secrecy", + "serde", + "serde_json", + "thiserror", + "tokio", + "trogging", + "url", +] + [[package]] name = "influxdb3_server" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 527b7f12ac..adabcd85de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ members = [ "influxdb3", "influxdb3_client", + "influxdb3_load_generator", "influxdb3_server", "influxdb3_write", "iox_query_influxql_rewrite", @@ -49,6 +50,7 @@ crc32fast = "1.2.0" crossbeam-channel = "0.5.11" datafusion = { git = "https://github.com/erratic-pattern/arrow-datafusion.git", rev = "5965d670c88bdfa1fb74f32fd5021d400838dade" } datafusion-proto = { git = "https://github.com/erratic-pattern/arrow-datafusion.git", rev = "5965d670c88bdfa1fb74f32fd5021d400838dade" } +csv = "1.3.0" dotenvy = "0.15.7" flate2 = "1.0.27" futures = "0.3.28" @@ -56,6 +58,7 @@ futures-util = "0.3.30" hashbrown = "0.14.3" hex = "0.4.3" http = "0.2.9" +humantime = "2.1.0" hyper = "0.14" libc = { version = "0.2" } mockito = { version = "1.2.0", default-features = false } diff --git a/influxdb3_load_generator/Cargo.toml b/influxdb3_load_generator/Cargo.toml new file mode 100644 index 0000000000..c73cb03769 --- /dev/null +++ b/influxdb3_load_generator/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "influxdb3_load_generator" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +# Core Crates +observability_deps.workspace = true +trogging.workspace = true + +# Local Deps +influxdb3_client = { path = "../influxdb3_client" } + +# crates.io Dependencies +serde.workspace = true +clap.workspace = true +dotenvy.workspace = true +humantime.workspace = true +secrecy.workspace = true +serde_json.workspace = true +tokio.workspace = true +thiserror.workspace = true +url.workspace = true +rand.workspace = true +anyhow.workspace = true +csv.workspace = true +parking_lot.workspace = true +chrono.workspace = true + +[lints] +workspace = true diff --git a/influxdb3_load_generator/src/commands/common.rs b/influxdb3_load_generator/src/commands/common.rs new file mode 100644 index 0000000000..477eed3121 --- /dev/null +++ b/influxdb3_load_generator/src/commands/common.rs @@ -0,0 +1,28 @@ +use clap::Parser; +use secrecy::Secret; +use url::Url; + +#[derive(Debug, Parser)] +pub(crate) struct InfluxDb3Config { + /// The host URL of the running InfluxDB 3.0 server + #[clap( + short = 'h', + long = "host", + env = "INFLUXDB3_HOST_URL", + default_value = "http://127.0.0.1:8181" + )] + pub(crate) host_url: Url, + + /// The database name to generate load against + #[clap( + short = 'd', + long = "dbname", + env = "INFLUXDB3_DATABASE_NAME", + default_value = "load_test" + )] + pub(crate) database_name: String, + + /// The token for authentication with the InfluxDB 3.0 server + #[clap(long = "token", env = "INFLUXDB3_AUTH_TOKEN")] + pub(crate) auth_token: Option<Secret<String>>, +} diff --git a/influxdb3_load_generator/src/commands/query.rs b/influxdb3_load_generator/src/commands/query.rs new file mode 100644 index 
0000000000..89ba0927f2 --- /dev/null +++ b/influxdb3_load_generator/src/commands/query.rs @@ -0,0 +1,54 @@ +use std::str::Utf8Error; + +use clap::Parser; +use influxdb3_client::Format; +use secrecy::ExposeSecret; +use tokio::io; + +use super::common::InfluxDb3Config; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum Error { + #[error(transparent)] + Client(#[from] influxdb3_client::Error), + + #[error("invlid UTF8 received from server: {0}")] + Utf8(#[from] Utf8Error), + + #[error("io error: {0}")] + Io(#[from] io::Error), +} + +pub(crate) type Result<T> = std::result::Result<T, Error>; + +#[derive(Debug, Parser)] +#[clap(visible_alias = "q", trailing_var_arg = true)] +pub(crate) struct Config { + /// Common InfluxDB 3.0 config + #[clap(flatten)] + influxdb3_config: InfluxDb3Config, +} + +pub(crate) async fn command(config: Config) -> Result<()> { + let InfluxDb3Config { + host_url, + database_name, + auth_token, + } = config.influxdb3_config; + let mut client = influxdb3_client::Client::new(host_url)?; + if let Some(t) = auth_token { + client = client.with_auth_token(t.expose_secret()); + } + + println!("hello from query!"); + + let resp_bytes = client + .api_v3_query_sql(database_name, "select * from foo limit 10;") + .format(Format::Json) + .send() + .await?; + + println!("{}", std::str::from_utf8(&resp_bytes)?); + + Ok(()) +} diff --git a/influxdb3_load_generator/src/commands/write.rs b/influxdb3_load_generator/src/commands/write.rs new file mode 100644 index 0000000000..ffde8e6469 --- /dev/null +++ b/influxdb3_load_generator/src/commands/write.rs @@ -0,0 +1,476 @@ +use crate::line_protocol_generator::{create_generators, Generator}; +use crate::report::WriteReporter; +use crate::specification::DataSpec; +use anyhow::Context; +use chrono::{DateTime, Local}; +use clap::Parser; +use influxdb3_client::{Client, Precision}; +use secrecy::{ExposeSecret, Secret}; +use std::ops::Add; +use std::sync::Arc; +use std::time::Duration; +use tokio::time::Instant; +use url::Url; + +use super::common::InfluxDb3Config; + +#[derive(Debug, Parser)] +#[clap(visible_alias = "w", trailing_var_arg = true)] +pub struct Config { + /// Common InfluxDB 3.0 config + #[clap(flatten)] + influxdb3_config: InfluxDb3Config, + + /// The path to the spec file to use for this run. Or specify a name of a builtin spec to use. + /// If not specified, the generator will output a list of builtin specs along with help and + /// an example for writing your own. + #[clap(short = 's', long = "spec", env = "INFLUXDB3_LOAD_DATA_SPEC_PATH")] + spec_path: Option<String>, + + /// The name of the builtin spec to run. Use this instead of spec_path if you want to run + /// one of the builtin specs as is. + #[clap(long = "builtin-spec", env = "INFLUXDB3_LOAD_BUILTIN_SPEC")] + builtin_spec: Option<String>, + + /// The name of the builtin spec to print to stdout. This is useful for seeing the structure + /// of the builtin as a starting point for creating your own. + #[clap(long = "print-spec")] + print_spec: Option<String>, + + /// Sampling interval for the writers. They will generate data at this interval and + /// sleep for the remainder of the interval. Writers stagger writes by this interval divided + /// by the number of writers. + #[clap( + short = 'i', + long = "interval", + env = "INFLUXDB3_LOAD_SAMPLING_INTERVAL", + default_value = "1s" + )] + sampling_interval: humantime::Duration, + + /// Number of simultaneous writers. Each writer will generate data at the specified interval. 
+ #[clap( + short = 'w', + long = "writer-count", + env = "INFLUXDB3_LOAD_WRITERS", + default_value = "1" + )] + writer_count: usize, + + /// Tells the generator to run a single sample for each writer in `writer-count` and output the data to stdout. + #[clap(long = "dry-run", default_value = "false")] + dry_run: bool, + + /// The date and time at which to start the timestamps of the generated data. + /// + /// Can be an exact datetime like `2020-01-01T01:23:45-05:00` or a fuzzy + /// specification like `1 hour` in the past. If not specified, defaults to now. + #[clap(long, action)] + start: Option<String>, + + /// The date and time at which to stop the timestamps of the generated data. + /// + /// Can be an exact datetime like `2020-01-01T01:23:45-05:00` or a fuzzy + /// specification like `1 hour` in the future. If not specified, data will continue generating forever. + #[clap(long, action)] + end: Option<String>, + + /// The file that will be used to write the results of the run. If not specified, results + /// will be written to <spec_name>_results.csv in the current directory. + #[clap( + short = 'r', + long = "results", + env = "INFLUXDB3_WRITE_LOAD_RESULTS_FILE" + )] + results_file: Option<String>, +} + +pub(crate) async fn command(config: Config) -> Result<(), anyhow::Error> { + let built_in_specs = crate::specs::built_in_specs(); + + if config.spec_path.is_none() && config.print_spec.is_none() && config.builtin_spec.is_none() { + let example = built_in_specs.first().unwrap(); + let mut generators = create_generators(&example.write_spec, 2).unwrap(); + let t = 123; + let dry_run_output_1 = generators.get_mut(0).unwrap().dry_run(t); + let dry_run_output_2 = generators.get_mut(1).unwrap().dry_run(t); + + let builtin_help = built_in_specs + .iter() + .map(|spec| { + format!( + "name: {}\ndescription: {}\n", + spec.write_spec.name, spec.description + ) + }) + .collect::<Vec<String>>() + .join("\n"); + + println!( + r#"You didn't provide a spec path, which is required. For more information about the arguments for this command run: + + influxdb_load_generator write --help + +There are some built in specs that you can run just by specifying their name. If you want +to see the JSON for their structure as a starting point, specify their name as the --print-spec +argument. Here's a list of the builtin specs: + +{} + +Or, if you need a more detailed writeup on specs and how they work here are details about +the example. A spec is just a JSON object specifying how to generate measurements and their +tags and fields. All data will have a millisecond timestamp generated (with that precision +specified) and aligned with the sampling. The generator will run against a single database +and can have many concurrent writers. The spec indicates the shape of the data that should +be generated. + +As the generator runs, it will output basic information to stdout. The stats of each +individual request will be written to a results CSV file that you can use after the run to +analyze the performance of write requests to the server. + +In the data spec there is an array of measurements. Within each is an array of tags +and an array of fields. Measurements have a name while tags and fields have keys (i.e. tag +key and field key). Tags and fields are scoped to the measurement they are under. If a +tag with key 'foo' appears under two different measurements they are considered different +tags. The same goes for fields. All measurements must have at least 1 field and can have 0 +or more tags. 
+ +The measurement, tag and field structs have an option called 'copies' which is an integer. +When specified, the data generator will create that many copies of the measurement, tag, +or field and append the copy number to the name/keys. This is useful for generating a large +schema in a test. + +Tags have two options that work together that need explanation: cardinality, and +lines_per_sample. Cardinality is the number of unique values that the tag will have. +This cardinality will be split across the number of writers in a test run. Thus if you have +1,000 cardinality and a single writer, the unique values will all get written by that writer. +If you have 1,000 cardinality and 10 writers, each writer will write 100 unique values. + +The lines_per_sample option on the measurement is used to control how many of the unique +values are used in a single sampling round. If not specified, all unique values will be used. +This number will be rounded down to the cardinality of the tag with the highest cardinality +for the measurement. This is done on a per writer basis. If you have lines_per_sample of 10 +and a tag of 100 cardinality with 1 writer, it will generate 10 lines of that measurement with +each unique tag value going to the next 10 values on the next sample, taking 10 samples to get +through the 100 uniques before it cycles back to the beginning. + +Separately, cardinality of tags will be split across the number of writers you have. So if +you have cardinality of 100 and 1 writer, by default it will generate 100 lines of that +measurement with each unique tag value. If you have 10 writers, each writer will generate 10 +unique tag values. Thus with 10 writers, the lines_per_sample would max at 10 since each +sample can only generate 10 unique tag values. + +The tag spec also has a boolean option called "append_writer_id". Writers are the individual +threads that run and generate and write samples at the same time. The number is set through +the parameter --writer-count. If append_writer_id is set to true, the generator will append +the writer id to the tag value. This is useful for generating unique tag values across +writers, simulating a host id or something similar. + +Fields have options for generating static data, or randomly generated data within a range. For +strings, you can specify a static string or a random string of a certain length. Another option +worth noting is the null_probability. This is a float between 0 and 1 that indicates the probability +that a field will be null. If this option is used, you must have another field that does not use +this option (i.e. you must always have at least one field that is guaranteed to have a value). + +If you're unsure how an option works or what it will produce, the easiest thing to do is to create +a file and run the generator with the --dry-run option. This will output the data to stdout so you +can see what it looks like before you run it against a server. It will use the --writer-count +value and show what each writer would send in a sample. + +The example below shows this functionality generating different kinds of tags and +fields of different value types. First, we show the spec, then we show the output that gets +generated on a dry-run so you can see how the spec translates into generated line protocol. + +Here's the spec: + +{} + +And when run with writer count set to 2, here's what will be sent in a request by each writer. 
+ +Writer 1: +{} +Writer 2: +{}"#, + builtin_help, + example.write_spec.to_json_string_pretty().unwrap(), + dry_run_output_1, + dry_run_output_2 + ); + + return Ok(()); + } + + // if print spec is set, print the spec and exit + if let Some(spec_name) = config.print_spec { + let spec = built_in_specs + .iter() + .find(|spec| spec.write_spec.name == spec_name) + .context("Spec not found")?; + println!("{}", spec.write_spec.to_json_string_pretty()?); + return Ok(()); + } + + // if builtin spec is set, use that instead of the spec path + let spec = if let Some(builtin_spec) = config.builtin_spec { + let builtin = built_in_specs + .into_iter() + .find(|spec| spec.write_spec.name == builtin_spec) + .context("Spec not found")?; + println!("using builtin spec: {}", builtin.write_spec.name); + builtin.write_spec + } else { + println!("reading spec from: {}", config.spec_path.as_ref().unwrap()); + DataSpec::from_path(&config.spec_path.unwrap())? + }; + + println!( + "creating generators for {} concurrent writers", + config.writer_count + ); + let mut generators = + create_generators(&spec, config.writer_count).context("failed to create generators")?; + + // if dry run is set, output from each generator its id and then a single sample + if config.dry_run { + println!("running dry run for each writer\n"); + for g in &mut generators { + let t = Local::now(); + let dry_run_output = g.dry_run(t.timestamp_millis()); + println!("Writer {}:\n{}", g.writer_id, dry_run_output); + } + return Ok(()); + } + + let start_time = if let Some(start_time) = config.start { + let start_time = parse_time_offset(&start_time, Local::now()); + println!("starting writers from a start time of {:?}. Historical replay will happen as fast as possible until catching up to now or hitting the end time.", start_time); + Some(start_time) + } else { + None + }; + + let end_time = if let Some(end_time) = config.end { + let end_time = parse_time_offset(&end_time, Local::now()); + println!("ending at {:?}", end_time); + Some(end_time) + } else { + println!( + "running indefinitely with each writer sending a request every {}", + config.sampling_interval + ); + None + }; + + let results_file = config + .results_file + .unwrap_or_else(|| format!("{}_results.csv", spec.name)); + + // exit if the results file already exists + if std::path::Path::new(&results_file).exists() { + eprintln!( + "results file already exists, use a different file name or delete it and re-run: {}", + results_file + ); + std::process::exit(1); + } + + println!("writing results to: {}", results_file); + + let write_reporter = + Arc::new(WriteReporter::new(&results_file).context("failed to create write reporter")?); + + // blocking task to periodically flush the report to disk + let reporter = Arc::clone(&write_reporter); + tokio::task::spawn_blocking(move || { + reporter.flush_reports(); + }); + + // spawn tokio tasks for each writer + let client = create_client( + config.influxdb3_config.host_url, + config.influxdb3_config.auth_token, + )?; + let mut tasks = Vec::new(); + for generator in generators { + let reporter = Arc::clone(&write_reporter); + let database_name = config.influxdb3_config.database_name.clone(); + let sampling_interval = config.sampling_interval.into(); + let task = tokio::spawn(run_generator( + generator, + client.clone(), + database_name, + reporter, + sampling_interval, + start_time, + end_time, + )); + tasks.push(task); + } + + // wait for all tasks to complete + for task in tasks { + task.await?; + } + println!("all writers finished"); + + 
write_reporter.shutdown(); + println!("reporter closed and results written to {}", results_file); + + Ok(()) +} + +fn create_client( + host_url: Url, + auth_token: Option<Secret<String>>, +) -> Result<Client, influxdb3_client::Error> { + let mut client = Client::new(host_url)?; + if let Some(t) = auth_token { + client = client.with_auth_token(t.expose_secret()); + } + Ok(client) +} + +fn parse_time_offset(s: &str, now: DateTime<Local>) -> DateTime<Local> { + humantime::parse_rfc3339(s) + .map(Into::into) + .unwrap_or_else(|_| { + let std_duration = humantime::parse_duration(s).expect("Could not parse time"); + let chrono_duration = chrono::Duration::from_std(std_duration) + .expect("Could not convert std::time::Duration to chrono::Duration"); + now - chrono_duration + }) +} + +async fn run_generator( + mut generator: Generator, + client: Client, + database_name: String, + reporter: Arc<WriteReporter>, + sampling_interval: Duration, + start_time: Option<DateTime<Local>>, + end_time: Option<DateTime<Local>>, +) { + let mut sample_buffer = vec![]; + + // if the start time is set, load the historical samples as quickly as possible + if let Some(mut start_time) = start_time { + let mut sample_len = write_sample( + &mut generator, + sample_buffer, + &client, + &database_name, + start_time, + &reporter, + true, + ) + .await; + + loop { + start_time = start_time.add(sampling_interval); + if start_time > Local::now() + || end_time + .map(|end_time| start_time > end_time) + .unwrap_or(false) + { + println!( + "writer {} finished historical replay at: {:?}", + generator.writer_id, start_time + ); + break; + } + + sample_buffer = Vec::with_capacity(sample_len); + sample_len = write_sample( + &mut generator, + sample_buffer, + &client, + &database_name, + start_time, + &reporter, + false, + ) + .await; + } + } + + // write data until end time or forever + let mut interval = tokio::time::interval(sampling_interval); + let mut sample_len = 1024 * 1024 * 1024; + + // we only want to print the error the very first time it happens + let mut print_err = false; + + loop { + interval.tick().await; + let now = Local::now(); + if let Some(end_time) = end_time { + if now > end_time { + println!( + "writer {} finished writing to end time: {:?}", + generator.writer_id, end_time + ); + return; + } + } + + sample_buffer = Vec::with_capacity(sample_len); + sample_len = write_sample( + &mut generator, + sample_buffer, + &client, + &database_name, + now, + &reporter, + print_err, + ) + .await; + print_err = true; + } +} + +async fn write_sample( + generator: &mut Generator, + mut buffer: Vec<u8>, + client: &Client, + database_name: &String, + sample_time: DateTime<Local>, + reporter: &Arc<WriteReporter>, + print_err: bool, +) -> usize { + // generate the sample, and keep track of the length to set the buffer size for the next loop + let summary = generator + .write_sample_to(sample_time.timestamp_millis(), &mut buffer) + .expect("failed to write sample"); + let sample_len = buffer.len(); + let body = String::from_utf8(buffer).expect("failed to convert sample to string"); + + // time and send the write request + let start_request = Instant::now(); + let res = client + .api_v3_write_lp(database_name) + .precision(Precision::Millisecond) + .accept_partial(false) + .body(body) + .send() + .await; + let response_time = start_request.elapsed().as_millis() as u64; + + // log the report + match res { + Ok(_) => { + reporter.report_write(generator.writer_id, summary, response_time, Local::now()); + } + Err(e) => { + // if it's 
the first error, print the details + if print_err { + eprintln!( + "Error on writer {} writing to server: {:?}", + generator.writer_id, e + ); + } + reporter.report_failure(generator.writer_id, response_time, Local::now()); + } + } + + sample_len +} diff --git a/influxdb3_load_generator/src/line_protocol_generator.rs b/influxdb3_load_generator/src/line_protocol_generator.rs new file mode 100644 index 0000000000..895f2d529e --- /dev/null +++ b/influxdb3_load_generator/src/line_protocol_generator.rs @@ -0,0 +1,557 @@ +//! This contains the logic for creating generators for a given spec for the number of workers. + +use crate::specification::{DataSpec, FieldKind, MeasurementSpec}; +use rand::distributions::Alphanumeric; +use rand::rngs::SmallRng; +use rand::{Rng, SeedableRng}; +use std::collections::HashMap; +use std::io::Write; +use std::ops::Range; +use std::sync::Arc; +use tokio::io; + +pub type WriterId = usize; + +pub fn create_generators( + spec: &DataSpec, + writer_count: usize, +) -> Result<Vec<Generator>, anyhow::Error> { + let mut generators = vec![]; + let mut arc_strings = HashMap::new(); + + for writer_id in 1..writer_count + 1 { + let mut measurements = vec![]; + + for m in &spec.measurements { + let copies = m.copies.unwrap_or(1); + + for measurement_id in 1..copies + 1 { + measurements.push(create_measurement( + m, + writer_id, + writer_count, + measurement_id, + &mut arc_strings, + )); + } + } + + generators.push(Generator { + writer_id, + measurements, + }); + } + + Ok(generators) +} + +fn create_measurement<'a>( + spec: &'a MeasurementSpec, + writer_id: WriterId, + writer_count: usize, + measurement_id: usize, + arc_strings: &mut HashMap<&'a str, Arc<str>>, +) -> Measurement { + let name = Arc::clone(arc_strings.entry(spec.name.as_str()).or_insert_with(|| { + let m = spec.name.replace(' ', "\\ ").replace(',', "\\,"); + Arc::from(m.as_str()) + })); + + let max_cardinality = spec + .tags + .iter() + .map(|t| t.cardinality.unwrap_or(1)) + .max() + .unwrap_or(1); + let max_cardinality = usize::div_ceil(max_cardinality, writer_count); + let lines_per_sample = spec.lines_per_sample.unwrap_or(max_cardinality); + + let mut tags = vec![]; + + for t in &spec.tags { + let key = Arc::clone(arc_strings.entry(t.key.as_str()).or_insert_with(|| { + let k = t + .key + .replace(' ', "\\ ") + .replace(',', "\\,") + .replace('=', "\\="); + Arc::from(k.as_str()) + })); + + let value = t.value.as_ref().map(|v| { + Arc::clone(arc_strings.entry(v.as_str()).or_insert_with(|| { + let v = v + .replace(' ', "\\ ") + .replace(',', "\\,") + .replace('=', "\\="); + Arc::from(v.as_str()) + })) + }); + + let (cardinality_id_min, cardinality_id_max) = t + .cardinality_min_max(writer_id, writer_count) + .unwrap_or((0, 0)); + + let append_writer_id = t.append_writer_id.unwrap_or(false); + let append_copy_id = t.append_copy_id.unwrap_or(false); + let copies = t.copies.unwrap_or(1); + + for copy_id in 1..copies + 1 { + tags.push(Tag { + key: Arc::clone(&key), + value: value.clone(), + copy_id, + cardinality_id_min, + cardinality_id_max, + cardinality_id_current: cardinality_id_min, + append_writer_id, + append_copy_id, + }); + } + } + + let mut fields = vec![]; + + for f in &spec.fields { + let key = Arc::clone(arc_strings.entry(f.key.as_str()).or_insert_with(|| { + let k = f + .key + .replace(' ', "\\ ") + .replace(',', "\\,") + .replace('=', "\\="); + Arc::from(k.as_str()) + })); + + let copies = f.copies.unwrap_or(1); + + for copy_id in 1..copies + 1 { + let random_null = f.null_probability.map(|p| (p, 
SmallRng::from_entropy())); + + match &f.field { + FieldKind::Bool(_) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::Boolean(BooleanValue::Random( + SmallRng::from_entropy(), + )), + }); + } + FieldKind::String(s) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::String(StringValue::Fixed(Arc::clone( + arc_strings + .entry(s.as_str()) + .or_insert_with(|| Arc::from(s.as_str())), + ))), + }); + } + FieldKind::StringRandom(size) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::String(StringValue::Random( + *size, + SmallRng::from_entropy(), + )), + }); + } + FieldKind::Integer(i) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::Integer(IntegerValue::Fixed(*i)), + }); + } + FieldKind::IntegerRange(min, max) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::Integer(IntegerValue::Random( + Range { + start: *min, + end: *max, + }, + SmallRng::from_entropy(), + )), + }); + } + FieldKind::Float(f) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::Float(FloatValue::Fixed(*f)), + }); + } + FieldKind::FloatRange(min, max) => { + fields.push(Field { + key: Arc::clone(&key), + copy_id, + random_null, + field_value: FieldValue::Float(FloatValue::Random( + Range { + start: *min, + end: *max, + }, + SmallRng::from_entropy(), + )), + }); + } + } + } + } + + Measurement { + name, + copy_id: measurement_id, + tags, + fields, + lines_per_sample, + } +} + +/// This struct holds the generator for each writer. +#[derive(Debug)] +pub struct Generator { + pub writer_id: WriterId, + measurements: Vec<Measurement>, +} + +impl Generator { + pub fn new(writer_id: WriterId) -> Self { + Self { + writer_id, + measurements: Vec::new(), + } + } + + /// Return a single sample run from the generator as a string. 
+ pub fn dry_run(&mut self, timestamp: i64) -> String { + // create a buffer and write a single sample to it + let mut buffer = Vec::new(); + self.write_sample_to(timestamp, &mut buffer) + .expect("writing to buffer should succeed"); + + // convert the buffer to a string and return it + String::from_utf8(buffer).expect("buffer should be valid utf8") + } + + pub fn write_sample_to<W: Write>( + &mut self, + timestamp: i64, + mut w: W, + ) -> io::Result<WriteSummary> { + let mut write_summary = WriteSummary { + bytes_written: 0, + lines_written: 0, + tags_written: 0, + fields_written: 0, + }; + + let mut w = ByteCounter::new(&mut w); + + for measurement in &mut self.measurements { + for _ in 0..measurement.lines_per_sample { + if measurement.copy_id > 1 { + write!(w, "{}_{}", measurement.name, measurement.copy_id)?; + } else { + write!(w, "{}", measurement.name)?; + } + + for tag in &mut measurement.tags { + tag.write_to(self.writer_id, &mut w)?; + } + write_summary.tags_written += measurement.tags.len(); + + for (i, field) in measurement.fields.iter_mut().enumerate() { + let separator = if i == 0 { " " } else { "," }; + write!(w, "{}", separator)?; + field.write_to(&mut w)?; + } + write_summary.fields_written += measurement.fields.len(); + + writeln!(w, " {}", timestamp)?; + + write_summary.lines_written += 1; + } + } + + write_summary.bytes_written = w.bytes_written(); + + Ok(write_summary) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct WriteSummary { + pub bytes_written: usize, + pub lines_written: usize, + pub tags_written: usize, + pub fields_written: usize, +} + +#[derive(Debug)] +struct Measurement { + name: Arc<str>, + copy_id: usize, + tags: Vec<Tag>, + fields: Vec<Field>, + lines_per_sample: usize, +} + +#[derive(Debug)] +struct Tag { + key: Arc<str>, + value: Option<Arc<str>>, + copy_id: usize, + cardinality_id_min: usize, + cardinality_id_max: usize, + cardinality_id_current: usize, + append_writer_id: bool, + append_copy_id: bool, +} + +impl Tag { + fn write_to<W: Write>( + &mut self, + writer_id: WriterId, + w: &mut ByteCounter<W>, + ) -> io::Result<()> { + if self.copy_id > 1 { + write!(w, ",{}_{}=", self.key, self.copy_id)?; + } else { + write!(w, ",{}=", self.key)?; + } + + if let Some(v) = &self.value { + write!(w, "{}", v)?; + } + + // append the writer id with a preceding w if we're supposed to + if self.append_writer_id { + write!(w, "{}", writer_id)?; + } + + // append the copy id with a preceding c if we're supposed to + if self.append_copy_id { + write!(w, "{}", self.copy_id)?; + } + + // keep track of the cardinality id if min and max are different + if self.cardinality_id_min != 0 && self.cardinality_id_max != 0 { + // reset the id back to min if we've cycled through them all + if self.cardinality_id_current > self.cardinality_id_max { + self.cardinality_id_current = self.cardinality_id_min; + } + + // write the cardinality counter value to the tag value + write!(w, "{}", self.cardinality_id_current)?; + + self.cardinality_id_current += 1; + } + + Ok(()) + } +} + +#[derive(Debug)] +struct Field { + key: Arc<str>, + copy_id: usize, + random_null: Option<(f64, SmallRng)>, + field_value: FieldValue, +} + +#[derive(Debug)] +enum FieldValue { + Integer(IntegerValue), + Float(FloatValue), + String(StringValue), + Boolean(BooleanValue), +} + +impl Field { + fn write_to<W: Write>(&mut self, w: &mut ByteCounter<W>) -> io::Result<()> { + // if there are random nulls, check and return without writing the field if it hits the + // probability + if let Some((probability, 
rng)) = &mut self.random_null { + let val: f64 = rng.gen(); + if val <= *probability { + return Ok(()); + } + } + + if self.copy_id > 1 { + write!(w, "{}_{}=", self.key, self.copy_id)?; + } else { + write!(w, "{}=", self.key)?; + } + + match &mut self.field_value { + FieldValue::Integer(f) => match f { + IntegerValue::Fixed(v) => write!(w, "{}i", v)?, + IntegerValue::Random(range, rng) => { + let v: i64 = rng.gen_range(range.clone()); + write!(w, "{}i", v)?; + } + }, + FieldValue::Float(f) => match f { + FloatValue::Fixed(v) => write!(w, "{}", v)?, + FloatValue::Random(range, rng) => { + let v: f64 = rng.gen_range(range.clone()); + write!(w, "{:.3}", v)?; + } + }, + FieldValue::String(s) => match s { + StringValue::Fixed(v) => write!(w, "\"{}\"", v)?, + StringValue::Random(size, rng) => { + let random: String = rng + .sample_iter(&Alphanumeric) + .take(*size) + .map(char::from) + .collect(); + + write!(w, "\"{}\"", random)?; + } + }, + FieldValue::Boolean(f) => match f { + BooleanValue::Random(rng) => { + let v: bool = rng.gen(); + write!(w, "{}", v)?; + } + }, + } + + Ok(()) + } +} + +#[derive(Debug)] +enum IntegerValue { + Fixed(i64), + Random(Range<i64>, SmallRng), +} + +#[derive(Debug)] +enum FloatValue { + Fixed(f64), + Random(Range<f64>, SmallRng), +} + +#[derive(Debug)] +enum StringValue { + Fixed(Arc<str>), + Random(usize, SmallRng), +} + +#[derive(Debug)] +enum BooleanValue { + Random(SmallRng), +} + +struct ByteCounter<W> { + inner: W, + count: usize, +} + +impl<W> ByteCounter<W> +where + W: Write, +{ + fn new(inner: W) -> Self { + Self { inner, count: 0 } + } + + fn bytes_written(&self) -> usize { + self.count + } +} + +impl<W> Write for ByteCounter<W> +where + W: Write, +{ + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + let res = self.inner.write(buf); + if let Ok(size) = res { + self.count += size + } + res + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::specification::{FieldSpec, TagSpec}; + #[test] + fn example_spec_lp() { + let spec = DataSpec { + name: "foo".to_string(), + measurements: vec![MeasurementSpec { + name: "m".to_string(), + tags: vec![TagSpec { + key: "t".to_string(), + copies: Some(2), + append_copy_id: None, + value: Some("w".to_string()), + append_writer_id: None, + cardinality: Some(10), + }], + fields: vec![ + FieldSpec { + key: "i".to_string(), + copies: Some(2), + null_probability: None, + field: FieldKind::Integer(42), + }, + FieldSpec { + key: "f".to_string(), + copies: None, + null_probability: None, + field: FieldKind::Float(6.8), + }, + FieldSpec { + key: "s".to_string(), + copies: None, + null_probability: None, + field: FieldKind::String("hello".to_string()), + }, + ], + copies: Some(1), + lines_per_sample: Some(2), + }], + }; + let mut generators = create_generators(&spec, 2).unwrap(); + + let lp = generators.get_mut(0).unwrap().dry_run(123); + let actual: Vec<&str> = lp.split('\n').collect(); + let expected: Vec<&str> = vec![ + "m,t=w1,t_2=w1 i=42i,i_2=42i,f=6.8,s=\"hello\" 123", + "m,t=w2,t_2=w2 i=42i,i_2=42i,f=6.8,s=\"hello\" 123", + "", + ]; + assert_eq!(actual, expected); + + let lp = generators.get_mut(1).unwrap().dry_run(567); + let actual: Vec<&str> = lp.split('\n').collect(); + let expected: Vec<&str> = vec![ + "m,t=w6,t_2=w6 i=42i,i_2=42i,f=6.8,s=\"hello\" 567", + "m,t=w7,t_2=w7 i=42i,i_2=42i,f=6.8,s=\"hello\" 567", + "", + ]; + assert_eq!(actual, expected); + } +} diff --git a/influxdb3_load_generator/src/main.rs 
b/influxdb3_load_generator/src/main.rs new file mode 100644 index 0000000000..bc65c76005 --- /dev/null +++ b/influxdb3_load_generator/src/main.rs @@ -0,0 +1,166 @@ +//! Entrypoint of InfluxDB IOx binary +#![recursion_limit = "512"] // required for print_cpu +#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] +#![warn( + missing_debug_implementations, + clippy::explicit_iter_loop, + clippy::use_self, + clippy::clone_on_ref_ptr, + clippy::future_not_send +)] + +pub mod line_protocol_generator; +pub mod report; +pub mod specification; +mod specs; + +pub mod commands { + pub mod common; + pub mod query; + pub mod write; +} + +use dotenvy::dotenv; +use observability_deps::tracing::warn; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; +use tokio::runtime::Runtime; + +enum ReturnCode { + Failure = 1, +} + +#[derive(Debug, clap::Parser)] +#[clap( +name = "influxdb3_load_generator", +disable_help_flag = true, +arg( +clap::Arg::new("help") +.long("help") +.help("Print help information") +.action(clap::ArgAction::Help) +.global(true) +), +about = "InfluxDB 3.0 Load Generator for writes and queries", +long_about = r#"InfluxDB 3.0 Load Generator for writes and queries + +Examples: + # Run the write load generator + influxdb3_load_generator write --help + + # Generate a sample write spec + influxdb3_load_generator write --generate-spec + + # Run the the query load generator + influxdb3_load_generator query --help + + # Generate a sample query spec + influxdb3_load_generator query --generate-spec + + # Display all commands + influxdb3_load_generator --help +"# +)] +struct Config { + #[clap(subcommand)] + command: Option<Command>, +} + +// Ignoring clippy here since this enum is just used for running +// the CLI command +#[allow(clippy::large_enum_variant)] +#[derive(Debug, clap::Parser)] +#[allow(clippy::large_enum_variant)] +enum Command { + /// Perform a query against a running InfluxDB 3.0 server + Query(commands::query::Config), + + /// Perform a set of writes to a running InfluxDB 3.0 server + Write(commands::write::Config), +} + +fn main() -> Result<(), std::io::Error> { + // load all environment variables from .env before doing anything + load_dotenv(); + + let config: Config = clap::Parser::parse(); + + let tokio_runtime = get_runtime(None)?; + tokio_runtime.block_on(async move { + match config.command { + None => println!("command required, --help for help"), + Some(Command::Query(config)) => { + if let Err(e) = commands::query::command(config).await { + eprintln!("Query command failed: {e}"); + std::process::exit(ReturnCode::Failure as _) + } + } + Some(Command::Write(config)) => { + if let Err(e) = commands::write::command(config).await { + eprintln!("Write command failed: {e}"); + std::process::exit(ReturnCode::Failure as _) + } + } + } + }); + + Ok(()) +} + +/// Creates the tokio runtime for executing +/// +/// if nthreads is none, uses the default scheduler +/// otherwise, creates a scheduler with the number of threads +fn get_runtime(num_threads: Option<usize>) -> Result<Runtime, std::io::Error> { + // NOTE: no log macros will work here! + // + // That means use eprintln!() instead of error!() and so on. The log emitter + // requires a running tokio runtime and is initialised after this function. 
+ + use tokio::runtime::Builder; + let kind = std::io::ErrorKind::Other; + match num_threads { + None => Runtime::new(), + Some(num_threads) => { + println!("Setting number of threads to '{num_threads}' per command line request"); + + let thread_counter = Arc::new(AtomicUsize::new(1)); + match num_threads { + 0 => { + let msg = + format!("Invalid num-threads: '{num_threads}' must be greater than zero"); + Err(std::io::Error::new(kind, msg)) + } + 1 => Builder::new_current_thread().enable_all().build(), + _ => Builder::new_multi_thread() + .enable_all() + .thread_name_fn(move || { + format!("IOx main {}", thread_counter.fetch_add(1, Ordering::SeqCst)) + }) + .worker_threads(num_threads) + .build(), + } + } + } +} + +/// Source the .env file before initialising the Config struct - this sets +/// any envs in the file, which the Config struct then uses. +/// +/// Precedence is given to existing env variables. +fn load_dotenv() { + match dotenv() { + Ok(_) => {} + Err(dotenvy::Error::Io(err)) if err.kind() == std::io::ErrorKind::NotFound => { + // Ignore this - a missing env file is not an error, defaults will + // be applied when initialising the Config struct. + } + Err(e) => { + eprintln!("FATAL Error loading config from: {e}"); + eprintln!("Aborting"); + std::process::exit(1); + } + }; +} diff --git a/influxdb3_load_generator/src/report.rs b/influxdb3_load_generator/src/report.rs new file mode 100644 index 0000000000..c11368cdcb --- /dev/null +++ b/influxdb3_load_generator/src/report.rs @@ -0,0 +1,196 @@ +//! Trackers and report generators for write and query runs + +use crate::line_protocol_generator::{WriteSummary, WriterId}; +use anyhow::Context; +use chrono::{DateTime, Local}; +use parking_lot::Mutex; +use std::collections::HashMap; +use std::time::{Duration, Instant}; +// Logged reports will be flushed to the csv file on this interval +const REPORT_FLUSH_INTERVAL: Duration = Duration::from_millis(100); + +const CONSOLE_REPORT_INTERVAL: Duration = Duration::from_secs(1); + +#[derive(Debug, Clone, Copy)] +pub struct WriterReport { + summary: Option<WriteSummary>, // failed write if none + write_instant: Instant, + wall_time: DateTime<Local>, + response_time_ms: u64, + writer_id: usize, +} + +#[derive(Debug)] +pub struct WriteReporter { + state: Mutex<Vec<WriterReport>>, + csv_writer: Mutex<csv::Writer<std::fs::File>>, + shutdown: Mutex<bool>, +} + +impl WriteReporter { + pub fn new(csv_filename: &str) -> Result<Self, anyhow::Error> { + // open csv file for writing + let mut csv_writer = csv::Writer::from_path(csv_filename)?; + // write header + csv_writer + .write_record([ + "writer_id", + "response", + "latency_ms", + "test_time_ms", + "sample_number", + "bytes", + "lines", + "tags", + "fields", + "wall_time", + ]) + .context("failed to write csv report header")?; + + Ok(Self { + state: Mutex::new(Vec::new()), + csv_writer: Mutex::new(csv_writer), + shutdown: Mutex::new(false), + }) + } + + pub fn report_failure( + &self, + writer_id: usize, + response_time_ms: u64, + wall_time: DateTime<Local>, + ) { + let mut state = self.state.lock(); + state.push(WriterReport { + summary: None, + write_instant: Instant::now(), + wall_time, + response_time_ms, + writer_id, + }); + } + + pub fn report_write( + &self, + writer_id: usize, + summary: WriteSummary, + response_time_ms: u64, + wall_time: DateTime<Local>, + ) { + let mut state = self.state.lock(); + state.push(WriterReport { + summary: Some(summary), + write_instant: Instant::now(), + wall_time, + response_time_ms, + writer_id, + }); + } + + 
/// Run in a spawn blocking task to flush reports to the csv file + pub fn flush_reports(&self) { + let start_time = Instant::now(); + let mut sample_counts: HashMap<WriterId, usize> = HashMap::new(); + let mut console_stats = ConsoleReportStats::new(); + + loop { + let reports = { + let mut state = self.state.lock(); + let mut reports = Vec::with_capacity(state.len()); + std::mem::swap(&mut reports, &mut *state); + reports + }; + + let mut csv_writer = self.csv_writer.lock(); + for report in reports { + let test_time = report.write_instant.duration_since(start_time).as_millis(); + let sample_number = sample_counts.entry(report.writer_id).or_insert(0); + *sample_number += 1; + + if let Some(summary) = report.summary { + csv_writer + .write_record(&[ + report.writer_id.to_string(), + "200".to_string(), + report.response_time_ms.to_string(), + test_time.to_string(), + sample_number.to_string(), + summary.bytes_written.to_string(), + summary.lines_written.to_string(), + summary.tags_written.to_string(), + summary.fields_written.to_string(), + report.wall_time.to_string(), + ]) + .expect("failed to write csv report record"); + + console_stats.success += 1; + console_stats.lines += summary.lines_written; + console_stats.bytes += summary.bytes_written; + } else { + csv_writer + .write_record(&[ + report.writer_id.to_string(), + "500".to_string(), + report.response_time_ms.to_string(), + test_time.to_string(), + sample_number.to_string(), + "0".to_string(), + "0".to_string(), + "0".to_string(), + "0".to_string(), + report.wall_time.to_string(), + ]) + .expect("failed to write csv report record"); + + console_stats.error += 1; + } + } + + csv_writer.flush().expect("failed to flush csv reports"); + + if console_stats.last_console_output_time.elapsed() > CONSOLE_REPORT_INTERVAL { + let elapsed_millis = console_stats.last_console_output_time.elapsed().as_millis(); + + println!( + "success: {:.0}/s, error: {:.0}/s, lines: {:.0}/s, bytes: {:.0}/s", + console_stats.success as f64 / elapsed_millis as f64 * 1000.0, + console_stats.error as f64 / elapsed_millis as f64 * 1000.0, + console_stats.lines as f64 / elapsed_millis as f64 * 1000.0, + console_stats.bytes as f64 / elapsed_millis as f64 * 1000.0, + ); + + console_stats = ConsoleReportStats::new(); + } + + if *self.shutdown.lock() { + return; + } + + std::thread::sleep(REPORT_FLUSH_INTERVAL); + } + } + + pub fn shutdown(&self) { + *self.shutdown.lock() = true; + } +} + +struct ConsoleReportStats { + last_console_output_time: Instant, + success: usize, + error: usize, + lines: usize, + bytes: usize, +} + +impl ConsoleReportStats { + fn new() -> Self { + Self { + last_console_output_time: Instant::now(), + success: 0, + error: 0, + lines: 0, + bytes: 0, + } + } +} diff --git a/influxdb3_load_generator/src/specification.rs b/influxdb3_load_generator/src/specification.rs new file mode 100644 index 0000000000..0d6c063f50 --- /dev/null +++ b/influxdb3_load_generator/src/specification.rs @@ -0,0 +1,170 @@ +use crate::line_protocol_generator::WriterId; +use anyhow::Context; +use serde::{Deserialize, Serialize}; + +/// The specification for the data to be generated +#[derive(Debug, Deserialize, Serialize)] +pub struct DataSpec { + /// The name of this spec + pub name: String, + /// The measurements to be generated for each sample + pub measurements: Vec<MeasurementSpec>, +} + +impl DataSpec { + pub fn from_path(path: &str) -> Result<Self, anyhow::Error> { + let contents = std::fs::read_to_string(path)?; + let res = serde_json::from_str(&contents)?; + + Ok(res) 
+ } + + pub fn to_json_string_pretty(&self) -> Result<String, anyhow::Error> { + let res = serde_json::to_string_pretty(&self).context("failed to encode json to string")?; + Ok(res) + } +} + +/// Specification for a measurement to be generated +#[derive(Debug, Deserialize, Serialize)] +pub struct MeasurementSpec { + /// The name of the measurement + pub name: String, + /// The tags to be generated for each line + pub tags: Vec<TagSpec>, + /// The fields to be generated for each line + pub fields: Vec<FieldSpec>, + /// Create this many copies of this measurement in each sample. The copy number will be + /// appended to the measurement name to uniquely identify it. + #[serde(skip_serializing_if = "Option::is_none")] + pub copies: Option<usize>, + /// If this measurement has tags with cardinality, this is the number of lines that will + /// be output per sample (up to the highest cardinality tag). If not specified, all unique + /// values will be used. Cardinality is split across the number of workers, so the number + /// of lines per sample could be less than this number. + #[serde(skip_serializing_if = "Option::is_none")] + pub lines_per_sample: Option<usize>, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct TagSpec { + /// the key/name of the tag + pub key: String, + + /// have this many copies of this tag in the measurement. Random values will be generated + /// independently (i.e. copies won't share the same random value). Will add the copy number to + /// the key of the tag to uniquely identify it. + #[serde(skip_serializing_if = "Option::is_none")] + pub copies: Option<usize>, + /// if set, appends the copy id of the tag to the value of the tag + #[serde(skip_serializing_if = "Option::is_none")] + pub append_copy_id: Option<bool>, + + /// output this string value for every line this tag is present + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option<String>, + + /// if set, appends the writer id to the value of the tag + #[serde(skip_serializing_if = "Option::is_none")] + pub append_writer_id: Option<bool>, + + /// will add a number to the value of the tag, with this number of unique values + #[serde(skip_serializing_if = "Option::is_none")] + pub cardinality: Option<usize>, +} + +impl TagSpec { + pub fn cardinality_min_max( + &self, + writer_id: WriterId, + writer_count: usize, + ) -> Option<(usize, usize)> { + if let Some(cardinality) = self.cardinality { + let cardinality_increment = usize::div_ceil(cardinality, writer_count); + let cardinality_id_min = writer_id * cardinality_increment - cardinality_increment + 1; + let cardinality_id_max = cardinality_id_min + cardinality_increment - 1; + + Some((cardinality_id_min, cardinality_id_max)) + } else { + None + } + } +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct FieldSpec { + // These options apply to any type of field + /// the key/name of the field + pub key: String, + /// have this many copies of this field in the measurement. Random values will be generated + /// independently (i.e. copies won't share the same random value). Will add the copy number to + /// the key of the field to uniquely identify it. + #[serde(skip_serializing_if = "Option::is_none")] + pub copies: Option<usize>, + /// A float between 0.0 and 1.0 that determines the probability that this field will be null. + /// At least one field in a measurement should not have this option set. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub null_probability: Option<f64>, + + #[serde(flatten)] + pub field: FieldKind, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum FieldKind { + /// generates a random bool for the value of this field + Bool(bool), + /// output this string value for every line this field is present + String(String), + /// generate a random string of this length for every line this field is present + StringRandom(usize), + /// output this integer value for every line this field is present + Integer(i64), + /// generate a random integer in this range for every line this field is present + IntegerRange(i64, i64), + /// output this float value for every line this field is present + Float(f64), + /// generate a random float in this range for every line this field is present + FloatRange(f64, f64), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn tag_spec_splits_cardinality_for_writers() { + let mut tag_spec = TagSpec { + key: "".to_string(), + copies: None, + append_copy_id: None, + value: None, + append_writer_id: None, + cardinality: Some(100), + }; + + let (min, max) = tag_spec.cardinality_min_max(1, 10).unwrap(); + assert_eq!(min, 1); + assert_eq!(max, 10); + let (min, max) = tag_spec.cardinality_min_max(2, 10).unwrap(); + assert_eq!(min, 11); + assert_eq!(max, 20); + let (min, max) = tag_spec.cardinality_min_max(10, 10).unwrap(); + assert_eq!(min, 91); + assert_eq!(max, 100); + + // if the cardinality is not evenly divisible by the number of writers, the last writer + // will go over the cardinality set + tag_spec.cardinality = Some(30); + let (min, max) = tag_spec.cardinality_min_max(1, 7).unwrap(); + assert_eq!(min, 1); + assert_eq!(max, 5); + let (min, max) = tag_spec.cardinality_min_max(4, 7).unwrap(); + assert_eq!(min, 16); + assert_eq!(max, 20); + let (min, max) = tag_spec.cardinality_min_max(7, 7).unwrap(); + assert_eq!(min, 31); + assert_eq!(max, 35); + } +} diff --git a/influxdb3_load_generator/src/specs/example.rs b/influxdb3_load_generator/src/specs/example.rs new file mode 100644 index 0000000000..c0620007b2 --- /dev/null +++ b/influxdb3_load_generator/src/specs/example.rs @@ -0,0 +1,131 @@ +//! Spec that shows the various elements of the data generator. Gets printed to console when +//! the generator is run without a spec specified. 
+ +use crate::specification::*; +use crate::specs::BuiltInSpec; + +pub(crate) fn spec() -> BuiltInSpec { + let description = + r#"Example that shows the various elements of the data generator."#.to_string(); + let write_spec = DataSpec { + name: "sample_spec".to_string(), + measurements: vec![ + MeasurementSpec { + name: "some_measurement".to_string(), + tags: vec![ + TagSpec { + key: "some_tag".to_string(), + copies: None, + append_copy_id: None, + value: Some("a-value-here".to_string()), + append_writer_id: None, + cardinality: None, + }, + TagSpec { + key: "random_data_tag".to_string(), + copies: None, + append_copy_id: None, + value: Some("card-val-".to_string()), + append_writer_id: None, + cardinality: Some(2), + }, + TagSpec { + key: "higher_cardinality_data_tag".to_string(), + copies: None, + append_copy_id: None, + value: Some("card-val-".to_string()), + append_writer_id: None, + cardinality: Some(6), + }, + TagSpec { + key: "copied_tag".to_string(), + copies: Some(3), + append_copy_id: Some(true), + value: Some("copy-val-".to_string()), + append_writer_id: None, + cardinality: None, + }, + TagSpec { + key: "writer_id".to_string(), + copies: None, + append_copy_id: None, + value: Some("writer-id-".to_string()), + append_writer_id: Some(true), + cardinality: None, + }, + ], + fields: vec![ + FieldSpec { + key: "f1".to_string(), + copies: None, + null_probability: None, + field: FieldKind::Float(1.2), + }, + FieldSpec { + key: "i1".to_string(), + copies: None, + null_probability: Some(0.6), + field: FieldKind::Integer(5), + }, + ], + copies: None, + lines_per_sample: None, + }, + MeasurementSpec { + name: "copied_measurement".to_string(), + tags: vec![], + fields: vec![ + FieldSpec { + key: "random_string".to_string(), + copies: None, + null_probability: None, + field: FieldKind::StringRandom(10), + }, + FieldSpec { + key: "constant_string".to_string(), + copies: None, + null_probability: None, + field: FieldKind::String("a constant string".to_string()), + }, + FieldSpec { + key: "random_integer".to_string(), + copies: None, + null_probability: None, + field: FieldKind::IntegerRange(1, 100), + }, + FieldSpec { + key: "constant_integer".to_string(), + copies: None, + null_probability: None, + field: FieldKind::Integer(42), + }, + FieldSpec { + key: "random_float".to_string(), + copies: None, + null_probability: None, + field: FieldKind::FloatRange(1.0, 100.0), + }, + FieldSpec { + key: "constant_float".to_string(), + copies: None, + null_probability: None, + field: FieldKind::Float(6.8), + }, + FieldSpec { + key: "random_bool".to_string(), + copies: None, + null_probability: None, + field: FieldKind::Bool(true), + }, + ], + copies: Some(2), + lines_per_sample: None, + }, + ], + }; + + BuiltInSpec { + description, + write_spec, + } +} diff --git a/influxdb3_load_generator/src/specs/mod.rs b/influxdb3_load_generator/src/specs/mod.rs new file mode 100644 index 0000000000..acc270fd88 --- /dev/null +++ b/influxdb3_load_generator/src/specs/mod.rs @@ -0,0 +1,17 @@ +//! This module contains the built-in specifications for the load generator. 
+ +use crate::specification::DataSpec; +mod example; +mod one_mil; + +/// Get all built-in specs +pub(crate) fn built_in_specs() -> Vec<BuiltInSpec> { + // add new built-in specs here to the end of this vec + vec![example::spec(), one_mil::spec()] +} + +/// A built-in specification for the load generator +pub(crate) struct BuiltInSpec { + pub(crate) description: String, + pub(crate) write_spec: DataSpec, +} diff --git a/influxdb3_load_generator/src/specs/one_mil.rs b/influxdb3_load_generator/src/specs/one_mil.rs new file mode 100644 index 0000000000..c14411cb73 --- /dev/null +++ b/influxdb3_load_generator/src/specs/one_mil.rs @@ -0,0 +1,45 @@ +//! Spec for the 1 million series use case + +use crate::specification::*; +use crate::specs::BuiltInSpec; + +pub(crate) fn spec() -> BuiltInSpec { + let description = + r#"1 million series in a single table use case. If you run this with -writer-count=100 + you'll get all 1M series written every sampling interval. Our primary test is interval=10s"#.to_string(); + let write_spec = DataSpec { + name: "one_mil".to_string(), + measurements: vec![MeasurementSpec { + name: "measurement_data".to_string(), + tags: vec![TagSpec { + key: "series_id".to_string(), + copies: None, + append_copy_id: None, + value: Some("series-number-".to_string()), + append_writer_id: None, + cardinality: Some(1_000_000), + }], + fields: vec![ + FieldSpec { + key: "int_val".to_string(), + copies: Some(10), + null_probability: None, + field: FieldKind::IntegerRange(1, 100_000_000), + }, + FieldSpec { + key: "float_val".to_string(), + copies: Some(10), + null_probability: None, + field: FieldKind::FloatRange(1.0, 100.0), + }, + ], + copies: None, + lines_per_sample: Some(10_000), + }], + }; + + BuiltInSpec { + description, + write_spec, + } +}
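The cardinality handling in the load generator diff above (TagSpec::cardinality_min_max and its unit test) is what lets multiple writers share one tag's value space without colliding: each writer is handed a contiguous block of ceil(cardinality / writer_count) values. A minimal standalone sketch of that arithmetic follows; the free function and main below are illustrative only, not code from the crate.

// Sketch of the split performed by TagSpec::cardinality_min_max: writer N takes
// the N-th block of ceil(cardinality / writer_count) tag values; when the split
// is uneven, the last writer overshoots the configured cardinality.
fn cardinality_min_max(writer_id: usize, writer_count: usize, cardinality: usize) -> (usize, usize) {
    let increment = cardinality.div_ceil(writer_count);
    let min = writer_id * increment - increment + 1;
    let max = min + increment - 1;
    (min, max)
}

fn main() {
    // Mirrors the unit test above: 100 values over 10 writers -> blocks of 10.
    assert_eq!(cardinality_min_max(1, 10, 100), (1, 10));
    assert_eq!(cardinality_min_max(10, 10, 100), (91, 100));
    // 30 values over 7 writers -> blocks of 5; writer 7 overshoots to 35.
    assert_eq!(cardinality_min_max(7, 7, 30), (31, 35));
}

Keeping each writer's block disjoint is also what makes the one_mil spec add up: 1,000,000 series split across 100 writers gives each writer 10,000 series, matching its lines_per_sample of 10_000.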
964b2f6b9737df75cd0f4e1cee96e03ce70a4d40
Joe-Blount
2023-08-14 15:00:19
compactor simulator math error creates 0 byte files (#8478)
* fix: math error in simulator results in 0 byte files during simulations * chore: insta churn from simulator file size fix
null
fix: compactor simulator math error creates 0 byte files (#8478) * fix: math error in simulator results in 0 byte files during simulations * chore: insta churn from simulator file size fix
diff --git a/compactor/tests/layouts/backfill.rs b/compactor/tests/layouts/backfill.rs index 632c2ff2c4..6c044a7211 100644 --- a/compactor/tests/layouts/backfill.rs +++ b/compactor/tests/layouts/backfill.rs @@ -234,7 +234,7 @@ async fn random_backfill_empty_partition() { - "L0 " - "L0.?[42,356] 1.05us 33mb |-----------L0.?------------| " - "L0.?[357,670] 1.05us 33mb |-----------L0.?------------| " - - "L0.?[671,986] 1.05us 34mb |------------L0.?------------| " + - "L0.?[671,986] 1.05us 33mb |------------L0.?------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L0.52, L0.53, L0.54" - " Creating 9 files" @@ -249,164 +249,208 @@ async fn random_backfill_empty_partition() { - "L1 " - "L1.?[0,357] 1.04us 100mb |-------------L1.?-------------| " - "L1.?[358,714] 1.04us 100mb |-------------L1.?-------------| " - - "L1.?[715,986] 1.04us 77mb |---------L1.?---------| " + - "L1.?[715,986] 1.04us 76mb |---------L1.?---------| " - "Committing partition 1:" - " Soft Deleting 5 files: L0.51, L0.55, L0.56, L0.57, L0.58" - " Creating 3 files" - - "**** Simulation run 7, type=split(ReduceOverlap)(split_times=[714]). 1 Input Files, 67mb total:" + - "**** Simulation run 7, type=split(HighL0OverlapSingleFile)(split_times=[476]). 1 Input Files, 66mb total:" + - "L0, all files 66mb " + - "L0.59[357,670] 1.04us |-----------------------------------------L0.59------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 66mb total:" + - "L0 " + - "L0.?[357,476] 1.04us 25mb|--------------L0.?--------------| " + - "L0.?[477,670] 1.04us 41mb |------------------------L0.?-------------------------| " + - "**** Simulation run 8, type=split(HighL0OverlapSingleFile)(split_times=[476]). 1 Input Files, 100mb total:" + - "L1, all files 100mb " + - "L1.65[358,714] 1.04us |-----------------------------------------L1.65------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "L1 " + - "L1.?[358,476] 1.04us 33mb|-----------L1.?------------| " + - "L1.?[477,714] 1.04us 67mb |--------------------------L1.?---------------------------| " + - "**** Simulation run 9, type=split(HighL0OverlapSingleFile)(split_times=[238]). 1 Input Files, 100mb total:" + - "L1, all files 100mb " + - "L1.64[0,357] 1.04us |-----------------------------------------L1.64------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "L1 " + - "L1.?[0,238] 1.04us 67mb |---------------------------L1.?---------------------------| " + - "L1.?[239,357] 1.04us 33mb |-----------L1.?------------| " + - "**** Simulation run 10, type=split(HighL0OverlapSingleFile)(split_times=[476]). 1 Input Files, 33mb total:" + - "L0, all files 33mb " + - "L0.62[357,670] 1.05us |-----------------------------------------L0.62------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" + - "L0 " + - "L0.?[357,476] 1.05us 13mb|--------------L0.?--------------| " + - "L0.?[477,670] 1.05us 21mb |------------------------L0.?-------------------------| " + - "Committing partition 1:" + - " Soft Deleting 4 files: L0.59, L0.62, L1.64, L1.65" + - " Creating 8 files" + - "**** Simulation run 11, type=split(ReduceOverlap)(split_times=[714]). 
1 Input Files, 67mb total:" - "L0, all files 67mb " - "L0.60[671,986] 1.04us |-----------------------------------------L0.60------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 67mb total:" - "L0 " - "L0.?[671,714] 1.04us 9mb |---L0.?---| " - "L0.?[715,986] 1.04us 58mb |-----------------------------------L0.?------------------------------------| " - - "**** Simulation run 8, type=split(ReduceOverlap)(split_times=[357]). 1 Input Files, 66mb total:" - - "L0, all files 66mb " - - "L0.59[357,670] 1.04us |-----------------------------------------L0.59------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 66mb total:" + - "**** Simulation run 12, type=split(ReduceOverlap)(split_times=[357]). 1 Input Files, 25mb total:" + - "L0, all files 25mb " + - "L0.67[357,476] 1.04us |-----------------------------------------L0.67------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 25mb total:" + - "L0 " + - "L0.?[357,357] 1.04us 217kb|L0.?| " + - "L0.?[358,476] 1.04us 25mb|-----------------------------------------L0.?------------------------------------------| " + - "**** Simulation run 13, type=split(ReduceOverlap)(split_times=[238]). 1 Input Files, 33mb total:" + - "L0, all files 33mb " + - "L0.61[42,356] 1.05us |-----------------------------------------L0.61------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - - "L0.?[357,357] 1.04us 0b |L0.?| " - - "L0.?[358,670] 1.04us 66mb|-----------------------------------------L0.?------------------------------------------| " - - "**** Simulation run 9, type=split(ReduceOverlap)(split_times=[714]). 1 Input Files, 34mb total:" - - "L0, all files 34mb " + - "L0.?[42,238] 1.05us 21mb |-------------------------L0.?-------------------------| " + - "L0.?[239,356] 1.05us 12mb |-------------L0.?--------------| " + - "**** Simulation run 14, type=split(ReduceOverlap)(split_times=[714]). 1 Input Files, 33mb total:" + - "L0, all files 33mb " - "L0.63[671,986] 1.05us |-----------------------------------------L0.63------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 34mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - "L0.?[671,714] 1.05us 5mb |---L0.?---| " - "L0.?[715,986] 1.05us 29mb |-----------------------------------L0.?------------------------------------| " - - "**** Simulation run 10, type=split(ReduceOverlap)(split_times=[357]). 1 Input Files, 33mb total:" - - "L0, all files 33mb " - - "L0.62[357,670] 1.05us |-----------------------------------------L0.62------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" + - "**** Simulation run 15, type=split(ReduceOverlap)(split_times=[357]). 
1 Input Files, 13mb total:" + - "L0, all files 13mb " + - "L0.73[357,476] 1.05us |-----------------------------------------L0.73------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 13mb total:" - "L0 " - - "L0.?[357,357] 1.05us 0b |L0.?| " - - "L0.?[358,670] 1.05us 33mb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[357,357] 1.05us 108kb|L0.?| " + - "L0.?[358,476] 1.05us 13mb|-----------------------------------------L0.?------------------------------------------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L0.59, L0.60, L0.62, L0.63" - - " Creating 8 files" - - "**** Simulation run 11, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[260, 520]). 5 Input Files, 275mb total:" + - " Soft Deleting 5 files: L0.60, L0.61, L0.63, L0.67, L0.73" + - " Creating 10 files" + - "**** Simulation run 16, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[467, 695]). 7 Input Files, 209mb total:" - "L0 " - - "L0.69[357,357] 1.04us 0b |L0.69| " - - "L0.70[358,670] 1.04us 66mb |----------------L0.70----------------| " - - "L0.67[671,714] 1.04us 9mb |L0.67|" + - "L0.77[357,357] 1.04us 217kb |L0.77| " + - "L0.78[358,476] 1.04us 25mb |-------L0.78--------| " + - "L0.68[477,670] 1.04us 41mb |--------------L0.68---------------| " + - "L0.75[671,714] 1.04us 9mb |L0.75-| " - "L1 " - - "L1.64[0,357] 1.04us 100mb|-------------------L1.64-------------------| " - - "L1.65[358,714] 1.04us 100mb |------------------L1.65-------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 275mb total:" + - "L1.72[239,357] 1.04us 33mb|-------L1.72--------| " + - "L1.69[358,476] 1.04us 33mb |-------L1.69--------| " + - "L1.70[477,714] 1.04us 67mb |------------------L1.70-------------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 209mb total:" - "L1 " - - "L1.?[0,260] 1.04us 100mb |-------------L1.?-------------| " - - "L1.?[261,520] 1.04us 100mb |-------------L1.?-------------| " - - "L1.?[521,714] 1.04us 75mb |---------L1.?---------| " + - "L1.?[239,467] 1.04us 101mb|------------------L1.?-------------------| " + - "L1.?[468,695] 1.04us 100mb |------------------L1.?-------------------| " + - "L1.?[696,714] 1.04us 8mb |L1.?|" - "Committing partition 1:" - - " Soft Deleting 5 files: L1.64, L1.65, L0.67, L0.69, L0.70" + - " Soft Deleting 7 files: L0.68, L1.69, L1.70, L1.72, L0.75, L0.77, L0.78" - " Creating 3 files" - - "**** Simulation run 12, type=split(ReduceOverlap)(split_times=[520]). 1 Input Files, 33mb total:" - - "L0, all files 33mb " - - "L0.74[358,670] 1.05us |-----------------------------------------L0.74------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" + - "**** Simulation run 17, type=split(ReduceOverlap)(split_times=[695]). 1 Input Files, 5mb total:" + - "L0, all files 5mb " + - "L0.81[671,714] 1.05us |-----------------------------------------L0.81------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 5mb total:" - "L0 " - - "L0.?[358,520] 1.05us 17mb|--------------------L0.?--------------------| " - - "L0.?[521,670] 1.05us 16mb |------------------L0.?------------------| " - - "**** Simulation run 13, type=split(ReduceOverlap)(split_times=[260]). 
1 Input Files, 33mb total:" - - "L0, all files 33mb " - - "L0.61[42,356] 1.05us |-----------------------------------------L0.61------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" + - "L0.?[671,695] 1.05us 3mb |----------------------L0.?----------------------| " + - "L0.?[696,714] 1.05us 2mb |---------------L0.?----------------| " + - "**** Simulation run 18, type=split(ReduceOverlap)(split_times=[467]). 1 Input Files, 13mb total:" + - "L0, all files 13mb " + - "L0.84[358,476] 1.05us |-----------------------------------------L0.84------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 13mb total:" - "L0 " - - "L0.?[42,260] 1.05us 23mb |----------------------------L0.?----------------------------| " - - "L0.?[261,356] 1.05us 10mb |----------L0.?-----------| " + - "L0.?[358,467] 1.05us 12mb|--------------------------------------L0.?---------------------------------------| " + - "L0.?[468,476] 1.05us 975kb |L0.?| " - "Committing partition 1:" - - " Soft Deleting 2 files: L0.61, L0.74" + - " Soft Deleting 2 files: L0.81, L0.84" - " Creating 4 files" - - "**** Simulation run 14, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[917]). 2 Input Files, 135mb total:" + - "**** Simulation run 19, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[918]). 2 Input Files, 134mb total:" - "L0 " - - "L0.68[715,986] 1.04us 58mb|-----------------------------------------L0.68------------------------------------------|" + - "L0.76[715,986] 1.04us 58mb|-----------------------------------------L0.76------------------------------------------|" - "L1 " - - "L1.66[715,986] 1.04us 77mb|-----------------------------------------L1.66------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 135mb total:" + - "L1.66[715,986] 1.04us 76mb|-----------------------------------------L1.66------------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 134mb total:" - "L1 " - - "L1.?[715,917] 1.04us 100mb|------------------------------L1.?-------------------------------| " - - "L1.?[918,986] 1.04us 34mb |--------L1.?--------| " + - "L1.?[715,918] 1.04us 100mb|------------------------------L1.?-------------------------------| " + - "L1.?[919,986] 1.04us 33mb |--------L1.?--------| " - "Committing partition 1:" - - " Soft Deleting 2 files: L1.66, L0.68" + - " Soft Deleting 2 files: L1.66, L0.76" - " Creating 2 files" - - "**** Simulation run 15, type=split(ReduceOverlap)(split_times=[917]). 1 Input Files, 29mb total:" + - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[918]). 
1 Input Files, 29mb total:" - "L0, all files 29mb " - - "L0.72[715,986] 1.05us |-----------------------------------------L0.72------------------------------------------|" + - "L0.82[715,986] 1.05us |-----------------------------------------L0.82------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 29mb total:" - "L0 " - - "L0.?[715,917] 1.05us 22mb|------------------------------L0.?-------------------------------| " - - "L0.?[918,986] 1.05us 7mb |--------L0.?--------| " + - "L0.?[715,918] 1.05us 22mb|------------------------------L0.?-------------------------------| " + - "L0.?[919,986] 1.05us 7mb |--------L0.?--------| " - "Committing partition 1:" - - " Soft Deleting 1 files: L0.72" + - " Soft Deleting 1 files: L0.82" - " Creating 2 files" - - "**** Simulation run 16, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[208, 416]). 6 Input Files, 251mb total:" + - "**** Simulation run 21, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[220, 440]). 6 Input Files, 213mb total:" - "L0 " - - "L0.80[42,260] 1.05us 23mb |---------------L0.80---------------| " - - "L0.81[261,356] 1.05us 10mb |----L0.81-----| " - - "L0.73[357,357] 1.05us 0b |L0.73| " - - "L0.78[358,520] 1.05us 17mb |----------L0.78-----------| " + - "L0.79[42,238] 1.05us 21mb |---------------L0.79---------------| " + - "L0.80[239,356] 1.05us 12mb |-------L0.80--------| " + - "L0.83[357,357] 1.05us 108kb |L0.83| " + - "L0.90[358,467] 1.05us 12mb |-------L0.90-------| " - "L1 " - - "L1.75[0,260] 1.04us 100mb|-------------------L1.75-------------------| " - - "L1.76[261,520] 1.04us 100mb |------------------L1.76-------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 251mb total:" + - "L1.71[0,238] 1.04us 67mb |-------------------L1.71-------------------| " + - "L1.85[239,467] 1.04us 101mb |------------------L1.85------------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 213mb total:" - "L1 " - - "L1.?[0,208] 1.05us 100mb |---------------L1.?---------------| " - - "L1.?[209,416] 1.05us 100mb |--------------L1.?---------------| " - - "L1.?[417,520] 1.05us 51mb |-----L1.?------| " + - "L1.?[0,220] 1.05us 100mb |------------------L1.?------------------| " + - "L1.?[221,440] 1.05us 100mb |------------------L1.?------------------| " + - "L1.?[441,467] 1.05us 12mb |L1.?|" - "Committing partition 1:" - - " Soft Deleting 6 files: L0.73, L1.75, L1.76, L0.78, L0.80, L0.81" + - " Soft Deleting 6 files: L1.71, L0.79, L0.80, L0.83, L1.85, L0.90" - " Creating 3 files" - - "**** Simulation run 17, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[701, 881]). 7 Input Files, 259mb total:" + - "**** Simulation run 22, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[643, 818]). 
10 Input Files, 297mb total:" - "L0 " - - "L0.85[918,986] 1.05us 7mb |---L0.85---| " - - "L0.84[715,917] 1.05us 22mb |----------------L0.84----------------| " - - "L0.71[671,714] 1.05us 5mb |L0.71-| " - - "L0.79[521,670] 1.05us 16mb|----------L0.79-----------| " + - "L0.95[919,986] 1.05us 7mb |--L0.95--| " + - "L0.94[715,918] 1.05us 22mb |--------------L0.94--------------| " + - "L0.89[696,714] 1.05us 2mb |L0.89| " + - "L0.88[671,695] 1.05us 3mb |L0.88| " + - "L0.74[477,670] 1.05us 21mb |-------------L0.74-------------| " + - "L0.91[468,476] 1.05us 975kb|L0.91| " - "L1 " - - "L1.83[918,986] 1.04us 34mb |---L1.83---| " - - "L1.82[715,917] 1.04us 100mb |----------------L1.82----------------| " - - "L1.77[521,714] 1.04us 75mb|---------------L1.77---------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 259mb total:" + - "L1.93[919,986] 1.04us 33mb |--L1.93--| " + - "L1.92[715,918] 1.04us 100mb |--------------L1.92--------------| " + - "L1.87[696,714] 1.04us 8mb |L1.87| " + - "L1.86[468,695] 1.04us 100mb|----------------L1.86----------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 297mb total:" - "L1 " - - "L1.?[521,701] 1.05us 100mb|--------------L1.?--------------| " - - "L1.?[702,881] 1.05us 100mb |--------------L1.?--------------| " - - "L1.?[882,986] 1.05us 59mb |-------L1.?-------| " + - "L1.?[468,643] 1.05us 101mb|------------L1.?------------| " + - "L1.?[644,818] 1.05us 100mb |------------L1.?------------| " + - "L1.?[819,986] 1.05us 96mb |-----------L1.?------------| " - "Committing partition 1:" - - " Soft Deleting 7 files: L0.71, L1.77, L0.79, L1.82, L1.83, L0.84, L0.85" + - " Soft Deleting 10 files: L0.74, L1.86, L1.87, L0.88, L0.89, L0.91, L1.92, L1.93, L0.94, L0.95" - " Creating 3 files" - - "**** Simulation run 18, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[406, 603]). 3 Input Files, 251mb total:" + - "**** Simulation run 23, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[420, 619]). 3 Input Files, 213mb total:" - "L1 " - - "L1.87[209,416] 1.05us 100mb|---------------L1.87---------------| " - - "L1.88[417,520] 1.05us 51mb |-----L1.88------| " - - "L1.89[521,701] 1.05us 100mb |------------L1.89-------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 251mb total:" + - "L1.97[221,440] 1.05us 100mb|-------------------L1.97--------------------| " + - "L1.98[441,467] 1.05us 12mb |L1.98| " + - "L1.99[468,643] 1.05us 101mb |---------------L1.99---------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 213mb total:" - "L2 " - - "L2.?[209,406] 1.05us 100mb|---------------L2.?---------------| " - - "L2.?[407,603] 1.05us 100mb |--------------L2.?---------------| " - - "L2.?[604,701] 1.05us 50mb |-----L2.?------| " + - "L2.?[221,420] 1.05us 101mb|------------------L2.?------------------| " + - "L2.?[421,619] 1.05us 100mb |------------------L2.?------------------| " + - "L2.?[620,643] 1.05us 12mb |L2.?|" - "Committing partition 1:" - - " Soft Deleting 3 files: L1.87, L1.88, L1.89" - - " Upgrading 1 files level to CompactionLevel::L2: L1.86" + - " Soft Deleting 3 files: L1.97, L1.98, L1.99" + - " Upgrading 1 files level to CompactionLevel::L2: L1.96" - " Creating 3 files" - - "**** Simulation run 19, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[881]). 
2 Input Files, 159mb total:" - - "L1 " - - "L1.91[882,986] 1.05us 59mb |------------L1.91-------------| " - - "L1.90[702,881] 1.05us 100mb|------------------------L1.90-------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 159mb total:" + - "**** Simulation run 24, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[952]). 1 Input Files, 96mb total:" + - "L1, all files 96mb " + - "L1.101[819,986] 1.05us |-----------------------------------------L1.101-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 96mb total:" - "L2 " - - "L2.?[702,881] 1.05us 100mb|-------------------------L2.?-------------------------| " - - "L2.?[882,986] 1.05us 59mb |-------------L2.?-------------| " + - "L2.?[819,952] 1.05us 77mb|--------------------------------L2.?---------------------------------| " + - "L2.?[953,986] 1.05us 19mb |-----L2.?------| " - "Committing partition 1:" - - " Soft Deleting 2 files: L1.90, L1.91" + - " Soft Deleting 1 files: L1.101" + - " Upgrading 1 files level to CompactionLevel::L2: L1.100" - " Creating 2 files" - - "**** Final Output Files (2.83gb written)" + - "**** Final Output Files (2.89gb written)" - "L2 " - - "L2.86[0,208] 1.05us 100mb|-----L2.86------| " - - "L2.92[209,406] 1.05us 100mb |-----L2.92-----| " - - "L2.93[407,603] 1.05us 100mb |-----L2.93-----| " - - "L2.94[604,701] 1.05us 50mb |L2.94-| " - - "L2.95[702,881] 1.05us 100mb |----L2.95-----| " - - "L2.96[882,986] 1.05us 59mb |-L2.96-| " + - "L2.96[0,220] 1.05us 100mb|------L2.96-------| " + - "L2.100[644,818] 1.05us 100mb |---L2.100----| " + - "L2.102[221,420] 1.05us 101mb |-----L2.102-----| " + - "L2.103[421,619] 1.05us 100mb |-----L2.103-----| " + - "L2.104[620,643] 1.05us 12mb |L2.104| " + - "L2.105[819,952] 1.05us 77mb |--L2.105--| " + - "L2.106[953,986] 1.05us 19mb |L2.106|" "### ); } @@ -651,7 +695,7 @@ async fn random_backfill_over_l2s() { - "L0 " - "L0.?[42,356] 1.01us 33mb |-----------L0.?------------| " - "L0.?[357,670] 1.01us 33mb |-----------L0.?------------| " - - "L0.?[671,986] 1.01us 34mb |------------L0.?------------| " + - "L0.?[671,986] 1.01us 33mb |------------L0.?------------| " - "**** Simulation run 6, type=split(HighL0OverlapSingleFile)(split_times=[356, 670]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.62[42,986] 1.02us |-----------------------------------------L0.62------------------------------------------|" @@ -659,7 +703,7 @@ async fn random_backfill_over_l2s() { - "L0 " - "L0.?[42,356] 1.02us 33mb |-----------L0.?------------| " - "L0.?[357,670] 1.02us 33mb |-----------L0.?------------| " - - "L0.?[671,986] 1.02us 34mb |------------L0.?------------| " + - "L0.?[671,986] 1.02us 33mb |------------L0.?------------| " - "**** Simulation run 7, type=split(HighL0OverlapSingleFile)(split_times=[356, 670]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.63[42,986] 1.03us |-----------------------------------------L0.63------------------------------------------|" @@ -667,7 +711,7 @@ async fn random_backfill_over_l2s() { - "L0 " - "L0.?[42,356] 1.03us 33mb |-----------L0.?------------| " - "L0.?[357,670] 1.03us 33mb |-----------L0.?------------| " - - "L0.?[671,986] 1.03us 34mb |------------L0.?------------| " + - "L0.?[671,986] 1.03us 33mb |------------L0.?------------| " - "**** Simulation run 8, type=split(HighL0OverlapSingleFile)(split_times=[356, 670]). 
1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.64[42,986] 1.04us |-----------------------------------------L0.64------------------------------------------|" @@ -675,7 +719,7 @@ async fn random_backfill_over_l2s() { - "L0 " - "L0.?[42,356] 1.04us 33mb |-----------L0.?------------| " - "L0.?[357,670] 1.04us 33mb |-----------L0.?------------| " - - "L0.?[671,986] 1.04us 34mb |------------L0.?------------| " + - "L0.?[671,986] 1.04us 33mb |------------L0.?------------| " - "**** Simulation run 9, type=split(HighL0OverlapSingleFile)(split_times=[356, 670]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.65[42,986] 1.05us |-----------------------------------------L0.65------------------------------------------|" @@ -683,7 +727,7 @@ async fn random_backfill_over_l2s() { - "L0 " - "L0.?[42,356] 1.05us 33mb |-----------L0.?------------| " - "L0.?[357,670] 1.05us 33mb |-----------L0.?------------| " - - "L0.?[671,986] 1.05us 34mb |------------L0.?------------| " + - "L0.?[671,986] 1.05us 33mb |------------L0.?------------| " - "Committing partition 1:" - " Soft Deleting 5 files: L0.61, L0.62, L0.63, L0.64, L0.65" - " Creating 15 files" @@ -691,13 +735,13 @@ async fn random_backfill_over_l2s() { - "L0 " - "L0.66[42,356] 1.01us 33mb|-----------L0.66-----------| " - "L0.67[357,670] 1.01us 33mb |-----------L0.67-----------| " - - "L0.68[671,986] 1.01us 34mb |-----------L0.68------------| " + - "L0.68[671,986] 1.01us 33mb |-----------L0.68------------| " - "L0.69[42,356] 1.02us 33mb|-----------L0.69-----------| " - "L0.70[357,670] 1.02us 33mb |-----------L0.70-----------| " - - "L0.71[671,986] 1.02us 34mb |-----------L0.71------------| " + - "L0.71[671,986] 1.02us 33mb |-----------L0.71------------| " - "L0.72[42,356] 1.03us 33mb|-----------L0.72-----------| " - "L0.73[357,670] 1.03us 33mb |-----------L0.73-----------| " - - "L0.74[671,986] 1.03us 34mb |-----------L0.74------------| " + - "L0.74[671,986] 1.03us 33mb |-----------L0.74------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:" - "L1 " - "L1.?[42,357] 1.03us 100mb|------------L1.?------------| " @@ -706,33 +750,33 @@ async fn random_backfill_over_l2s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.73, L0.74" - " Creating 3 files" - - "**** Simulation run 11, type=split(ReduceOverlap)(split_times=[672]). 1 Input Files, 34mb total:" - - "L0, all files 34mb " + - "**** Simulation run 11, type=split(ReduceOverlap)(split_times=[672]). 1 Input Files, 33mb total:" + - "L0, all files 33mb " - "L0.77[671,986] 1.04us |-----------------------------------------L0.77------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 34mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - - "L0.?[671,672] 1.04us 109kb|L0.?| " + - "L0.?[671,672] 1.04us 217kb|L0.?| " - "L0.?[673,986] 1.04us 33mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 12, type=split(ReduceOverlap)(split_times=[357]). 
1 Input Files, 33mb total:" - "L0, all files 33mb " - "L0.76[357,670] 1.04us |-----------------------------------------L0.76------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - - "L0.?[357,357] 1.04us 0b |L0.?| " + - "L0.?[357,357] 1.04us 108kb|L0.?| " - "L0.?[358,670] 1.04us 33mb|-----------------------------------------L0.?------------------------------------------| " - - "**** Simulation run 13, type=split(ReduceOverlap)(split_times=[672]). 1 Input Files, 34mb total:" - - "L0, all files 34mb " + - "**** Simulation run 13, type=split(ReduceOverlap)(split_times=[672]). 1 Input Files, 33mb total:" + - "L0, all files 33mb " - "L0.80[671,986] 1.05us |-----------------------------------------L0.80------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 34mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - - "L0.?[671,672] 1.05us 109kb|L0.?| " + - "L0.?[671,672] 1.05us 217kb|L0.?| " - "L0.?[673,986] 1.05us 33mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 14, type=split(ReduceOverlap)(split_times=[357]). 1 Input Files, 33mb total:" - "L0, all files 33mb " - "L0.79[357,670] 1.05us |-----------------------------------------L0.79------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - - "L0.?[357,357] 1.05us 0b |L0.?| " + - "L0.?[357,357] 1.05us 108kb|L0.?| " - "L0.?[358,670] 1.05us 33mb|-----------------------------------------L0.?------------------------------------------| " - "Committing partition 1:" - " Soft Deleting 4 files: L0.76, L0.77, L0.79, L0.80" @@ -740,12 +784,12 @@ async fn random_backfill_over_l2s() { - "**** Simulation run 15, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[260, 478, 696, 914]). 
10 Input Files, 433mb total:" - "L0 " - "L0.75[42,356] 1.04us 33mb|-----------L0.75-----------| " - - "L0.86[357,357] 1.04us 0b |L0.86| " + - "L0.86[357,357] 1.04us 108kb |L0.86| " - "L0.87[358,670] 1.04us 33mb |-----------L0.87-----------| " - - "L0.84[671,672] 1.04us 109kb |L0.84| " + - "L0.84[671,672] 1.04us 217kb |L0.84| " - "L0.85[673,986] 1.04us 33mb |-----------L0.85-----------| " - "L0.78[42,356] 1.05us 33mb|-----------L0.78-----------| " - - "L0.90[357,357] 1.05us 0b |L0.90| " + - "L0.90[357,357] 1.05us 108kb |L0.90| " - "L1 " - "L1.81[42,357] 1.03us 100mb|-----------L1.81------------| " - "L1.82[358,672] 1.03us 100mb |-----------L1.82-----------| " @@ -756,7 +800,7 @@ async fn random_backfill_over_l2s() { - "L1.?[261,478] 1.05us 100mb |-------L1.?-------| " - "L1.?[479,696] 1.05us 100mb |-------L1.?-------| " - "L1.?[697,914] 1.05us 100mb |-------L1.?-------| " - - "L1.?[915,986] 1.05us 34mb |L1.?| " + - "L1.?[915,986] 1.05us 33mb |L1.?| " - "Committing partition 1:" - " Soft Deleting 10 files: L0.75, L0.78, L1.81, L1.82, L1.83, L0.84, L0.85, L0.86, L0.87, L0.90" - " Creating 5 files" @@ -765,7 +809,7 @@ async fn random_backfill_over_l2s() { - "L0.89[673,986] 1.05us |-----------------------------------------L0.89------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 33mb total:" - "L0 " - - "L0.?[673,696] 1.05us 2mb |L0.?| " + - "L0.?[673,696] 1.05us 3mb |L0.?| " - "L0.?[697,914] 1.05us 23mb |----------------------------L0.?----------------------------| " - "L0.?[915,986] 1.05us 8mb |-------L0.?-------| " - "**** Simulation run 17, type=split(ReduceOverlap)(split_times=[478]). 1 Input Files, 33mb total:" @@ -778,91 +822,91 @@ async fn random_backfill_over_l2s() { - "Committing partition 1:" - " Soft Deleting 2 files: L0.89, L0.91" - " Creating 5 files" - - "**** Simulation run 18, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[447, 633]). 6 Input Files, 235mb total:" + - "**** Simulation run 18, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[446, 631]). 6 Input Files, 236mb total:" - "L0 " - "L0.100[358,478] 1.05us 13mb |--------L0.100--------| " - "L0.101[479,670] 1.05us 20mb |---------------L0.101----------------| " - - "L0.88[671,672] 1.05us 109kb |L0.88|" - - "L0.97[673,696] 1.05us 2mb |L0.97|" + - "L0.88[671,672] 1.05us 217kb |L0.88|" + - "L0.97[673,696] 1.05us 3mb |L0.97|" - "L1 " - "L1.93[261,478] 1.05us 100mb|------------------L1.93-------------------| " - "L1.94[479,696] 1.05us 100mb |------------------L1.94-------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 235mb total:" + - "**** 3 Output Files (parquet_file_id not yet assigned), 236mb total:" - "L1 " - - "L1.?[261,447] 1.05us 100mb|----------------L1.?----------------| " - - "L1.?[448,633] 1.05us 100mb |----------------L1.?----------------| " - - "L1.?[634,696] 1.05us 35mb |---L1.?---| " + - "L1.?[261,446] 1.05us 101mb|----------------L1.?----------------| " + - "L1.?[447,631] 1.05us 100mb |----------------L1.?----------------| " + - "L1.?[632,696] 1.05us 35mb |---L1.?----| " - "Committing partition 1:" - " Soft Deleting 6 files: L0.88, L1.93, L1.94, L0.97, L0.100, L0.101" - " Creating 3 files" - - "**** Simulation run 19, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[873]). 4 Input Files, 165mb total:" + - "**** Simulation run 19, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[874]). 
4 Input Files, 164mb total:" - "L0 " - "L0.99[915,986] 1.05us 8mb |-------L0.99--------| " - "L0.98[697,914] 1.05us 23mb|------------------------------L0.98------------------------------| " - "L1 " - - "L1.96[915,986] 1.05us 34mb |-------L1.96--------| " + - "L1.96[915,986] 1.05us 33mb |-------L1.96--------| " - "L1.95[697,914] 1.05us 100mb|------------------------------L1.95------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 165mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 164mb total:" - "L1 " - - "L1.?[697,873] 1.05us 101mb|------------------------L1.?------------------------| " - - "L1.?[874,986] 1.05us 65mb |--------------L1.?--------------| " + - "L1.?[697,874] 1.05us 100mb|------------------------L1.?-------------------------| " + - "L1.?[875,986] 1.05us 63mb |--------------L1.?--------------| " - "Committing partition 1:" - " Soft Deleting 4 files: L1.95, L1.96, L0.98, L0.99" - " Creating 2 files" - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[499, 599]). 1 Input Files, 100mb total:" - "L1, all files 100mb " - - "L1.103[448,633] 1.05us |-----------------------------------------L1.103-----------------------------------------|" + - "L1.103[447,631] 1.05us |-----------------------------------------L1.103-----------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - - "L1.?[448,499] 1.05us 28mb|---------L1.?---------| " - - "L1.?[500,599] 1.05us 53mb |---------------------L1.?---------------------| " - - "L1.?[600,633] 1.05us 19mb |-----L1.?-----| " - - "**** Simulation run 21, type=split(ReduceOverlap)(split_times=[299, 399]). 1 Input Files, 100mb total:" - - "L1, all files 100mb " - - "L1.102[261,447] 1.05us |----------------------------------------L1.102-----------------------------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "L1.?[447,499] 1.05us 29mb|---------L1.?----------| " + - "L1.?[500,599] 1.05us 54mb |---------------------L1.?---------------------| " + - "L1.?[600,631] 1.05us 17mb |----L1.?-----| " + - "**** Simulation run 21, type=split(ReduceOverlap)(split_times=[299, 399]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.102[261,446] 1.05us |-----------------------------------------L1.102-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - "L1.?[261,299] 1.05us 21mb|------L1.?------| " - - "L1.?[300,399] 1.05us 53mb |--------------------L1.?---------------------| " - - "L1.?[400,447] 1.05us 26mb |--------L1.?--------| " + - "L1.?[300,399] 1.05us 54mb |---------------------L1.?---------------------| " + - "L1.?[400,446] 1.05us 25mb |--------L1.?--------| " - "**** Simulation run 22, type=split(ReduceOverlap)(split_times=[99, 199]). 1 Input Files, 100mb total:" - "L1, all files 100mb " - "L1.92[42,260] 1.05us |-----------------------------------------L1.92------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - - "L1.?[42,99] 1.05us 26mb |--------L1.?---------| " - - "L1.?[100,199] 1.05us 45mb |-----------------L1.?-----------------| " + - "L1.?[42,99] 1.05us 27mb |--------L1.?---------| " + - "L1.?[100,199] 1.05us 46mb |-----------------L1.?-----------------| " - "L1.?[200,260] 1.05us 28mb |---------L1.?---------| " - - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[899]). 
1 Input Files, 65mb total:" - - "L1, all files 65mb " - - "L1.106[874,986] 1.05us |-----------------------------------------L1.106-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 65mb total:" + - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[899]). 1 Input Files, 63mb total:" + - "L1, all files 63mb " + - "L1.106[875,986] 1.05us |-----------------------------------------L1.106-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 63mb total:" - "L1 " - - "L1.?[874,899] 1.05us 14mb|-------L1.?-------| " - - "L1.?[900,986] 1.05us 50mb |-------------------------------L1.?--------------------------------| " - - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[699, 799]). 1 Input Files, 101mb total:" - - "L1, all files 101mb " - - "L1.105[697,873] 1.05us |-----------------------------------------L1.105-----------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" + - "L1.?[875,899] 1.05us 14mb|------L1.?-------| " + - "L1.?[900,986] 1.05us 49mb |-------------------------------L1.?--------------------------------| " + - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[699, 799]). 1 Input Files, 100mb total:" + - "L1, all files 100mb " + - "L1.105[697,874] 1.05us |-----------------------------------------L1.105-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - - "L1.?[697,699] 1.05us 1mb |L1.?| " - - "L1.?[700,799] 1.05us 57mb |----------------------L1.?----------------------| " - - "L1.?[800,873] 1.05us 43mb |---------------L1.?----------------| " + - "L1.?[697,699] 1.05us 2mb |L1.?| " + - "L1.?[700,799] 1.05us 56mb |----------------------L1.?----------------------| " + - "L1.?[800,874] 1.05us 42mb |---------------L1.?----------------| " - "Committing partition 1:" - " Soft Deleting 5 files: L1.92, L1.102, L1.103, L1.105, L1.106" - " Creating 14 files" - "**** Simulation run 25, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[74, 148]). 4 Input Files, 272mb total:" - "L1 " - - "L1.113[42,99] 1.05us 26mb |--------L1.113---------| " - - "L1.114[100,199] 1.05us 45mb |------------------L1.114------------------| " + - "L1.113[42,99] 1.05us 27mb |--------L1.113---------| " + - "L1.114[100,199] 1.05us 46mb |------------------L1.114------------------| " - "L2 " - "L2.1[0,99] 99ns 100mb |-------------------L2.1-------------------| " - "L2.2[100,199] 199ns 100mb |-------------------L2.2-------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 272mb total:" - "L2 " - - "L2.?[0,74] 1.05us 101mb |-------------L2.?--------------| " - - "L2.?[75,148] 1.05us 100mb |-------------L2.?--------------| " - - "L2.?[149,199] 1.05us 71mb |--------L2.?--------| " + - "L2.?[0,74] 1.05us 102mb |-------------L2.?--------------| " + - "L2.?[75,148] 1.05us 101mb |-------------L2.?--------------| " + - "L2.?[149,199] 1.05us 69mb |--------L2.?--------| " - "Committing partition 1:" - " Soft Deleting 4 files: L2.1, L2.2, L1.113, L1.114" - " Creating 3 files" @@ -879,104 +923,104 @@ async fn random_backfill_over_l2s() { - "Committing partition 1:" - " Soft Deleting 3 files: L2.3, L1.110, L1.115" - " Creating 2 files" - - "**** Simulation run 27, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[365]). 
2 Input Files, 153mb total:" + - "**** Simulation run 27, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[365]). 2 Input Files, 154mb total:" - "L1 " - - "L1.111[300,399] 1.05us 53mb|----------------------------------------L1.111-----------------------------------------| " + - "L1.111[300,399] 1.05us 54mb|----------------------------------------L1.111-----------------------------------------| " - "L2 " - "L2.4[300,399] 399ns 100mb|-----------------------------------------L2.4------------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 153mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 154mb total:" - "L2 " - - "L2.?[300,365] 1.05us 101mb|--------------------------L2.?---------------------------| " - - "L2.?[366,399] 1.05us 53mb |-----------L2.?------------| " + - "L2.?[300,365] 1.05us 102mb|--------------------------L2.?---------------------------| " + - "L2.?[366,399] 1.05us 52mb |-----------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L2.4, L1.111" - " Creating 2 files" - "**** Simulation run 28, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[465]). 3 Input Files, 154mb total:" - "L1 " - - "L1.112[400,447] 1.05us 26mb|-----------------L1.112-----------------| " - - "L1.107[448,499] 1.05us 28mb |-------------------L1.107-------------------| " + - "L1.112[400,446] 1.05us 25mb|----------------L1.112-----------------| " + - "L1.107[447,499] 1.05us 29mb |-------------------L1.107--------------------| " - "L2 " - "L2.5[400,499] 499ns 100mb|-----------------------------------------L2.5------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 154mb total:" - "L2 " - - "L2.?[400,465] 1.05us 101mb|--------------------------L2.?---------------------------| " - - "L2.?[466,499] 1.05us 53mb |-----------L2.?------------| " + - "L2.?[400,465] 1.05us 102mb|--------------------------L2.?---------------------------| " + - "L2.?[466,499] 1.05us 52mb |-----------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L2.5, L1.107, L1.112" - " Creating 2 files" - - "**** Simulation run 29, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[565]). 2 Input Files, 153mb total:" + - "**** Simulation run 29, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[565]). 
2 Input Files, 154mb total:" - "L1 " - - "L1.108[500,599] 1.05us 53mb|----------------------------------------L1.108-----------------------------------------| " + - "L1.108[500,599] 1.05us 54mb|----------------------------------------L1.108-----------------------------------------| " - "L2 " - "L2.6[500,599] 599ns 100mb|-----------------------------------------L2.6------------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 153mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 154mb total:" - "L2 " - - "L2.?[500,565] 1.05us 101mb|--------------------------L2.?---------------------------| " - - "L2.?[566,599] 1.05us 53mb |-----------L2.?------------| " + - "L2.?[500,565] 1.05us 102mb|--------------------------L2.?---------------------------| " + - "L2.?[566,599] 1.05us 52mb |-----------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L2.6, L1.108" - " Creating 2 files" - - "**** Simulation run 30, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[665]). 4 Input Files, 155mb total:" + - "**** Simulation run 30, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[665]). 4 Input Files, 154mb total:" - "L1 " - - "L1.109[600,633] 1.05us 19mb|----------L1.109-----------| " - - "L1.104[634,696] 1.05us 35mb |------------------------L1.104------------------------| " - - "L1.118[697,699] 1.05us 1mb |L1.118|" + - "L1.109[600,631] 1.05us 17mb|----------L1.109----------| " + - "L1.104[632,696] 1.05us 35mb |-------------------------L1.104-------------------------| " + - "L1.118[697,699] 1.05us 2mb |L1.118|" - "L2 " - "L2.7[600,699] 699ns 100mb|-----------------------------------------L2.7------------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 155mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 154mb total:" - "L2 " - "L2.?[600,665] 1.05us 102mb|--------------------------L2.?---------------------------| " - - "L2.?[666,699] 1.05us 53mb |-----------L2.?------------| " + - "L2.?[666,699] 1.05us 52mb |-----------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 4 files: L2.7, L1.104, L1.109, L1.118" - " Creating 2 files" - - "**** Simulation run 31, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[764]). 2 Input Files, 157mb total:" + - "**** Simulation run 31, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[764]). 
2 Input Files, 156mb total:" - "L1 " - - "L1.119[700,799] 1.05us 57mb|----------------------------------------L1.119-----------------------------------------| " + - "L1.119[700,799] 1.05us 56mb|----------------------------------------L1.119-----------------------------------------| " - "L2 " - "L2.8[700,799] 799ns 100mb|-----------------------------------------L2.8------------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 157mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:" - "L2 " - - "L2.?[700,764] 1.05us 101mb|--------------------------L2.?--------------------------| " + - "L2.?[700,764] 1.05us 102mb|--------------------------L2.?--------------------------| " - "L2.?[765,799] 1.05us 55mb |------------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L2.8, L1.119" - " Creating 2 files" - - "**** Simulation run 32, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[863]). 3 Input Files, 157mb total:" + - "**** Simulation run 32, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[864]). 3 Input Files, 156mb total:" - "L1 " - - "L1.120[800,873] 1.05us 43mb|-----------------------------L1.120-----------------------------| " - - "L1.116[874,899] 1.05us 14mb |-------L1.116-------| " + - "L1.120[800,874] 1.05us 42mb|-----------------------------L1.120------------------------------| " + - "L1.116[875,899] 1.05us 14mb |------L1.116-------| " - "L2 " - "L2.9[800,899] 899ns 100mb|-----------------------------------------L2.9------------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 157mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:" - "L2 " - - "L2.?[800,863] 1.05us 100mb|-------------------------L2.?--------------------------| " - - "L2.?[864,899] 1.05us 57mb |------------L2.?-------------| " + - "L2.?[800,864] 1.05us 102mb|--------------------------L2.?--------------------------| " + - "L2.?[865,899] 1.05us 55mb |------------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L2.9, L1.116, L1.120" - " Creating 2 files" - "**** Final Output Files (4.05gb written)" - "L1 " - - "L1.117[900,986] 1.05us 50mb |L1.117| " + - "L1.117[900,986] 1.05us 49mb |L1.117| " - "L2 " - "L2.10[900,999] 999ns 100mb |L2.10-| " - - "L2.121[0,74] 1.05us 101mb|L2.121| " - - "L2.122[75,148] 1.05us 100mb |L2.122| " - - "L2.123[149,199] 1.05us 71mb |L2.123| " + - "L2.121[0,74] 1.05us 102mb|L2.121| " + - "L2.122[75,148] 1.05us 101mb |L2.122| " + - "L2.123[149,199] 1.05us 69mb |L2.123| " - "L2.124[200,267] 1.05us 101mb |L2.124| " - "L2.125[268,299] 1.05us 48mb |L2.125| " - - "L2.126[300,365] 1.05us 101mb |L2.126| " - - "L2.127[366,399] 1.05us 53mb |L2.127| " - - "L2.128[400,465] 1.05us 101mb |L2.128| " - - "L2.129[466,499] 1.05us 53mb |L2.129| " - - "L2.130[500,565] 1.05us 101mb |L2.130| " - - "L2.131[566,599] 1.05us 53mb |L2.131| " + - "L2.126[300,365] 1.05us 102mb |L2.126| " + - "L2.127[366,399] 1.05us 52mb |L2.127| " + - "L2.128[400,465] 1.05us 102mb |L2.128| " + - "L2.129[466,499] 1.05us 52mb |L2.129| " + - "L2.130[500,565] 1.05us 102mb |L2.130| " + - "L2.131[566,599] 1.05us 52mb |L2.131| " - "L2.132[600,665] 1.05us 102mb |L2.132| " - - "L2.133[666,699] 1.05us 53mb |L2.133| " - - "L2.134[700,764] 1.05us 101mb |L2.134| " + - "L2.133[666,699] 1.05us 52mb |L2.133| " + - "L2.134[700,764] 1.05us 102mb |L2.134| " - "L2.135[765,799] 1.05us 55mb |L2.135| " 
- - "L2.136[800,863] 1.05us 100mb |L2.136| " - - "L2.137[864,899] 1.05us 57mb |L2.137| " + - "L2.136[800,864] 1.05us 102mb |L2.136| " + - "L2.137[865,899] 1.05us 55mb |L2.137| " "### ); } @@ -2993,67 +3037,69 @@ async fn actual_case_from_catalog_1() { - "WARNING: file L0.161[327,333] 336ns 183mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.162[330,338] 340ns 231mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.163[331,338] 341ns 232mb exceeds soft limit 100mb by more than 50%" - - "**** Final Output Files (15.11gb written)" + - "**** Final Output Files (15.33gb written)" - "L2 " - - "L2.590[134,149] 342ns 202mb |L2.590| " - - "L2.591[150,165] 342ns 218mb |L2.591| " - - "L2.592[166,171] 342ns 118mb |L2.592| " - - "L2.595[183,197] 342ns 267mb |L2.595| " - - "L2.596[198,207] 342ns 157mb |L2.596| " - - "L2.597[208,220] 342ns 147mb |L2.597| " - - "L2.598[221,232] 342ns 270mb |L2.598| " - - "L2.599[233,244] 342ns 147mb |L2.599| " - - "L2.600[245,253] 342ns 139mb |L2.600| " - - "L2.601[254,261] 342ns 105mb |L2.601| " - - "L2.602[262,270] 342ns 184mb |L2.602| " - - "L2.603[271,276] 342ns 117mb |L2.603| " - - "L2.604[277,281] 342ns 109mb |L2.604| " - - "L2.607[297,299] 342ns 141mb |L2.607| " - - "L2.611[309,311] 342ns 101mb |L2.611|" - - "L2.612[312,314] 342ns 181mb |L2.612|" - - "L2.613[315,317] 342ns 214mb |L2.613|" - - "L2.616[318,320] 342ns 222mb |L2.616|" - - "L2.617[321,323] 342ns 146mb |L2.617|" - - "L2.618[324,326] 342ns 254mb |L2.618|" - - "L2.619[327,329] 342ns 197mb |L2.619|" - - "L2.620[330,332] 342ns 228mb |L2.620|" - - "L2.621[333,335] 342ns 199mb |L2.621|" - - "L2.622[336,337] 342ns 156mb |L2.622|" - - "L2.623[338,338] 342ns 124mb |L2.623|" - - "L2.624[1,36] 342ns 103mb |L2.624-| " - - "L2.625[37,71] 342ns 103mb |L2.625-| " - - "L2.626[72,83] 342ns 103mb |L2.626| " - - "L2.634[84,94] 342ns 107mb |L2.634| " - - "L2.635[95,104] 342ns 97mb |L2.635| " - - "L2.636[105,111] 342ns 86mb |L2.636| " - - "L2.637[172,177] 342ns 109mb |L2.637| " - - "L2.638[178,182] 342ns 109mb |L2.638| " - - "L2.639[112,119] 342ns 114mb |L2.639| " - - "L2.640[120,126] 342ns 98mb |L2.640| " - - "L2.641[127,130] 342ns 82mb |L2.641| " - - "L2.642[282,288] 342ns 100mb |L2.642| " - - "L2.645[131,132] 342ns 38mb |L2.645| " - - "L2.646[133,133] 342ns 38mb |L2.646| " - - "L2.647[289,295] 342ns 115mb |L2.647| " - - "L2.648[296,296] 342ns 19mb |L2.648| " - - "L2.649[300,303] 342ns 110mb |L2.649| " - - "L2.652[304,306] 342ns 113mb |L2.652| " - - "L2.653[307,308] 342ns 113mb |L2.653| " - - "L2.654[339,339] 342ns 25mb |L2.654|" - - "WARNING: file L2.590[134,149] 342ns 202mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.591[150,165] 342ns 218mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.595[183,197] 342ns 267mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.596[198,207] 342ns 157mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.598[221,232] 342ns 270mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.602[262,270] 342ns 184mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.612[312,314] 342ns 181mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.613[315,317] 342ns 214mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.616[318,320] 342ns 222mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.618[324,326] 342ns 254mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.619[327,329] 342ns 197mb exceeds soft limit 100mb by more 
than 50%" - - "WARNING: file L2.620[330,332] 342ns 228mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.621[333,335] 342ns 199mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.622[336,337] 342ns 156mb exceeds soft limit 100mb by more than 50%" + - "L2.598[150,165] 342ns 224mb |L2.598| " + - "L2.599[166,171] 342ns 108mb |L2.599| " + - "L2.602[183,197] 342ns 267mb |L2.602| " + - "L2.603[198,207] 342ns 157mb |L2.603| " + - "L2.604[208,220] 342ns 149mb |L2.604| " + - "L2.605[221,232] 342ns 279mb |L2.605| " + - "L2.606[233,244] 342ns 136mb |L2.606| " + - "L2.607[245,253] 342ns 144mb |L2.607| " + - "L2.608[254,261] 342ns 118mb |L2.608| " + - "L2.609[262,270] 342ns 167mb |L2.609| " + - "L2.614[297,299] 342ns 110mb |L2.614| " + - "L2.615[300,302] 342ns 125mb |L2.615| " + - "L2.616[303,305] 342ns 142mb |L2.616| " + - "L2.617[306,308] 342ns 155mb |L2.617| " + - "L2.618[271,276] 342ns 122mb |L2.618| " + - "L2.619[277,281] 342ns 112mb |L2.619| " + - "L2.620[309,311] 342ns 151mb |L2.620|" + - "L2.621[312,314] 342ns 157mb |L2.621|" + - "L2.622[315,317] 342ns 194mb |L2.622|" + - "L2.623[318,320] 342ns 215mb |L2.623|" + - "L2.624[321,323] 342ns 210mb |L2.624|" + - "L2.625[324,326] 342ns 194mb |L2.625|" + - "L2.626[327,329] 342ns 179mb |L2.626|" + - "L2.627[330,332] 342ns 235mb |L2.627|" + - "L2.628[333,335] 342ns 211mb |L2.628|" + - "L2.629[336,339] 342ns 187mb |L2.629|" + - "L2.630[1,33] 342ns 102mb |L2.630| " + - "L2.633[34,57] 342ns 104mb |L2.633| " + - "L2.644[125,130] 342ns 104mb |L2.644| " + - "L2.646[172,177] 342ns 114mb |L2.646| " + - "L2.650[58,74] 342ns 106mb |L2.650| " + - "L2.651[75,80] 342ns 38mb |L2.651| " + - "L2.652[81,91] 342ns 113mb |L2.652| " + - "L2.653[92,101] 342ns 102mb |L2.653| " + - "L2.654[102,106] 342ns 51mb |L2.654| " + - "L2.655[107,116] 342ns 116mb |L2.655| " + - "L2.656[117,124] 342ns 93mb |L2.656| " + - "L2.657[282,287] 342ns 114mb |L2.657| " + - "L2.660[131,139] 342ns 121mb |L2.660| " + - "L2.661[140,147] 342ns 108mb |L2.661| " + - "L2.662[148,149] 342ns 27mb |L2.662| " + - "L2.663[178,181] 342ns 76mb |L2.663| " + - "L2.664[182,182] 342ns 19mb |L2.664| " + - "L2.665[288,293] 342ns 114mb |L2.665| " + - "L2.666[294,296] 342ns 57mb |L2.666| " + - "WARNING: file L2.598[150,165] 342ns 224mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.602[183,197] 342ns 267mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.603[198,207] 342ns 157mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.605[221,232] 342ns 279mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.609[262,270] 342ns 167mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.617[306,308] 342ns 155mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.620[309,311] 342ns 151mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.621[312,314] 342ns 157mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.622[315,317] 342ns 194mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.623[318,320] 342ns 215mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.624[321,323] 342ns 210mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.625[324,326] 342ns 194mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.626[327,329] 342ns 179mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.627[330,332] 342ns 235mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.628[333,335] 342ns 211mb exceeds soft 
limit 100mb by more than 50%" + - "WARNING: file L2.629[336,339] 342ns 187mb exceeds soft limit 100mb by more than 50%" "### ); } diff --git a/compactor/tests/layouts/common_use_cases.rs b/compactor/tests/layouts/common_use_cases.rs index 1908df1c48..7be19a8003 100644 --- a/compactor/tests/layouts/common_use_cases.rs +++ b/compactor/tests/layouts/common_use_cases.rs @@ -81,30 +81,30 @@ async fn test_keep_ingesting_l0_files_20_percent_overlap() { - "L2 " - "L2.29[0,160] 19ns 80mb |L2.29| " - "L2.59[161,361] 39ns 100mb |L2.59| " - - "L2.89[362,562] 59ns 100mb |L2.89| " - - "L2.119[563,753] 79ns 95mb |L2.119| " - - "L2.149[754,954] 99ns 101mb |L2.149| " - - "L2.179[955,1155] 119ns 101mb |L2.179| " - - "L2.209[1156,1356] 139ns 101mb |L2.209| " - - "L2.239[1357,1557] 159ns 101mb |L2.239| " - - "L2.269[1558,1758] 179ns 101mb |L2.269| " - - "L2.299[1759,1958] 199ns 100mb |L2.299| " - - "L2.329[1959,2158] 219ns 100mb |L2.329| " - - "L2.359[2159,2358] 239ns 100mb |L2.359| " - - "L2.389[2359,2558] 259ns 100mb |L2.389| " - - "L2.419[2559,2758] 279ns 100mb |L2.419| " - - "L2.449[2759,2958] 299ns 100mb |L2.449| " - - "L2.479[2959,3158] 319ns 100mb |L2.479| " - - "L2.509[3159,3358] 339ns 100mb |L2.509| " - - "L2.539[3359,3558] 359ns 100mb |L2.539| " - - "L2.569[3559,3758] 379ns 100mb |L2.569| " - - "L2.599[3759,3958] 399ns 100mb |L2.599| " - - "L2.629[3959,4158] 419ns 100mb |L2.629| " - - "L2.659[4159,4358] 439ns 100mb |L2.659| " - - "L2.689[4359,4558] 459ns 100mb |L2.689| " - - "L2.719[4559,4758] 479ns 100mb |L2.719| " - - "L2.749[4759,4958] 499ns 100mb |L2.749|" - - "L2.750[4959,5001] 499ns 22mb |L2.750|" + - "L2.89[362,553] 59ns 96mb |L2.89| " + - "L2.119[554,754] 79ns 100mb |L2.119| " + - "L2.149[755,955] 99ns 100mb |L2.149| " + - "L2.179[956,1156] 119ns 100mb |L2.179| " + - "L2.209[1157,1357] 139ns 100mb |L2.209| " + - "L2.239[1358,1558] 159ns 100mb |L2.239| " + - "L2.269[1559,1759] 179ns 100mb |L2.269| " + - "L2.299[1760,1960] 199ns 101mb |L2.299| " + - "L2.329[1961,2161] 219ns 101mb |L2.329| " + - "L2.359[2162,2353] 239ns 96mb |L2.359| " + - "L2.389[2354,2554] 259ns 101mb |L2.389| " + - "L2.419[2555,2755] 279ns 101mb |L2.419| " + - "L2.449[2756,2956] 299ns 101mb |L2.449| " + - "L2.479[2957,3157] 319ns 101mb |L2.479| " + - "L2.509[3158,3358] 339ns 101mb |L2.509| " + - "L2.539[3359,3559] 359ns 101mb |L2.539| " + - "L2.569[3560,3760] 379ns 101mb |L2.569| " + - "L2.599[3761,3961] 399ns 101mb |L2.599| " + - "L2.629[3962,4153] 419ns 96mb |L2.629| " + - "L2.659[4154,4354] 439ns 101mb |L2.659| " + - "L2.689[4355,4555] 459ns 101mb |L2.689| " + - "L2.719[4556,4756] 479ns 101mb |L2.719| " + - "L2.749[4757,4957] 499ns 101mb |L2.749|" + - "L2.750[4958,5001] 499ns 22mb |L2.750|" "### ); } @@ -178,31 +178,31 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap() { - "L0.753[5020,5034] 502ns 5mb |L0.753|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| 
" - - "L2.509[3163,3362] 339ns 100mb |L2.509| " - - "L2.539[3363,3562] 359ns 100mb |L2.539| " - - "L2.569[3563,3762] 379ns 100mb |L2.569| " - - "L2.599[3763,3962] 399ns 100mb |L2.599| " - - "L2.629[3963,4162] 419ns 100mb |L2.629| " - - "L2.659[4163,4362] 439ns 100mb |L2.659| " - - "L2.689[4363,4562] 459ns 100mb |L2.689| " - - "L2.719[4563,4762] 479ns 100mb |L2.719| " - - "L2.749[4763,4962] 499ns 100mb |L2.749|" - - "L2.750[4963,5004] 499ns 21mb |L2.750|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509| " + - "L2.539[3363,3563] 359ns 101mb |L2.539| " + - "L2.569[3564,3764] 379ns 101mb |L2.569| " + - "L2.599[3765,3956] 399ns 96mb |L2.599| " + - "L2.629[3957,4157] 419ns 101mb |L2.629| " + - "L2.659[4158,4358] 439ns 101mb |L2.659| " + - "L2.689[4359,4559] 459ns 101mb |L2.689| " + - "L2.719[4560,4760] 479ns 101mb |L2.719| " + - "L2.749[4761,4961] 499ns 101mb |L2.749|" + - "L2.750[4962,5004] 499ns 22mb |L2.750|" "### ); } @@ -288,13 +288,13 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L0.215[1440,1454] 144ns 5mb |L0.215|" - "L2 " - "L2.29[0,163] 19ns 80mb |-L2.29--| " - - "L2.59[164,364] 39ns 100mb |--L2.59---| " - - "L2.89[365,565] 59ns 100mb |--L2.89---| " - - "L2.119[566,756] 79ns 95mb |-L2.119--| " - - "L2.149[757,957] 99ns 101mb |--L2.149--| " - - "L2.179[958,1158] 119ns 101mb |--L2.179--| " - - "L2.209[1159,1359] 139ns 101mb |--L2.209--| " - - "L2.210[1360,1404] 139ns 23mb |L2.210|" + - "L2.59[164,356] 39ns 96mb |--L2.59--| " + - "L2.89[357,557] 59ns 100mb |--L2.89---| " + - "L2.119[558,758] 79ns 100mb |--L2.119--| " + - "L2.149[759,959] 99ns 100mb |--L2.149--| " + - "L2.179[960,1160] 119ns 100mb |--L2.179--| " + - "L2.209[1161,1361] 139ns 100mb |--L2.209--| " + - "L2.210[1362,1404] 139ns 21mb |L2.210|" - "**** Simulation run 35, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[1443]). 
5 Input Files, 25mb total:" - "L0, all files 5mb " - "L0.215[1440,1454] 144ns |-------L0.215--------| " @@ -315,13 +315,13 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L1.217[1444,1454] 144ns 5mb |L1.217|" - "L2 " - "L2.29[0,163] 19ns 80mb |-L2.29--| " - - "L2.59[164,364] 39ns 100mb |--L2.59---| " - - "L2.89[365,565] 59ns 100mb |--L2.89---| " - - "L2.119[566,756] 79ns 95mb |-L2.119--| " - - "L2.149[757,957] 99ns 101mb |--L2.149--| " - - "L2.179[958,1158] 119ns 101mb |--L2.179--| " - - "L2.209[1159,1359] 139ns 101mb |--L2.209--| " - - "L2.210[1360,1404] 139ns 23mb |L2.210|" + - "L2.59[164,356] 39ns 96mb |--L2.59--| " + - "L2.89[357,557] 59ns 100mb |--L2.89---| " + - "L2.119[558,758] 79ns 100mb |--L2.119--| " + - "L2.149[759,959] 99ns 100mb |--L2.149--| " + - "L2.179[960,1160] 119ns 100mb |--L2.179--| " + - "L2.209[1161,1361] 139ns 100mb |--L2.209--| " + - "L2.210[1362,1404] 139ns 21mb |L2.210|" "### ); } else if i == show_intermediate_result_runs[2] { @@ -341,26 +341,26 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L1.337[2244,2254] 224ns 5mb |L1.337|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.330[2163,2204] 219ns 21mb |L2.330|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.330[2157,2204] 219ns 24mb |L2.330|" - "**** Simulation run 56, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[2292]). 
6 Input Files, 30mb total:" - - "L0 " - - "L0.342[2290,2304] 229ns 5mb |------L0.342-------|" - - "L0.341[2280,2294] 228ns 5mb |------L0.341-------| " - - "L0.340[2270,2284] 227ns 5mb |------L0.340-------| " - - "L0.339[2260,2274] 226ns 5mb |------L0.339-------| " - - "L0.338[2250,2264] 225ns 5mb |------L0.338-------| " - - "L1 " - - "L1.337[2244,2254] 224ns 5mb|---L1.337----| " + - "L0, all files 5mb " + - "L0.342[2290,2304] 229ns |------L0.342-------|" + - "L0.341[2280,2294] 228ns |------L0.341-------| " + - "L0.340[2270,2284] 227ns |------L0.340-------| " + - "L0.339[2260,2274] 226ns |------L0.339-------| " + - "L0.338[2250,2264] 225ns |------L0.338-------| " + - "L1, all files 5mb " + - "L1.337[2244,2254] 224ns |---L1.337----| " - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - "L1.?[2244,2292] 229ns 24mb|---------------------------------L1.?---------------------------------| " @@ -368,24 +368,24 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "Committing partition 1:" - " Soft Deleting 6 files: L1.337, L0.338, L0.339, L0.340, L0.341, L0.342" - " Creating 2 files" - - "**** Final Output Files (2.61gb written)" + - "**** Final Output Files (2.6gb written)" - "L1 " - "L1.336[2200,2243] 224ns 20mb |L1.336|" - "L1.343[2244,2292] 229ns 24mb |L1.343|" - "L1.344[2293,2304] 229ns 6mb |L1.344|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.330[2163,2204] 219ns 21mb |L2.330|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.330[2157,2204] 219ns 24mb |L2.330|" "### ); } else if i == show_intermediate_result_runs[3] { @@ -404,26 +404,26 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L1.486[3200,3243] 324ns 20mb |L1.486|" - "L1.493[3244,3292] 329ns 24mb |L1.493|" - "L1.500[3293,3341] 334ns 24mb |L1.500|" - - "L1.501[3342,3354] 334ns 7mb |L1.501|" + - "L1.501[3342,3354] 334ns 6mb |L1.501|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - "L2.480[3163,3204] 319ns 21mb |L2.480|" - - "**** Simulation run 83, 
type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[3391]). 6 Input Files, 32mb total:" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.480[3162,3204] 319ns 22mb |L2.480|" + - "**** Simulation run 83, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[3391]). 6 Input Files, 31mb total:" - "L0 " - "L0.506[3390,3404] 339ns 5mb |------L0.506------| " - "L0.505[3380,3394] 338ns 5mb |------L0.505------| " @@ -431,26 +431,26 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L0.503[3360,3374] 336ns 5mb |------L0.503------| " - "L0.502[3350,3364] 335ns 5mb |------L0.502------| " - "L1 " - - "L1.501[3342,3354] 334ns 7mb|----L1.501-----| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 32mb total:" + - "L1.501[3342,3354] 334ns 6mb|----L1.501-----| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 31mb total:" - "L1 " - "L1.?[3342,3391] 339ns 25mb|--------------------------------L1.?---------------------------------| " - - "L1.?[3392,3404] 339ns 7mb |-----L1.?------| " + - "L1.?[3392,3404] 339ns 6mb |-----L1.?------| " - "Committing partition 1:" - " Soft Deleting 6 files: L1.501, L0.502, L0.503, L0.504, L0.505, L0.506" - " Creating 2 files" - - "**** Simulation run 84, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[3362]). 6 Input Files, 121mb total:" + - "**** Simulation run 84, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[3362]). 
6 Input Files, 122mb total:" - "L1 " - "L1.493[3244,3292] 329ns 24mb |----L1.493-----| " - - "L1.486[3200,3243] 324ns 20mb |----L1.486----| " + - "L1.486[3200,3243] 324ns 20mb |---L1.486----| " - "L1.500[3293,3341] 334ns 24mb |----L1.500-----| " - - "L1.508[3392,3404] 339ns 7mb |L1.508|" + - "L1.508[3392,3404] 339ns 6mb |L1.508|" - "L1.507[3342,3391] 339ns 25mb |-----L1.507-----| " - "L2 " - - "L2.480[3163,3204] 319ns 21mb|---L2.480----| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 121mb total:" + - "L2.480[3162,3204] 319ns 22mb|---L2.480----| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 122mb total:" - "L2 " - - "L2.?[3163,3362] 339ns 100mb|----------------------------------L2.?----------------------------------| " + - "L2.?[3162,3362] 339ns 101mb|----------------------------------L2.?----------------------------------| " - "L2.?[3363,3404] 339ns 21mb |----L2.?-----| " - "Committing partition 1:" - " Soft Deleting 6 files: L2.480, L1.486, L1.493, L1.500, L1.507, L1.508" @@ -458,22 +458,22 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "**** Final Output Files (3.95gb written)" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - "L2.509[3163,3362] 339ns 100mb |L2.509|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509|" - "L2.510[3363,3404] 339ns 21mb |L2.510|" "### ); @@ -494,37 +494,37 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L1.667[4444,4454] 444ns 5mb |L1.667|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - 
"L2.509[3163,3362] 339ns 100mb |L2.509| " - - "L2.539[3363,3562] 359ns 100mb |L2.539| " - - "L2.569[3563,3762] 379ns 100mb |L2.569| " - - "L2.599[3763,3962] 399ns 100mb |L2.599| " - - "L2.629[3963,4162] 419ns 100mb |L2.629| " - - "L2.659[4163,4362] 439ns 100mb |L2.659|" - - "L2.660[4363,4404] 439ns 21mb |L2.660|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509| " + - "L2.539[3363,3563] 359ns 101mb |L2.539| " + - "L2.569[3564,3764] 379ns 101mb |L2.569| " + - "L2.599[3765,3956] 399ns 96mb |L2.599| " + - "L2.629[3957,4157] 419ns 101mb |L2.629| " + - "L2.659[4158,4358] 439ns 101mb |L2.659|" + - "L2.660[4359,4404] 439ns 23mb |L2.660|" - "**** Simulation run 111, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4492]). 6 Input Files, 30mb total:" - - "L0 " - - "L0.672[4490,4504] 449ns 5mb |------L0.672-------|" - - "L0.671[4480,4494] 448ns 5mb |------L0.671-------| " - - "L0.670[4470,4484] 447ns 5mb |------L0.670-------| " - - "L0.669[4460,4474] 446ns 5mb |------L0.669-------| " - - "L0.668[4450,4464] 445ns 5mb |------L0.668-------| " - - "L1 " - - "L1.667[4444,4454] 444ns 5mb|---L1.667----| " + - "L0, all files 5mb " + - "L0.672[4490,4504] 449ns |------L0.672-------|" + - "L0.671[4480,4494] 448ns |------L0.671-------| " + - "L0.670[4470,4484] 447ns |------L0.670-------| " + - "L0.669[4460,4474] 446ns |------L0.669-------| " + - "L0.668[4450,4464] 445ns |------L0.668-------| " + - "L1, all files 5mb " + - "L1.667[4444,4454] 444ns |---L1.667----| " - "**** 2 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - "L1.?[4444,4492] 449ns 24mb|---------------------------------L1.?---------------------------------| " @@ -532,35 +532,35 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "Committing partition 1:" - " Soft Deleting 6 files: L1.667, L0.668, L0.669, L0.670, L0.671, L0.672" - " Creating 2 files" - - "**** Final Output Files (5.17gb written)" + - "**** Final Output Files (5.18gb written)" - "L1 " - "L1.666[4400,4443] 444ns 20mb |L1.666|" - "L1.673[4444,4492] 449ns 24mb |L1.673|" - "L1.674[4493,4504] 449ns 6mb |L1.674|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - "L2.509[3163,3362] 339ns 100mb |L2.509| " - 
- "L2.539[3363,3562] 359ns 100mb |L2.539| " - - "L2.569[3563,3762] 379ns 100mb |L2.569| " - - "L2.599[3763,3962] 399ns 100mb |L2.599| " - - "L2.629[3963,4162] 419ns 100mb |L2.629| " - - "L2.659[4163,4362] 439ns 100mb |L2.659|" - - "L2.660[4363,4404] 439ns 21mb |L2.660|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509| " + - "L2.539[3363,3563] 359ns 101mb |L2.539| " + - "L2.569[3564,3764] 379ns 101mb |L2.569| " + - "L2.599[3765,3956] 399ns 96mb |L2.599| " + - "L2.629[3957,4157] 419ns 101mb |L2.629| " + - "L2.659[4158,4358] 439ns 101mb |L2.659|" + - "L2.660[4359,4404] 439ns 23mb |L2.660|" "### ); } else if i == show_intermediate_result_runs[5] { @@ -579,34 +579,34 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L1.726[4800,4843] 484ns 20mb |L1.726|" - "L1.733[4844,4892] 489ns 24mb |L1.733|" - "L1.740[4893,4941] 494ns 24mb |L1.740|" - - "L1.741[4942,4954] 494ns 7mb |L1.741|" + - "L1.741[4942,4954] 494ns 6mb |L1.741|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - "L2.509[3163,3362] 339ns 100mb |L2.509| " - - "L2.539[3363,3562] 359ns 100mb |L2.539| " - - "L2.569[3563,3762] 379ns 100mb |L2.569| " - - "L2.599[3763,3962] 399ns 100mb |L2.599| " - - "L2.629[3963,4162] 419ns 100mb |L2.629| " - - "L2.659[4163,4362] 439ns 100mb |L2.659| " - - "L2.689[4363,4562] 459ns 100mb |L2.689| " - - "L2.719[4563,4762] 479ns 100mb |L2.719|" - - "L2.720[4763,4804] 479ns 21mb |L2.720|" - - "**** Simulation run 123, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4991]). 
6 Input Files, 32mb total:" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509| " + - "L2.539[3363,3563] 359ns 101mb |L2.539| " + - "L2.569[3564,3764] 379ns 101mb |L2.569| " + - "L2.599[3765,3956] 399ns 96mb |L2.599| " + - "L2.629[3957,4157] 419ns 101mb |L2.629| " + - "L2.659[4158,4358] 439ns 101mb |L2.659| " + - "L2.689[4359,4559] 459ns 101mb |L2.689| " + - "L2.719[4560,4760] 479ns 101mb |L2.719|" + - "L2.720[4761,4804] 479ns 22mb |L2.720|" + - "**** Simulation run 123, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4991]). 6 Input Files, 31mb total:" - "L0 " - "L0.746[4990,5004] 499ns 5mb |------L0.746------| " - "L0.745[4980,4994] 498ns 5mb |------L0.745------| " @@ -614,58 +614,58 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L0.743[4960,4974] 496ns 5mb |------L0.743------| " - "L0.742[4950,4964] 495ns 5mb |------L0.742------| " - "L1 " - - "L1.741[4942,4954] 494ns 7mb|----L1.741-----| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 32mb total:" + - "L1.741[4942,4954] 494ns 6mb|----L1.741-----| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 31mb total:" - "L1 " - "L1.?[4942,4991] 499ns 25mb|--------------------------------L1.?---------------------------------| " - - "L1.?[4992,5004] 499ns 7mb |-----L1.?------| " + - "L1.?[4992,5004] 499ns 6mb |-----L1.?------| " - "Committing partition 1:" - " Soft Deleting 6 files: L1.741, L0.742, L0.743, L0.744, L0.745, L0.746" - " Creating 2 files" - - "**** Simulation run 124, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4962]). 6 Input Files, 121mb total:" + - "**** Simulation run 124, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[4961]). 
6 Input Files, 122mb total:" - "L1 " - "L1.733[4844,4892] 489ns 24mb |----L1.733-----| " - - "L1.726[4800,4843] 484ns 20mb |----L1.726----| " + - "L1.726[4800,4843] 484ns 20mb |---L1.726----| " - "L1.740[4893,4941] 494ns 24mb |----L1.740-----| " - - "L1.748[4992,5004] 499ns 7mb |L1.748|" - - "L1.747[4942,4991] 499ns 25mb |-----L1.747-----| " + - "L1.748[4992,5004] 499ns 6mb |L1.748|" + - "L1.747[4942,4991] 499ns 25mb |-----L1.747-----| " - "L2 " - - "L2.720[4763,4804] 479ns 21mb|---L2.720----| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 121mb total:" + - "L2.720[4761,4804] 479ns 22mb|---L2.720----| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 122mb total:" - "L2 " - - "L2.?[4763,4962] 499ns 100mb|----------------------------------L2.?----------------------------------| " - - "L2.?[4963,5004] 499ns 21mb |----L2.?-----| " + - "L2.?[4761,4961] 499ns 101mb|----------------------------------L2.?----------------------------------| " + - "L2.?[4962,5004] 499ns 22mb |----L2.?-----| " - "Committing partition 1:" - " Soft Deleting 6 files: L2.720, L1.726, L1.733, L1.740, L1.747, L1.748" - " Creating 2 files" - - "**** Final Output Files (5.82gb written)" + - "**** Final Output Files (5.83gb written)" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - "L2.509[3163,3362] 339ns 100mb |L2.509| " - - "L2.539[3363,3562] 359ns 100mb |L2.539| " - - "L2.569[3563,3762] 379ns 100mb |L2.569| " - - "L2.599[3763,3962] 399ns 100mb |L2.599| " - - "L2.629[3963,4162] 419ns 100mb |L2.629| " - - "L2.659[4163,4362] 439ns 100mb |L2.659| " - - "L2.689[4363,4562] 459ns 100mb |L2.689| " - - "L2.719[4563,4762] 479ns 100mb |L2.719|" - - "L2.749[4763,4962] 499ns 100mb |L2.749|" - - "L2.750[4963,5004] 499ns 21mb |L2.750|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509| " + - "L2.539[3363,3563] 359ns 101mb |L2.539| " + - "L2.569[3564,3764] 379ns 101mb |L2.569| " + - "L2.599[3765,3956] 399ns 96mb |L2.599| " + - "L2.629[3957,4157] 419ns 101mb |L2.629| " + - "L2.659[4158,4358] 439ns 101mb |L2.659| " + - "L2.689[4359,4559] 459ns 101mb |L2.689| " + - "L2.719[4560,4760] 479ns 101mb |L2.719|" + - "L2.749[4761,4961] 499ns 101mb |L2.749|" + - "L2.750[4962,5004] 499ns 22mb |L2.750|" "### ); } 
else { @@ -711,31 +711,31 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_l1_left() { - "L1.757[5044,5054] 504ns 5mb |L1.757|" - "L2 " - "L2.29[0,163] 19ns 80mb |L2.29| " - - "L2.59[164,364] 39ns 100mb |L2.59| " - - "L2.89[365,565] 59ns 100mb |L2.89| " - - "L2.119[566,756] 79ns 95mb |L2.119| " - - "L2.149[757,957] 99ns 101mb |L2.149| " - - "L2.179[958,1158] 119ns 101mb |L2.179| " - - "L2.209[1159,1359] 139ns 101mb |L2.209| " - - "L2.239[1360,1560] 159ns 101mb |L2.239| " - - "L2.269[1561,1761] 179ns 101mb |L2.269| " - - "L2.299[1762,1962] 199ns 101mb |L2.299| " - - "L2.329[1963,2162] 219ns 100mb |L2.329| " - - "L2.359[2163,2362] 239ns 100mb |L2.359| " - - "L2.389[2363,2562] 259ns 100mb |L2.389| " - - "L2.419[2563,2762] 279ns 100mb |L2.419| " - - "L2.449[2763,2962] 299ns 100mb |L2.449| " - - "L2.479[2963,3162] 319ns 100mb |L2.479| " - - "L2.509[3163,3362] 339ns 100mb |L2.509| " - - "L2.539[3363,3562] 359ns 100mb |L2.539| " - - "L2.569[3563,3762] 379ns 100mb |L2.569| " - - "L2.599[3763,3962] 399ns 100mb |L2.599| " - - "L2.629[3963,4162] 419ns 100mb |L2.629| " - - "L2.659[4163,4362] 439ns 100mb |L2.659| " - - "L2.689[4363,4562] 459ns 100mb |L2.689| " - - "L2.719[4563,4762] 479ns 100mb |L2.719| " - - "L2.749[4763,4962] 499ns 100mb |L2.749|" - - "L2.750[4963,5004] 499ns 21mb |L2.750|" + - "L2.59[164,356] 39ns 96mb |L2.59| " + - "L2.89[357,557] 59ns 100mb |L2.89| " + - "L2.119[558,758] 79ns 100mb |L2.119| " + - "L2.149[759,959] 99ns 100mb |L2.149| " + - "L2.179[960,1160] 119ns 100mb |L2.179| " + - "L2.209[1161,1361] 139ns 100mb |L2.209| " + - "L2.239[1362,1562] 159ns 100mb |L2.239| " + - "L2.269[1563,1763] 179ns 100mb |L2.269| " + - "L2.299[1764,1964] 199ns 101mb |L2.299| " + - "L2.329[1965,2156] 219ns 96mb |L2.329| " + - "L2.359[2157,2357] 239ns 101mb |L2.359| " + - "L2.389[2358,2558] 259ns 101mb |L2.389| " + - "L2.419[2559,2759] 279ns 101mb |L2.419| " + - "L2.449[2760,2960] 299ns 101mb |L2.449| " + - "L2.479[2961,3161] 319ns 101mb |L2.479| " + - "L2.509[3162,3362] 339ns 101mb |L2.509| " + - "L2.539[3363,3563] 359ns 101mb |L2.539| " + - "L2.569[3564,3764] 379ns 101mb |L2.569| " + - "L2.599[3765,3956] 399ns 96mb |L2.599| " + - "L2.629[3957,4157] 419ns 101mb |L2.629| " + - "L2.659[4158,4358] 439ns 101mb |L2.659| " + - "L2.689[4359,4559] 459ns 101mb |L2.689| " + - "L2.719[4560,4760] 479ns 101mb |L2.719| " + - "L2.749[4761,4961] 499ns 101mb |L2.749|" + - "L2.750[4962,5004] 499ns 22mb |L2.750|" "### ); } @@ -815,8 +815,8 @@ async fn test_keep_ingesting_l0_files_40_percent_overlap_output_250mb() { - "L1.74[493,504] 49ns 6mb |L1.74|" - "L2 " - "L2.29[0,163] 19ns 80mb |----------L2.29----------| " - - "L2.59[164,364] 39ns 100mb |-------------L2.59-------------| " - - "L2.60[365,404] 39ns 20mb |L2.60| " + - "L2.59[164,356] 39ns 96mb |------------L2.59-------------| " + - "L2.60[357,404] 39ns 24mb |L2.60| " "### ); } diff --git a/compactor/tests/layouts/core.rs b/compactor/tests/layouts/core.rs index ec0c297999..c68bae8a5c 100644 --- a/compactor/tests/layouts/core.rs +++ b/compactor/tests/layouts/core.rs @@ -521,9 +521,9 @@ async fn l1_too_much_with_non_overlapping_l0() { - "L1.3[150,199] 120s 70mb |-----------L1.3------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 240mb total:" - "L2 " - - "L2.?[50,113] 120s 101mb |----------------L2.?----------------| " - - "L2.?[114,176] 120s 100mb |---------------L2.?----------------| " - - "L2.?[177,199] 120s 39mb |---L2.?----| " + - "L2.?[50,113] 120s 102mb |----------------L2.?----------------| " + - "L2.?[114,176] 120s 101mb 
|---------------L2.?----------------| " + - "L2.?[177,199] 120s 37mb |---L2.?----| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.1, L1.2, L1.3" - " Creating 3 files" @@ -535,9 +535,9 @@ async fn l1_too_much_with_non_overlapping_l0() { - "L1.7[350,399] 360s |--------L1.7--------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 280mb total:" - "L2 " - - "L2.?[200,272] 360s 101mb |-------------L2.?-------------| " - - "L2.?[273,344] 360s 100mb |-------------L2.?-------------| " - - "L2.?[345,399] 360s 79mb |---------L2.?---------| " + - "L2.?[200,272] 360s 102mb |-------------L2.?-------------| " + - "L2.?[273,344] 360s 101mb |-------------L2.?-------------| " + - "L2.?[345,399] 360s 77mb |---------L2.?---------| " - "Committing partition 1:" - " Soft Deleting 4 files: L1.4, L1.5, L1.6, L1.7" - " Creating 3 files" @@ -549,23 +549,23 @@ async fn l1_too_much_with_non_overlapping_l0() { - "L1.8[400,449] 420s 70mb |-----L1.8------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 225mb total:" - "L2 " - - "L2.?[400,511] 780s 100mb |-----------------L2.?-----------------| " - - "L2.?[512,622] 780s 99mb |----------------L2.?-----------------| " - - "L2.?[623,649] 780s 25mb |-L2.?--| " + - "L2.?[400,511] 780s 101mb |-----------------L2.?-----------------| " + - "L2.?[512,622] 780s 100mb |----------------L2.?-----------------| " + - "L2.?[623,649] 780s 24mb |-L2.?--| " - "Committing partition 1:" - " Soft Deleting 4 files: L1.8, L1.9, L1.10, L1.14" - " Creating 3 files" - "**** Final Output Files (760mb written)" - "L2 " - - "L2.15[50,113] 120s 101mb |-L2.15-| " - - "L2.16[114,176] 120s 100mb |-L2.16-| " - - "L2.17[177,199] 120s 39mb |L2.17| " - - "L2.18[200,272] 360s 101mb |-L2.18--| " - - "L2.19[273,344] 360s 100mb |-L2.19--| " - - "L2.20[345,399] 360s 79mb |L2.20-| " - - "L2.21[400,511] 780s 100mb |----L2.21-----| " - - "L2.22[512,622] 780s 99mb |----L2.22-----| " - - "L2.23[623,649] 780s 25mb |L2.23|" + - "L2.15[50,113] 120s 102mb |-L2.15-| " + - "L2.16[114,176] 120s 101mb |-L2.16-| " + - "L2.17[177,199] 120s 37mb |L2.17| " + - "L2.18[200,272] 360s 102mb |-L2.18--| " + - "L2.19[273,344] 360s 101mb |-L2.19--| " + - "L2.20[345,399] 360s 77mb |L2.20-| " + - "L2.21[400,511] 780s 101mb |----L2.21-----| " + - "L2.22[512,622] 780s 100mb |----L2.22-----| " + - "L2.23[623,649] 780s 24mb |L2.23|" "### ); } diff --git a/compactor/tests/layouts/created_at.rs b/compactor/tests/layouts/created_at.rs index 371511f1c3..ec4259d60f 100644 --- a/compactor/tests/layouts/created_at.rs +++ b/compactor/tests/layouts/created_at.rs @@ -95,15 +95,15 @@ async fn base1_level1_level2_overlaps() { - "L2.1[4,7] 7ns 100mb |------------L2.1------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 200mb total:" - "L2 " - - "L2.?[1,6] 10ns 111mb |----------------------L2.?----------------------| " - - "L2.?[7,10] 10ns 89mb |------------L2.?------------|" + - "L2.?[1,6] 10ns 120mb |----------------------L2.?----------------------| " + - "L2.?[7,10] 10ns 80mb |------------L2.?------------|" - "Committing partition 1:" - " Soft Deleting 3 files: L2.1, L1.2, L1.3" - " Creating 2 files" - "**** Final Output Files (200mb written)" - "L2 " - - "L2.4[1,6] 10ns 111mb |----------------------L2.4----------------------| " - - "L2.5[7,10] 10ns 89mb |------------L2.5------------|" + - "L2.4[1,6] 10ns 120mb |----------------------L2.4----------------------| " + - "L2.5[7,10] 10ns 80mb |------------L2.5------------|" "### ); } @@ -191,31 +191,31 @@ async fn base1_level0_level1() { - 
"L1.2[1,5] 8ns |------------------------------------------L1.2------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - - "L1.?[1,4] 11ns 75mb |------------------------------L1.?-------------------------------| " - - "L1.?[5,5] 11ns 25mb |L1.?|" + - "L1.?[1,4] 11ns 80mb |------------------------------L1.?-------------------------------| " + - "L1.?[5,5] 11ns 20mb |L1.?|" - "Committing partition 1:" - " Soft Deleting 2 files: L1.2, L0.4" - " Creating 2 files" - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[5, 9]). 4 Input Files, 250mb total:" - "L1 " - "L1.3[6,10] 10ns 50mb |-----------------L1.3-----------------|" - - "L1.6[5,5] 11ns 25mb |L1.6| " - - "L1.5[1,4] 11ns 75mb |------------L1.5------------| " + - "L1.6[5,5] 11ns 20mb |L1.6| " + - "L1.5[1,4] 11ns 80mb |------------L1.5------------| " - "L2 " - "L2.1[4,7] 7ns 100mb |------------L2.1------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:" - "L2 " - - "L2.?[1,5] 11ns 111mb |-----------------L2.?-----------------| " - - "L2.?[6,9] 11ns 83mb |------------L2.?------------| " - - "L2.?[10,10] 11ns 56mb |L2.?|" + - "L2.?[1,5] 11ns 125mb |-----------------L2.?-----------------| " + - "L2.?[6,9] 11ns 100mb |------------L2.?------------| " + - "L2.?[10,10] 11ns 25mb |L2.?|" - "Committing partition 1:" - " Soft Deleting 4 files: L2.1, L1.3, L1.5, L1.6" - " Creating 3 files" - "**** Final Output Files (350mb written)" - "L2 " - - "L2.7[1,5] 11ns 111mb |-----------------L2.7-----------------| " - - "L2.8[6,9] 11ns 83mb |------------L2.8------------| " - - "L2.9[10,10] 11ns 56mb |L2.9|" + - "L2.7[1,5] 11ns 125mb |-----------------L2.7-----------------| " + - "L2.8[6,9] 11ns 100mb |------------L2.8------------| " + - "L2.9[10,10] 11ns 25mb |L2.9|" "### ); } diff --git a/compactor/tests/layouts/knobs.rs b/compactor/tests/layouts/knobs.rs index 80fdb3fdeb..bdfdd00142 100644 --- a/compactor/tests/layouts/knobs.rs +++ b/compactor/tests/layouts/knobs.rs @@ -56,14 +56,14 @@ async fn all_overlapping_l0_split_percentage() { - "**** 2 Output Files (parquet_file_id not yet assigned), 90mb total:" - "L1 " - "L1.?[100,190005] 10ns 86mb|---------------------------------------L1.?----------------------------------------| " - - "L1.?[190006,200000] 10ns 5mb |L1.?|" + - "L1.?[190006,200000] 10ns 4mb |L1.?|" - "Committing partition 1:" - " Soft Deleting 10 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10" - " Creating 2 files" - "**** Final Output Files (90mb written)" - "L1 " - "L1.11[100,190005] 10ns 86mb|---------------------------------------L1.11---------------------------------------| " - - "L1.12[190006,200000] 10ns 5mb |L1.12|" + - "L1.12[190006,200000] 10ns 4mb |L1.12|" "### ); } @@ -189,23 +189,21 @@ async fn all_overlapping_l0_split_percentage_and_max_file_size() { - "Committing partition 1:" - " Soft Deleting 10 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10" - " Creating 3 files" - - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[177789]). 
2 Input Files, 50mb total:" - - "L1 " - - "L1.13[177791,200000] 10ns 10mb |-----L1.13-----| " - - "L1.12[88946,177790] 10ns 40mb|--------------------------------L1.12---------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 50mb total:" - - "L2 " - - "L2.?[88946,177789] 10ns 40mb|--------------------------------L2.?---------------------------------| " - - "L2.?[177790,200000] 10ns 10mb |-----L2.?------| " + - "**** Simulation run 1, type=compact(TotalSizeLessThanMaxCompactSize). 1 Input Files, 10mb total:" + - "L1, all files 10mb " + - "L1.13[177791,200000] 10ns|-----------------------------------------L1.13------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 10mb total:" + - "L2, all files 10mb " + - "L2.?[177791,200000] 10ns |------------------------------------------L2.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 2 files: L1.12, L1.13" - - " Upgrading 1 files level to CompactionLevel::L2: L1.11" - - " Creating 2 files" - - "**** Final Output Files (140mb written)" + - " Soft Deleting 1 files: L1.13" + - " Upgrading 2 files level to CompactionLevel::L2: L1.11, L1.12" + - " Creating 1 files" + - "**** Final Output Files (100mb written)" - "L2 " - "L2.11[100,88945] 10ns 40mb|----------------L2.11-----------------| " - - "L2.14[88946,177789] 10ns 40mb |----------------L2.14----------------| " - - "L2.15[177790,200000] 10ns 10mb |-L2.15-| " + - "L2.12[88946,177790] 10ns 40mb |----------------L2.12----------------| " + - "L2.14[177791,200000] 10ns 10mb |-L2.14-| " "### ); } @@ -458,7 +456,7 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "Committing partition 1:" - " Soft Deleting 10 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10" - " Creating 70 files" - - "**** Simulation run 10, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[67700, 135300]). 23 Input Files, 30mb total:" + - "**** Simulation run 10, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[67699, 135298]). 23 Input Files, 30mb total:" - "L0 " - "L0.11[100,28657] 1ns 1mb |--L0.11---| " - "L0.12[28658,57214] 1ns 1mb |--L0.12---| " @@ -485,155 +483,155 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L0.33[28658,57214] 4ns 1mb |--L0.33---| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[100,67700] 4ns 10mb |------------L1.?------------| " - - "L1.?[67701,135300] 4ns 10mb |------------L1.?------------| " - - "L1.?[135301,200000] 4ns 10mb |-----------L1.?------------| " + - "L1.?[100,67699] 4ns 10mb |------------L1.?------------| " + - "L1.?[67700,135298] 4ns 10mb |------------L1.?------------| " + - "L1.?[135299,200000] 4ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 23 files: L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33" - " Creating 3 files" - "**** Simulation run 11, type=split(HighL0OverlapTotalBacklog)(split_times=[28657, 57214]). 
1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.81[100,67700] 4ns |-----------------------------------------L1.81------------------------------------------|" + - "L1.81[100,67699] 4ns |-----------------------------------------L1.81------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[100,28657] 4ns 4mb |----------------L1.?----------------| " - "L1.?[28658,57214] 4ns 4mb |----------------L1.?----------------| " - - "L1.?[57215,67700] 4ns 2mb |---L1.?----| " + - "L1.?[57215,67699] 4ns 2mb |---L1.?----| " - "**** Simulation run 12, type=split(HighL0OverlapTotalBacklog)(split_times=[85771, 114328]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.82[67701,135300] 4ns |-----------------------------------------L1.82------------------------------------------|" + - "L1.82[67700,135298] 4ns |-----------------------------------------L1.82------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[67701,85771] 4ns 3mb|---------L1.?---------| " + - "L1.?[67700,85771] 4ns 3mb|---------L1.?---------| " - "L1.?[85772,114328] 4ns 4mb |----------------L1.?----------------| " - - "L1.?[114329,135300] 4ns 3mb |----------L1.?-----------| " + - "L1.?[114329,135298] 4ns 3mb |----------L1.?-----------| " - "**** Simulation run 13, type=split(HighL0OverlapTotalBacklog)(split_times=[142885, 171442]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.83[135301,200000] 4ns |-----------------------------------------L1.83------------------------------------------|" + - "L1.83[135299,200000] 4ns |-----------------------------------------L1.83------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[135301,142885] 4ns 1mb|--L1.?--| " + - "L1.?[135299,142885] 4ns 1mb|--L1.?--| " - "L1.?[142886,171442] 4ns 4mb |----------------L1.?-----------------| " - "L1.?[171443,200000] 4ns 4mb |----------------L1.?-----------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.81, L1.82, L1.83" - " Creating 9 files" - - "**** Simulation run 14, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "**** Simulation run 14, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.34[57215,85771] 4ns |-----------------------------------------L0.34------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 4ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 4ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 15, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 4ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 4ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 15, type=split(ReduceOverlap)(split_times=[135298]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.36[114329,142885] 4ns |-----------------------------------------L0.36------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 4ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 4ns 350kb |--------L0.?---------| " - - "**** Simulation run 16, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 4ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 4ns 350kb |--------L0.?---------| " + - "**** Simulation run 16, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.41[57215,85771] 5ns |-----------------------------------------L0.41------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 5ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 5ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 17, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 5ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 5ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 17, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.43[114329,142885] 5ns |-----------------------------------------L0.43------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 5ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 5ns 350kb |--------L0.?---------| " - - "**** Simulation run 18, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 5ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 5ns 350kb |--------L0.?---------| " + - "**** Simulation run 18, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.48[57215,85771] 6ns |-----------------------------------------L0.48------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 6ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 6ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 19, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 6ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 6ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 19, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.50[114329,142885] 6ns |-----------------------------------------L0.50------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 6ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 6ns 350kb |--------L0.?---------| " - - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[67700]). 
1 Input Files, 1mb total:" + - "L0.?[114329,135298] 6ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 6ns 350kb |--------L0.?---------| " + - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.55[57215,85771] 7ns |-----------------------------------------L0.55------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 7ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 7ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 21, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 7ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 7ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 21, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.57[114329,142885] 7ns |-----------------------------------------L0.57------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 7ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 7ns 350kb |--------L0.?---------| " - - "**** Simulation run 22, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 7ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 7ns 350kb |--------L0.?---------| " + - "**** Simulation run 22, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.62[57215,85771] 8ns |-----------------------------------------L0.62------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 8ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 8ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 8ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 8ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.64[114329,142885] 8ns |-----------------------------------------L0.64------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 8ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 8ns 350kb |--------L0.?---------| " - - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 8ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 8ns 350kb |--------L0.?---------| " + - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[67699]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.69[57215,85771] 9ns |-----------------------------------------L0.69------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 9ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 9ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 9ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 9ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.71[114329,142885] 9ns |-----------------------------------------L0.71------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 9ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 9ns 350kb |--------L0.?---------| " - - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 9ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 9ns 350kb |--------L0.?---------| " + - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.76[57215,85771] 10ns |-----------------------------------------L0.76------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 10ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 10ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 27, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 10ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 10ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 27, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.78[114329,142885] 10ns|-----------------------------------------L0.78------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 10ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 10ns 350kb |--------L0.?---------| " + - "L0.?[114329,135298] 10ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 10ns 350kb |--------L0.?---------| " - "Committing partition 1:" - " Soft Deleting 14 files: L0.34, L0.36, L0.41, L0.43, L0.48, L0.50, L0.55, L0.57, L0.62, L0.64, L0.69, L0.71, L0.76, L0.78" - " Creating 28 files" - "**** Simulation run 28, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[109041, 160867]). 
14 Input Files, 28mb total:" - "L0 " - - "L0.93[57215,67700] 4ns 483kb|L0.93| " - - "L0.94[67701,85771] 4ns 833kb |--L0.94--| " + - "L0.93[57215,67699] 4ns 483kb|L0.93| " + - "L0.94[67700,85771] 4ns 833kb |--L0.94--| " - "L0.35[85772,114328] 4ns 1mb |-----L0.35-----| " - - "L0.95[114329,135300] 4ns 967kb |---L0.95---| " - - "L0.96[135301,142885] 4ns 350kb |L0.96| " + - "L0.95[114329,135298] 4ns 967kb |---L0.95---| " + - "L0.96[135299,142885] 4ns 350kb |L0.96| " - "L0.37[142886,171442] 4ns 1mb |-----L0.37-----| " - "L0.38[171443,200000] 4ns 1mb |-----L0.38------|" - "L1 " - - "L1.86[57215,67700] 4ns 2mb|L1.86| " - - "L1.87[67701,85771] 4ns 3mb |--L1.87--| " + - "L1.86[57215,67699] 4ns 2mb|L1.86| " + - "L1.87[67700,85771] 4ns 3mb |--L1.87--| " - "L1.88[85772,114328] 4ns 4mb |-----L1.88-----| " - - "L1.89[114329,135300] 4ns 3mb |---L1.89---| " - - "L1.90[135301,142885] 4ns 1mb |L1.90| " + - "L1.89[114329,135298] 4ns 3mb |---L1.89---| " + - "L1.90[135299,142885] 4ns 1mb |L1.90| " - "L1.91[142886,171442] 4ns 4mb |-----L1.91-----| " - "L1.92[171443,200000] 4ns 4mb |-----L1.92------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 28mb total:" @@ -656,42 +654,42 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L0.42[85772,114328] 5ns |-----------------------------------------L0.42------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 5ns 0b |L0.?| " + - "L0.?[85772,85772] 5ns 47b|L0.?| " - "L0.?[85773,114328] 5ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 31, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.49[85772,114328] 6ns |-----------------------------------------L0.49------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 6ns 0b |L0.?| " + - "L0.?[85772,85772] 6ns 47b|L0.?| " - "L0.?[85773,114328] 6ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 32, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.56[85772,114328] 7ns |-----------------------------------------L0.56------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 7ns 0b |L0.?| " + - "L0.?[85772,85772] 7ns 47b|L0.?| " - "L0.?[85773,114328] 7ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 33, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.63[85772,114328] 8ns |-----------------------------------------L0.63------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 8ns 0b |L0.?| " + - "L0.?[85772,85772] 8ns 47b|L0.?| " - "L0.?[85773,114328] 8ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 34, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.70[85772,114328] 9ns |-----------------------------------------L0.70------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 9ns 0b |L0.?| " + - "L0.?[85772,85772] 9ns 47b|L0.?| " - "L0.?[85773,114328] 9ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 35, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.77[85772,114328] 10ns |-----------------------------------------L0.77------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 10ns 0b|L0.?| " + - "L0.?[85772,85772] 10ns 47b|L0.?| " - "L0.?[85773,114328] 10ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 36, type=split(HighL0OverlapTotalBacklog)(split_times=[114329, 142886]). 1 Input Files, 10mb total:" - "L1, all files 10mb " @@ -703,87 +701,87 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L1.?[142887,160867] 4ns 3mb |------------L1.?-------------| " - "**** Simulation run 37, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.99[114329,135300] 5ns |-----------------------------------------L0.99------------------------------------------|" + - "L0.99[114329,135298] 5ns |-----------------------------------------L0.99------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 5ns 0b|L0.?| " - - "L0.?[114330,135300] 5ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 5ns 47b|L0.?| " + - "L0.?[114330,135298] 5ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 38, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.103[114329,135300] 6ns|-----------------------------------------L0.103-----------------------------------------|" + - "L0.103[114329,135298] 6ns|-----------------------------------------L0.103-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 6ns 0b|L0.?| " - - "L0.?[114330,135300] 6ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 6ns 47b|L0.?| " + - "L0.?[114330,135298] 6ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 39, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 
1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.107[114329,135300] 7ns|-----------------------------------------L0.107-----------------------------------------|" + - "L0.107[114329,135298] 7ns|-----------------------------------------L0.107-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 7ns 0b|L0.?| " - - "L0.?[114330,135300] 7ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 7ns 47b|L0.?| " + - "L0.?[114330,135298] 7ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 40, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.111[114329,135300] 8ns|-----------------------------------------L0.111-----------------------------------------|" + - "L0.111[114329,135298] 8ns|-----------------------------------------L0.111-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 8ns 0b|L0.?| " - - "L0.?[114330,135300] 8ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 8ns 47b|L0.?| " + - "L0.?[114330,135298] 8ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 41, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.115[114329,135300] 9ns|-----------------------------------------L0.115-----------------------------------------|" + - "L0.115[114329,135298] 9ns|-----------------------------------------L0.115-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 9ns 0b|L0.?| " - - "L0.?[114330,135300] 9ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 9ns 47b|L0.?| " + - "L0.?[114330,135298] 9ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 42, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.119[114329,135300] 10ns|-----------------------------------------L0.119-----------------------------------------|" + - "L0.119[114329,135298] 10ns|-----------------------------------------L0.119-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 10ns 0b|L0.?| " - - "L0.?[114330,135300] 10ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 10ns 47b|L0.?| " + - "L0.?[114330,135298] 10ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 43, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.44[142886,171442] 5ns |-----------------------------------------L0.44------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 5ns 0b|L0.?| " + - "L0.?[142886,142886] 5ns 47b|L0.?| " - "L0.?[142887,171442] 5ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 44, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.51[142886,171442] 6ns |-----------------------------------------L0.51------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 6ns 0b|L0.?| " + - "L0.?[142886,142886] 6ns 47b|L0.?| " - "L0.?[142887,171442] 6ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 45, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.58[142886,171442] 7ns |-----------------------------------------L0.58------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 7ns 0b|L0.?| " + - "L0.?[142886,142886] 7ns 47b|L0.?| " - "L0.?[142887,171442] 7ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 46, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.65[142886,171442] 8ns |-----------------------------------------L0.65------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 8ns 0b|L0.?| " + - "L0.?[142886,142886] 8ns 47b|L0.?| " - "L0.?[142887,171442] 8ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 47, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.72[142886,171442] 9ns |-----------------------------------------L0.72------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 9ns 0b|L0.?| " + - "L0.?[142886,142886] 9ns 47b|L0.?| " - "L0.?[142887,171442] 9ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 48, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.79[142886,171442] 10ns|-----------------------------------------L0.79------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 10ns 0b|L0.?| " + - "L0.?[142886,142886] 10ns 47b|L0.?| " - "L0.?[142887,171442] 10ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 49, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 
1 Input Files, 8mb total:" - "L1, all files 8mb " @@ -797,42 +795,42 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L0.45[171443,200000] 5ns |-----------------------------------------L0.45------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 5ns 0b|L0.?| " + - "L0.?[171443,171443] 5ns 47b|L0.?| " - "L0.?[171444,200000] 5ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 51, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.52[171443,200000] 6ns |-----------------------------------------L0.52------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 6ns 0b|L0.?| " + - "L0.?[171443,171443] 6ns 47b|L0.?| " - "L0.?[171444,200000] 6ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 52, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.59[171443,200000] 7ns |-----------------------------------------L0.59------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 7ns 0b|L0.?| " + - "L0.?[171443,171443] 7ns 47b|L0.?| " - "L0.?[171444,200000] 7ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 53, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.66[171443,200000] 8ns |-----------------------------------------L0.66------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 8ns 0b|L0.?| " + - "L0.?[171443,171443] 8ns 47b|L0.?| " - "L0.?[171444,200000] 8ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 54, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.73[171443,200000] 9ns |-----------------------------------------L0.73------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 9ns 0b|L0.?| " + - "L0.?[171443,171443] 9ns 47b|L0.?| " - "L0.?[171444,200000] 9ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 55, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.80[171443,200000] 10ns|-----------------------------------------L0.80------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 10ns 0b|L0.?| " + - "L0.?[171443,171443] 10ns 47b|L0.?| " - "L0.?[171444,200000] 10ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "Committing partition 1:" - " Soft Deleting 27 files: L0.42, L0.44, L0.45, L0.49, L0.51, L0.52, L0.56, L0.58, L0.59, L0.63, L0.65, L0.66, L0.70, L0.72, L0.73, L0.77, L0.79, L0.80, L0.99, L0.103, L0.107, L0.111, L0.115, L0.119, L1.121, L1.122, L1.123" @@ -924,16 +922,16 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "Committing partition 1:" - " Soft Deleting 12 files: L0.127, L0.129, L0.131, L0.133, L0.135, L0.137, L0.154, L0.156, L0.158, L0.160, L0.162, L0.164" - " Creating 24 files" - - "**** Simulation run 68, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[46513, 92926]). 13 Input Files, 25mb total:" + - "**** Simulation run 68, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[46512, 92924]). 13 Input Files, 25mb total:" - "L0 " - "L0.39[100,28657] 5ns 1mb |-------L0.39--------| " - "L0.40[28658,57214] 5ns 1mb |-------L0.40--------| " - - "L0.97[57215,67700] 5ns 483kb |L0.97-| " - - "L0.98[67701,85771] 5ns 833kb |---L0.98----| " - - "L0.126[85772,85772] 5ns 0b |L0.126| " + - "L0.97[57215,67699] 5ns 483kb |L0.97-| " + - "L0.98[67700,85771] 5ns 833kb |---L0.98----| " + - "L0.126[85772,85772] 5ns 47b |L0.126| " - "L0.179[85773,109041] 5ns 1mb |-----L0.179-----| " - "L0.180[109042,114328] 5ns 244kb |L0.180|" - - "L0.141[114329,114329] 5ns 0b |L0.141|" + - "L0.141[114329,114329] 5ns 47b |L0.141|" - "L1 " - "L1.84[100,28657] 4ns 4mb |-------L1.84--------| " - "L1.85[28658,57214] 4ns 4mb |-------L1.85--------| " @@ -942,19 +940,19 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L1.138[109042,114329] 4ns 1mb |L1.138|" - "**** 3 Output Files (parquet_file_id not yet assigned), 25mb total:" - "L1 " - - "L1.?[100,46513] 5ns 10mb |---------------L1.?---------------| " - - "L1.?[46514,92926] 5ns 10mb |---------------L1.?---------------| " - - "L1.?[92927,114329] 5ns 5mb |-----L1.?-----| " + - "L1.?[100,46512] 5ns 10mb |---------------L1.?---------------| " + - "L1.?[46513,92924] 5ns 10mb |---------------L1.?---------------| " + - "L1.?[92925,114329] 5ns 5mb |-----L1.?-----| " - "Committing partition 1:" - " Soft Deleting 13 files: L0.39, L0.40, L1.84, L1.85, L0.97, L0.98, L1.124, L1.125, L0.126, L1.138, L0.141, L0.179, L0.180" - " Creating 3 files" - "**** Simulation run 69, type=split(HighL0OverlapTotalBacklog)(split_times=[38176]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.203[100,46513] 5ns |----------------------------------------L1.203-----------------------------------------| " + - "L1.203[100,46512] 5ns |-----------------------------------------L1.203-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[100,38176] 5ns 8mb |---------------------------------L1.?----------------------------------| " - - "L1.?[38177,46513] 5ns 2mb |-----L1.?-----| " + - "L1.?[38177,46512] 5ns 2mb |-----L1.?-----| " - "**** Simulation run 70, type=split(HighL0OverlapTotalBacklog)(split_times=[38176]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.47[28658,57214] 6ns |-----------------------------------------L0.47------------------------------------------|" @@ -992,130 +990,130 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L0.?[38177,57214] 10ns 878kb |--------------------------L0.?---------------------------| " - "**** Simulation run 75, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.204[46514,92926] 5ns |-----------------------------------------L1.204-----------------------------------------|" + - "L1.204[46513,92924] 5ns |-----------------------------------------L1.204-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[46514,76252] 5ns 6mb|-------------------------L1.?--------------------------| " - - "L1.?[76253,92926] 5ns 4mb |-------------L1.?-------------| " + - "L1.?[46513,76252] 5ns 6mb|-------------------------L1.?--------------------------| " + - "L1.?[76253,92924] 5ns 4mb |-------------L1.?-------------| " - "**** Simulation run 76, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.102[67701,85771] 6ns |-----------------------------------------L0.102-----------------------------------------|" + - "L0.102[67700,85771] 6ns |-----------------------------------------L0.102-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 6ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 6ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 6ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 77, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.106[67701,85771] 7ns |-----------------------------------------L0.106-----------------------------------------|" + - "L0.106[67700,85771] 7ns |-----------------------------------------L0.106-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 7ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 7ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 7ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 78, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.110[67701,85771] 8ns |-----------------------------------------L0.110-----------------------------------------|" + - "L0.110[67700,85771] 8ns |-----------------------------------------L0.110-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 8ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 8ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 8ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 79, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 
1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.114[67701,85771] 9ns |-----------------------------------------L0.114-----------------------------------------|" + - "L0.114[67700,85771] 9ns |-----------------------------------------L0.114-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 9ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 9ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 9ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 80, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.118[67701,85771] 10ns |-----------------------------------------L0.118-----------------------------------------|" + - "L0.118[67700,85771] 10ns |-----------------------------------------L0.118-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 10ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 10ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 10ns 439kb |--------------------L0.?---------------------| " - "Committing partition 1:" - " Soft Deleting 12 files: L0.47, L0.54, L0.61, L0.68, L0.75, L0.102, L0.106, L0.110, L0.114, L0.118, L1.203, L1.204" - " Creating 24 files" - - "**** Simulation run 81, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "**** Simulation run 81, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.209[38177,57214] 6ns |-----------------------------------------L0.209-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 6ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 6ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 82, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 6ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 6ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 82, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.183[85773,109041] 6ns |----------------------------------------L0.183-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 6ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 6ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 83, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 6ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 6ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 83, type=split(ReduceOverlap)(split_times=[46512]). 
1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.211[38177,57214] 7ns |-----------------------------------------L0.211-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 7ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 7ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 84, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 7ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 7ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 84, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.187[85773,109041] 7ns |----------------------------------------L0.187-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 7ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 7ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 85, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 7ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 7ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 85, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.213[38177,57214] 8ns |-----------------------------------------L0.213-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 8ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 8ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 86, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 8ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 8ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 86, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.191[85773,109041] 8ns |----------------------------------------L0.191-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 8ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 8ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 87, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 8ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 8ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 87, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.215[38177,57214] 9ns |-----------------------------------------L0.215-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 9ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 9ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 88, type=split(ReduceOverlap)(split_times=[92926]). 
1 Input Files, 1mb total:" + - "L0.?[38177,46512] 9ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 9ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 88, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.195[85773,109041] 9ns |----------------------------------------L0.195-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 9ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 9ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 89, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 9ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 9ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 89, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.217[38177,57214] 10ns |-----------------------------------------L0.217-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 10ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 10ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 90, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 10ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 10ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 90, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.199[85773,109041] 10ns|----------------------------------------L0.199-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 10ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 10ns 743kb |----------------------------L0.?----------------------------| " + - "L0.?[85773,92924] 10ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 10ns 743kb |----------------------------L0.?----------------------------| " - "Committing partition 1:" - " Soft Deleting 10 files: L0.183, L0.187, L0.191, L0.195, L0.199, L0.209, L0.211, L0.213, L0.215, L0.217" - " Creating 20 files" - - "**** Simulation run 91, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[156350, 198370]). 11 Input Files, 20mb total:" + - "**** Simulation run 91, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[156351, 198372]). 
11 Input Files, 20mb total:" - "L0 " - - "L0.142[114330,135300] 5ns 967kb|-------L0.142-------| " - - "L0.100[135301,142885] 5ns 350kb |L0.100| " - - "L0.153[142886,142886] 5ns 0b |L0.153| " + - "L0.142[114330,135298] 5ns 967kb|-------L0.142-------| " + - "L0.100[135299,142885] 5ns 350kb |L0.100| " + - "L0.153[142886,142886] 5ns 47b |L0.153| " - "L0.181[142887,160867] 5ns 829kb |-----L0.181-----| " - "L0.182[160868,171442] 5ns 488kb |-L0.182--| " - - "L0.167[171443,171443] 5ns 0b |L0.167| " + - "L0.167[171443,171443] 5ns 47b |L0.167| " - "L0.168[171444,200000] 5ns 1mb |----------L0.168-----------| " - "L1 " - "L1.139[114330,142886] 4ns 6mb|----------L1.139-----------| " @@ -1124,579 +1122,579 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L1.166[171444,200000] 4ns 6mb |----------L1.166-----------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 20mb total:" - "L1 " - - "L1.?[114330,156350] 5ns 10mb|-------------------L1.?-------------------| " - - "L1.?[156351,198370] 5ns 10mb |-------------------L1.?-------------------| " - - "L1.?[198371,200000] 5ns 397kb |L1.?|" + - "L1.?[114330,156351] 5ns 10mb|-------------------L1.?-------------------| " + - "L1.?[156352,198372] 5ns 10mb |-------------------L1.?-------------------| " + - "L1.?[198373,200000] 5ns 397kb |L1.?|" - "Committing partition 1:" - " Soft Deleting 11 files: L0.100, L1.139, L1.140, L0.142, L0.153, L1.165, L1.166, L0.167, L0.168, L0.181, L0.182" - " Creating 3 files" - "**** Simulation run 92, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.250[114330,156350] 5ns|-----------------------------------------L1.250-----------------------------------------|" + - "L1.250[114330,156351] 5ns|-----------------------------------------L1.250-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[114330,142886] 5ns 7mb|---------------------------L1.?----------------------------| " - - "L1.?[142887,156350] 5ns 3mb |-----------L1.?-----------| " + - "L1.?[142887,156351] 5ns 3mb |-----------L1.?-----------| " - "**** Simulation run 93, type=split(HighL0OverlapTotalBacklog)(split_times=[171442]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.251[156351,198370] 5ns|-----------------------------------------L1.251-----------------------------------------|" + - "L1.251[156352,198372] 5ns|-----------------------------------------L1.251-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[156351,171442] 5ns 4mb|-------------L1.?-------------| " - - "L1.?[171443,198370] 5ns 6mb |-------------------------L1.?--------------------------| " + - "L1.?[156352,171442] 5ns 4mb|-------------L1.?-------------| " + - "L1.?[171443,198372] 5ns 6mb |-------------------------L1.?--------------------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.250, L1.251" - " Creating 4 files" - - "**** Simulation run 94, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "**** Simulation run 94, type=split(ReduceOverlap)(split_times=[156351]). 
1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.185[142887,160867] 6ns|-----------------------------------------L0.185-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 6ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 6ns 208kb |--------L0.?--------| " - - "**** Simulation run 95, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 6ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 6ns 208kb |--------L0.?--------| " + - "**** Simulation run 95, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.170[171444,200000] 6ns|-----------------------------------------L0.170-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 6ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 6ns 75kb |L0.?|" - - "**** Simulation run 96, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 6ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 6ns 75kb |L0.?|" + - "**** Simulation run 96, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.189[142887,160867] 7ns|-----------------------------------------L0.189-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 7ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 7ns 208kb |--------L0.?--------| " - - "**** Simulation run 97, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 7ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 7ns 208kb |--------L0.?--------| " + - "**** Simulation run 97, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.172[171444,200000] 7ns|-----------------------------------------L0.172-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 7ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 7ns 75kb |L0.?|" - - "**** Simulation run 98, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 7ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 7ns 75kb |L0.?|" + - "**** Simulation run 98, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.193[142887,160867] 8ns|-----------------------------------------L0.193-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 8ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 8ns 208kb |--------L0.?--------| " - - "**** Simulation run 99, type=split(ReduceOverlap)(split_times=[198370]). 
1 Input Files, 1mb total:" + - "L0.?[142887,156351] 8ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 8ns 208kb |--------L0.?--------| " + - "**** Simulation run 99, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.174[171444,200000] 8ns|-----------------------------------------L0.174-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 8ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 8ns 75kb |L0.?|" - - "**** Simulation run 100, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 8ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 8ns 75kb |L0.?|" + - "**** Simulation run 100, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.197[142887,160867] 9ns|-----------------------------------------L0.197-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 9ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 9ns 208kb |--------L0.?--------| " - - "**** Simulation run 101, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 9ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 9ns 208kb |--------L0.?--------| " + - "**** Simulation run 101, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.176[171444,200000] 9ns|-----------------------------------------L0.176-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 9ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 9ns 75kb |L0.?|" - - "**** Simulation run 102, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 9ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 9ns 75kb |L0.?|" + - "**** Simulation run 102, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.201[142887,160867] 10ns|-----------------------------------------L0.201-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 10ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 10ns 208kb |--------L0.?--------| " - - "**** Simulation run 103, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 10ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 10ns 208kb |--------L0.?--------| " + - "**** Simulation run 103, type=split(ReduceOverlap)(split_times=[198372]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.178[171444,200000] 10ns|-----------------------------------------L0.178-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 10ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 10ns 75kb |L0.?|" + - "L0.?[171444,198372] 10ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 10ns 75kb |L0.?|" - "Committing partition 1:" - " Soft Deleting 10 files: L0.170, L0.172, L0.174, L0.176, L0.178, L0.185, L0.189, L0.193, L0.197, L0.201" - " Creating 20 files" - - "**** Simulation run 104, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[38491, 76882]). 17 Input Files, 30mb total:" + - "**** Simulation run 104, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[38490, 76880]). 17 Input Files, 30mb total:" - "L0 " - "L0.46[100,28657] 6ns 1mb |-------L0.46--------| " - "L0.208[28658,38176] 6ns 439kb |L0.208| " - - "L0.230[38177,46513] 6ns 384kb |L0.230| " - - "L0.231[46514,57214] 6ns 493kb |L0.231| " - - "L0.101[57215,67700] 6ns 483kb |L0.101| " - - "L0.220[67701,76252] 6ns 394kb |L0.220| " + - "L0.230[38177,46512] 6ns 384kb |L0.230| " + - "L0.231[46513,57214] 6ns 493kb |L0.231| " + - "L0.101[57215,67699] 6ns 483kb |L0.101| " + - "L0.220[67700,76252] 6ns 394kb |L0.220| " - "L0.221[76253,85771] 6ns 439kb |L0.221| " - - "L0.128[85772,85772] 6ns 0b |L0.128| " - - "L0.232[85773,92926] 6ns 330kb |L0.232| " - - "L0.233[92927,109041] 6ns 743kb |--L0.233--| " + - "L0.128[85772,85772] 6ns 47b |L0.128| " + - "L0.232[85773,92924] 6ns 330kb |L0.232| " + - "L0.233[92925,109041] 6ns 743kb |--L0.233--| " - "L0.184[109042,114328] 6ns 244kb |L0.184|" - - "L0.143[114329,114329] 6ns 0b |L0.143|" + - "L0.143[114329,114329] 6ns 47b |L0.143|" - "L1 " - "L1.206[100,38176] 5ns 8mb|----------L1.206-----------| " - - "L1.207[38177,46513] 5ns 2mb |L1.207| " - - "L1.218[46514,76252] 5ns 6mb |-------L1.218--------| " - - "L1.219[76253,92926] 5ns 4mb |--L1.219---| " - - "L1.205[92927,114329] 5ns 5mb |----L1.205----| " + - "L1.207[38177,46512] 5ns 2mb |L1.207| " + - "L1.218[46513,76252] 5ns 6mb |-------L1.218--------| " + - "L1.219[76253,92924] 5ns 4mb |--L1.219---| " + - "L1.205[92925,114329] 5ns 5mb |----L1.205----| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[100,38491] 6ns 10mb |------------L1.?------------| " - - "L1.?[38492,76882] 6ns 10mb |------------L1.?------------| " - - "L1.?[76883,114329] 6ns 10mb |-----------L1.?------------| " + - "L1.?[100,38490] 6ns 10mb |------------L1.?------------| " + - "L1.?[38491,76880] 6ns 10mb |------------L1.?------------| " + - "L1.?[76881,114329] 6ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 17 files: L0.46, L0.101, L0.128, L0.143, L0.184, L1.205, L1.206, L1.207, L0.208, L1.218, L1.219, L0.220, L0.221, L0.230, L0.231, L0.232, L0.233" - " Creating 3 files" - "**** Simulation run 105, type=split(HighL0OverlapTotalBacklog)(split_times=[38176]). 
1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.277[100,38491] 6ns |-----------------------------------------L1.277-----------------------------------------|" + - "L1.277[100,38490] 6ns |-----------------------------------------L1.277-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[100,38176] 6ns 10mb |-----------------------------------------L1.?------------------------------------------| " - - "L1.?[38177,38491] 6ns 84kb |L1.?|" + - "L1.?[38177,38490] 6ns 84kb |L1.?|" - "**** Simulation run 106, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.278[38492,76882] 6ns |-----------------------------------------L1.278-----------------------------------------|" + - "L1.278[38491,76880] 6ns |-----------------------------------------L1.278-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[38492,76252] 6ns 10mb|-----------------------------------------L1.?-----------------------------------------| " - - "L1.?[76253,76882] 6ns 168kb |L1.?|" + - "L1.?[38491,76252] 6ns 10mb|-----------------------------------------L1.?-----------------------------------------| " + - "L1.?[76253,76880] 6ns 168kb |L1.?|" - "Committing partition 1:" - " Soft Deleting 2 files: L1.277, L1.278" - " Creating 4 files" - - "**** Simulation run 107, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "**** Simulation run 107, type=split(ReduceOverlap)(split_times=[38490]). 1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.234[38177,46513] 7ns |-----------------------------------------L0.234-----------------------------------------|" + - "L0.234[38177,46512] 7ns |-----------------------------------------L0.234-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 7ns 14kb|L0.?| " - - "L0.?[38492,46513] 7ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 108, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 7ns 14kb|L0.?| " + - "L0.?[38491,46512] 7ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 108, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.223[76253,85771] 7ns |-----------------------------------------L0.223-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 7ns 29kb|L0.?| " - - "L0.?[76883,85771] 7ns 410kb |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 109, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "L0.?[76253,76880] 7ns 29kb|L0.?| " + - "L0.?[76881,85771] 7ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "**** Simulation run 109, type=split(ReduceOverlap)(split_times=[38490]). 
1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.238[38177,46513] 8ns |-----------------------------------------L0.238-----------------------------------------|" + - "L0.238[38177,46512] 8ns |-----------------------------------------L0.238-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 8ns 14kb|L0.?| " - - "L0.?[38492,46513] 8ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 110, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 8ns 14kb|L0.?| " + - "L0.?[38491,46512] 8ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 110, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.225[76253,85771] 8ns |-----------------------------------------L0.225-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 8ns 29kb|L0.?| " - - "L0.?[76883,85771] 8ns 410kb |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 111, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "L0.?[76253,76880] 8ns 29kb|L0.?| " + - "L0.?[76881,85771] 8ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "**** Simulation run 111, type=split(ReduceOverlap)(split_times=[38490]). 1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.242[38177,46513] 9ns |-----------------------------------------L0.242-----------------------------------------|" + - "L0.242[38177,46512] 9ns |-----------------------------------------L0.242-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 9ns 14kb|L0.?| " - - "L0.?[38492,46513] 9ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 112, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 9ns 14kb|L0.?| " + - "L0.?[38491,46512] 9ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 112, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.227[76253,85771] 9ns |-----------------------------------------L0.227-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 9ns 29kb|L0.?| " - - "L0.?[76883,85771] 9ns 410kb |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 113, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "L0.?[76253,76880] 9ns 29kb|L0.?| " + - "L0.?[76881,85771] 9ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "**** Simulation run 113, type=split(ReduceOverlap)(split_times=[38490]). 
1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.246[38177,46513] 10ns |-----------------------------------------L0.246-----------------------------------------|" + - "L0.246[38177,46512] 10ns |-----------------------------------------L0.246-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 10ns 14kb|L0.?| " - - "L0.?[38492,46513] 10ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 114, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 10ns 14kb|L0.?| " + - "L0.?[38491,46512] 10ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 114, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.229[76253,85771] 10ns |-----------------------------------------L0.229-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 10ns 29kb|L0.?| " - - "L0.?[76883,85771] 10ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "L0.?[76253,76880] 10ns 29kb|L0.?| " + - "L0.?[76881,85771] 10ns 410kb |---------------------------------------L0.?---------------------------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.223, L0.225, L0.227, L0.229, L0.234, L0.238, L0.242, L0.246" - " Creating 16 files" - - "**** Simulation run 115, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[149665, 185000]). 14 Input Files, 24mb total:" + - "**** Simulation run 115, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[149666, 185002]). 
14 Input Files, 24mb total:" - "L0 " - - "L0.144[114330,135300] 6ns 967kb|-------L0.144-------| " - - "L0.104[135301,142885] 6ns 350kb |L0.104| " - - "L0.155[142886,142886] 6ns 0b |L0.155| " - - "L0.257[142887,156350] 6ns 621kb |---L0.257---| " - - "L0.258[156351,160867] 6ns 208kb |L0.258| " + - "L0.144[114330,135298] 6ns 967kb|-------L0.144-------| " + - "L0.104[135299,142885] 6ns 350kb |L0.104| " + - "L0.155[142886,142886] 6ns 47b |L0.155| " + - "L0.257[142887,156351] 6ns 621kb |---L0.257---| " + - "L0.258[156352,160867] 6ns 208kb |L0.258| " - "L0.186[160868,171442] 6ns 488kb |-L0.186--| " - - "L0.169[171443,171443] 6ns 0b |L0.169| " - - "L0.259[171444,198370] 6ns 1mb |----------L0.259----------| " - - "L0.260[198371,200000] 6ns 75kb |L0.260|" + - "L0.169[171443,171443] 6ns 47b |L0.169| " + - "L0.259[171444,198372] 6ns 1mb |----------L0.259----------| " + - "L0.260[198373,200000] 6ns 75kb |L0.260|" - "L1 " - "L1.253[114330,142886] 5ns 7mb|----------L1.253-----------| " - - "L1.254[142887,156350] 5ns 3mb |---L1.254---| " - - "L1.255[156351,171442] 5ns 4mb |---L1.255----| " - - "L1.256[171443,198370] 5ns 6mb |----------L1.256----------| " - - "L1.252[198371,200000] 5ns 397kb |L1.252|" + - "L1.254[142887,156351] 5ns 3mb |---L1.254---| " + - "L1.255[156352,171442] 5ns 4mb |---L1.255----| " + - "L1.256[171443,198372] 5ns 6mb |----------L1.256----------| " + - "L1.252[198373,200000] 5ns 397kb |L1.252|" - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L1 " - - "L1.?[114330,149665] 6ns 10mb|---------------L1.?----------------| " - - "L1.?[149666,185000] 6ns 10mb |---------------L1.?----------------| " - - "L1.?[185001,200000] 6ns 4mb |----L1.?-----| " + - "L1.?[114330,149666] 6ns 10mb|---------------L1.?----------------| " + - "L1.?[149667,185002] 6ns 10mb |---------------L1.?----------------| " + - "L1.?[185003,200000] 6ns 4mb |----L1.?-----| " - "Committing partition 1:" - " Soft Deleting 14 files: L0.104, L0.144, L0.155, L0.169, L0.186, L1.252, L1.253, L1.254, L1.255, L1.256, L0.257, L0.258, L0.259, L0.260" - " Creating 3 files" - "**** Simulation run 116, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.300[114330,149665] 6ns|-----------------------------------------L1.300-----------------------------------------|" + - "L1.300[114330,149666] 6ns|-----------------------------------------L1.300-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[114330,142886] 6ns 8mb|---------------------------------L1.?---------------------------------| " - - "L1.?[142887,149665] 6ns 2mb |-----L1.?------| " + - "L1.?[142887,149666] 6ns 2mb |-----L1.?------| " - "**** Simulation run 117, type=split(HighL0OverlapTotalBacklog)(split_times=[171442]). 
1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.301[149666,185000] 6ns|-----------------------------------------L1.301-----------------------------------------|" + - "L1.301[149667,185002] 6ns|-----------------------------------------L1.301-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[149666,171442] 6ns 6mb|------------------------L1.?-------------------------| " - - "L1.?[171443,185000] 6ns 4mb |--------------L1.?--------------| " + - "L1.?[149667,171442] 6ns 6mb|------------------------L1.?-------------------------| " + - "L1.?[171443,185002] 6ns 4mb |--------------L1.?--------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.300, L1.301" - " Creating 4 files" - - "**** Simulation run 118, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "**** Simulation run 118, type=split(ReduceOverlap)(split_times=[149666]). 1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.261[142887,156350] 7ns|----------------------------------------L0.261-----------------------------------------| " + - "L0.261[142887,156351] 7ns|-----------------------------------------L0.261-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 7ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 7ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 119, type=split(ReduceOverlap)(split_times=[185000]). 1 Input Files, 1mb total:" + - "L0.?[142887,149666] 7ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 7ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 119, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.263[171444,198370] 7ns|----------------------------------------L0.263-----------------------------------------| " + - "L0.263[171444,198372] 7ns|-----------------------------------------L0.263-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 7ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 7ns 617kb |-------------------L0.?-------------------| " - - "**** Simulation run 120, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "L0.?[171444,185002] 7ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 7ns 616kb |-------------------L0.?-------------------| " + - "**** Simulation run 120, type=split(ReduceOverlap)(split_times=[149666]). 1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.265[142887,156350] 8ns|----------------------------------------L0.265-----------------------------------------| " + - "L0.265[142887,156351] 8ns|-----------------------------------------L0.265-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 8ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 8ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 121, type=split(ReduceOverlap)(split_times=[185000]). 
1 Input Files, 1mb total:" + - "L0.?[142887,149666] 8ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 8ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 121, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.267[171444,198370] 8ns|----------------------------------------L0.267-----------------------------------------| " + - "L0.267[171444,198372] 8ns|-----------------------------------------L0.267-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 8ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 8ns 617kb |-------------------L0.?-------------------| " - - "**** Simulation run 122, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "L0.?[171444,185002] 8ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 8ns 616kb |-------------------L0.?-------------------| " + - "**** Simulation run 122, type=split(ReduceOverlap)(split_times=[149666]). 1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.269[142887,156350] 9ns|----------------------------------------L0.269-----------------------------------------| " + - "L0.269[142887,156351] 9ns|-----------------------------------------L0.269-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 9ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 9ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 123, type=split(ReduceOverlap)(split_times=[185000]). 1 Input Files, 1mb total:" + - "L0.?[142887,149666] 9ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 9ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 123, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.271[171444,198370] 9ns|----------------------------------------L0.271-----------------------------------------| " + - "L0.271[171444,198372] 9ns|-----------------------------------------L0.271-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 9ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 9ns 617kb |-------------------L0.?-------------------| " - - "**** Simulation run 124, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "L0.?[171444,185002] 9ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 9ns 616kb |-------------------L0.?-------------------| " + - "**** Simulation run 124, type=split(ReduceOverlap)(split_times=[149666]). 
1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.273[142887,156350] 10ns|----------------------------------------L0.273-----------------------------------------| " + - "L0.273[142887,156351] 10ns|-----------------------------------------L0.273-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 10ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 10ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 125, type=split(ReduceOverlap)(split_times=[185000]). 1 Input Files, 1mb total:" + - "L0.?[142887,149666] 10ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 10ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 125, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.275[171444,198370] 10ns|----------------------------------------L0.275-----------------------------------------| " + - "L0.275[171444,198372] 10ns|-----------------------------------------L0.275-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 10ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 10ns 617kb |-------------------L0.?-------------------| " + - "L0.?[171444,185002] 10ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 10ns 616kb |-------------------L0.?-------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.261, L0.263, L0.265, L0.267, L0.269, L0.271, L0.273, L0.275" - " Creating 16 files" - - "**** Simulation run 126, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[32834, 65568]). 12 Input Files, 23mb total:" + - "**** Simulation run 126, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[32832, 65564]). 
12 Input Files, 23mb total:" - "L0 " - "L0.53[100,28657] 7ns 1mb |-------------L0.53-------------| " - "L0.210[28658,38176] 7ns 439kb |-L0.210--| " - - "L0.284[38177,38491] 7ns 14kb |L0.284| " - - "L0.285[38492,46513] 7ns 370kb |L0.285-| " - - "L0.235[46514,57214] 7ns 493kb |--L0.235--| " - - "L0.105[57215,67700] 7ns 483kb |--L0.105--| " - - "L0.222[67701,76252] 7ns 394kb |-L0.222-| " - - "L0.286[76253,76882] 7ns 29kb |L0.286|" + - "L0.284[38177,38490] 7ns 14kb |L0.284| " + - "L0.285[38491,46512] 7ns 370kb |L0.285-| " + - "L0.235[46513,57214] 7ns 493kb |--L0.235--| " + - "L0.105[57215,67699] 7ns 483kb |--L0.105--| " + - "L0.222[67700,76252] 7ns 394kb |-L0.222-| " + - "L0.286[76253,76880] 7ns 29kb |L0.286|" - "L1 " - "L1.280[100,38176] 6ns 10mb|------------------L1.280------------------| " - - "L1.281[38177,38491] 6ns 84kb |L1.281| " - - "L1.282[38492,76252] 6ns 10mb |------------------L1.282------------------| " - - "L1.283[76253,76882] 6ns 168kb |L1.283|" + - "L1.281[38177,38490] 6ns 84kb |L1.281| " + - "L1.282[38491,76252] 6ns 10mb |------------------L1.282------------------| " + - "L1.283[76253,76880] 6ns 168kb |L1.283|" - "**** 3 Output Files (parquet_file_id not yet assigned), 23mb total:" - "L1 " - - "L1.?[100,32834] 7ns 10mb |----------------L1.?----------------| " - - "L1.?[32835,65568] 7ns 10mb |----------------L1.?----------------| " - - "L1.?[65569,76882] 7ns 3mb |---L1.?----| " + - "L1.?[100,32832] 7ns 10mb |----------------L1.?----------------| " + - "L1.?[32833,65564] 7ns 10mb |----------------L1.?----------------| " + - "L1.?[65565,76880] 7ns 3mb |---L1.?----| " - "Committing partition 1:" - " Soft Deleting 12 files: L0.53, L0.105, L0.210, L0.222, L0.235, L1.280, L1.281, L1.282, L1.283, L0.284, L0.285, L0.286" - " Creating 3 files" - - "**** Simulation run 127, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 10mb total:" + - "**** Simulation run 127, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.323[100,32834] 7ns |-----------------------------------------L1.323-----------------------------------------|" + - "L1.323[100,32832] 7ns |-----------------------------------------L1.323-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[100,25694] 7ns 8mb |--------------------------------L1.?--------------------------------| " - - "L1.?[25695,32834] 7ns 2mb |------L1.?-------| " - - "**** Simulation run 128, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 1mb total:" + - "L1.?[100,25693] 7ns 8mb |--------------------------------L1.?--------------------------------| " + - "L1.?[25694,32832] 7ns 2mb |------L1.?-------| " + - "**** Simulation run 128, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.60[100,28657] 8ns |-----------------------------------------L0.60------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[100,25694] 8ns 1mb |-------------------------------------L0.?-------------------------------------| " - - "L0.?[25695,28657] 8ns 137kb |-L0.?--| " - - "**** Simulation run 129, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 
1 Input Files, 1mb total:" + - "L0.?[100,25693] 8ns 1mb |-------------------------------------L0.?-------------------------------------| " + - "L0.?[25694,28657] 8ns 137kb |-L0.?--| " + - "**** Simulation run 129, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.67[100,28657] 9ns |-----------------------------------------L0.67------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[100,25694] 9ns 1mb |-------------------------------------L0.?-------------------------------------| " - - "L0.?[25695,28657] 9ns 137kb |-L0.?--| " - - "**** Simulation run 130, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 1mb total:" + - "L0.?[100,25693] 9ns 1mb |-------------------------------------L0.?-------------------------------------| " + - "L0.?[25694,28657] 9ns 137kb |-L0.?--| " + - "**** Simulation run 130, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.74[100,28657] 10ns |-----------------------------------------L0.74------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[100,25694] 10ns 1mb |-------------------------------------L0.?-------------------------------------| " - - "L0.?[25695,28657] 10ns 137kb |-L0.?--| " - - "**** Simulation run 131, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 10mb total:" + - "L0.?[100,25693] 10ns 1mb |-------------------------------------L0.?-------------------------------------| " + - "L0.?[25694,28657] 10ns 137kb |-L0.?--| " + - "**** Simulation run 131, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.324[32835,65568] 7ns |-----------------------------------------L1.324-----------------------------------------|" + - "L1.324[32833,65564] 7ns |-----------------------------------------L1.324-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[32835,51288] 7ns 6mb|----------------------L1.?----------------------| " - - "L1.?[51289,65568] 7ns 4mb |----------------L1.?-----------------| " - - "**** Simulation run 132, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 493kb total:" + - "L1.?[32833,51286] 7ns 6mb|----------------------L1.?----------------------| " + - "L1.?[51287,65564] 7ns 4mb |----------------L1.?-----------------| " + - "**** Simulation run 132, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 493kb total:" - "L0, all files 493kb " - - "L0.239[46514,57214] 8ns |-----------------------------------------L0.239-----------------------------------------|" + - "L0.239[46513,57214] 8ns |-----------------------------------------L0.239-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 493kb total:" - "L0 " - - "L0.?[46514,51288] 8ns 220kb|-----------------L0.?-----------------| " - - "L0.?[51289,57214] 8ns 273kb |---------------------L0.?----------------------| " - - "**** Simulation run 133, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 
1 Input Files, 493kb total:" + - "L0.?[46513,51286] 8ns 220kb|-----------------L0.?-----------------| " + - "L0.?[51287,57214] 8ns 273kb |---------------------L0.?----------------------| " + - "**** Simulation run 133, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 493kb total:" - "L0, all files 493kb " - - "L0.243[46514,57214] 9ns |-----------------------------------------L0.243-----------------------------------------|" + - "L0.243[46513,57214] 9ns |-----------------------------------------L0.243-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 493kb total:" - "L0 " - - "L0.?[46514,51288] 9ns 220kb|-----------------L0.?-----------------| " - - "L0.?[51289,57214] 9ns 273kb |---------------------L0.?----------------------| " - - "**** Simulation run 134, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 493kb total:" + - "L0.?[46513,51286] 9ns 220kb|-----------------L0.?-----------------| " + - "L0.?[51287,57214] 9ns 273kb |---------------------L0.?----------------------| " + - "**** Simulation run 134, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 493kb total:" - "L0, all files 493kb " - - "L0.247[46514,57214] 10ns |-----------------------------------------L0.247-----------------------------------------|" + - "L0.247[46513,57214] 10ns |-----------------------------------------L0.247-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 493kb total:" - "L0 " - - "L0.?[46514,51288] 10ns 220kb|-----------------L0.?-----------------| " - - "L0.?[51289,57214] 10ns 273kb |---------------------L0.?----------------------| " + - "L0.?[46513,51286] 10ns 220kb|-----------------L0.?-----------------| " + - "L0.?[51287,57214] 10ns 273kb |---------------------L0.?----------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.60, L0.67, L0.74, L0.239, L0.243, L0.247, L1.323, L1.324" - " Creating 16 files" - - "**** Simulation run 135, type=split(ReduceOverlap)(split_times=[32834]). 1 Input Files, 439kb total:" + - "**** Simulation run 135, type=split(ReduceOverlap)(split_times=[32832]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.212[28658,38176] 8ns |-----------------------------------------L0.212-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[28658,32834] 8ns 193kb|----------------L0.?-----------------| " - - "L0.?[32835,38176] 8ns 246kb |----------------------L0.?----------------------| " - - "**** Simulation run 136, type=split(ReduceOverlap)(split_times=[65568]). 1 Input Files, 483kb total:" + - "L0.?[28658,32832] 8ns 192kb|----------------L0.?-----------------| " + - "L0.?[32833,38176] 8ns 246kb |----------------------L0.?----------------------| " + - "**** Simulation run 136, type=split(ReduceOverlap)(split_times=[65564]). 
1 Input Files, 483kb total:" - "L0, all files 483kb " - - "L0.109[57215,67700] 8ns |-----------------------------------------L0.109-----------------------------------------|" + - "L0.109[57215,67699] 8ns |-----------------------------------------L0.109-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 483kb total:" - "L0 " - - "L0.?[57215,65568] 8ns 385kb|--------------------------------L0.?---------------------------------| " - - "L0.?[65569,67700] 8ns 98kb |------L0.?------| " - - "**** Simulation run 137, type=split(ReduceOverlap)(split_times=[32834]). 1 Input Files, 439kb total:" + - "L0.?[57215,65564] 8ns 385kb|--------------------------------L0.?---------------------------------| " + - "L0.?[65565,67699] 8ns 98kb |------L0.?------| " + - "**** Simulation run 137, type=split(ReduceOverlap)(split_times=[32832]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.214[28658,38176] 9ns |-----------------------------------------L0.214-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[28658,32834] 9ns 193kb|----------------L0.?-----------------| " - - "L0.?[32835,38176] 9ns 246kb |----------------------L0.?----------------------| " - - "**** Simulation run 138, type=split(ReduceOverlap)(split_times=[65568]). 1 Input Files, 483kb total:" + - "L0.?[28658,32832] 9ns 192kb|----------------L0.?-----------------| " + - "L0.?[32833,38176] 9ns 246kb |----------------------L0.?----------------------| " + - "**** Simulation run 138, type=split(ReduceOverlap)(split_times=[65564]). 1 Input Files, 483kb total:" - "L0, all files 483kb " - - "L0.113[57215,67700] 9ns |-----------------------------------------L0.113-----------------------------------------|" + - "L0.113[57215,67699] 9ns |-----------------------------------------L0.113-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 483kb total:" - "L0 " - - "L0.?[57215,65568] 9ns 385kb|--------------------------------L0.?---------------------------------| " - - "L0.?[65569,67700] 9ns 98kb |------L0.?------| " - - "**** Simulation run 139, type=split(ReduceOverlap)(split_times=[32834]). 1 Input Files, 439kb total:" + - "L0.?[57215,65564] 9ns 385kb|--------------------------------L0.?---------------------------------| " + - "L0.?[65565,67699] 9ns 98kb |------L0.?------| " + - "**** Simulation run 139, type=split(ReduceOverlap)(split_times=[32832]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.216[28658,38176] 10ns |-----------------------------------------L0.216-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[28658,32834] 10ns 193kb|----------------L0.?-----------------| " - - "L0.?[32835,38176] 10ns 246kb |----------------------L0.?----------------------| " - - "**** Simulation run 140, type=split(ReduceOverlap)(split_times=[65568]). 1 Input Files, 483kb total:" + - "L0.?[28658,32832] 10ns 192kb|----------------L0.?-----------------| " + - "L0.?[32833,38176] 10ns 246kb |----------------------L0.?----------------------| " + - "**** Simulation run 140, type=split(ReduceOverlap)(split_times=[65564]). 
1 Input Files, 483kb total:" - "L0, all files 483kb " - - "L0.117[57215,67700] 10ns |-----------------------------------------L0.117-----------------------------------------|" + - "L0.117[57215,67699] 10ns |-----------------------------------------L0.117-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 483kb total:" - "L0 " - - "L0.?[57215,65568] 10ns 385kb|--------------------------------L0.?---------------------------------| " - - "L0.?[65569,67700] 10ns 98kb |------L0.?------| " + - "L0.?[57215,65564] 10ns 385kb|--------------------------------L0.?---------------------------------| " + - "L0.?[65565,67699] 10ns 98kb |------L0.?------| " - "Committing partition 1:" - " Soft Deleting 6 files: L0.109, L0.113, L0.117, L0.212, L0.214, L0.216" - " Creating 12 files" - - "**** Simulation run 141, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[108723, 140563]). 16 Input Files, 30mb total:" + - "**** Simulation run 141, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[108721, 140561]). 16 Input Files, 30mb total:" - "L0 " - - "L0.287[76883,85771] 7ns 410kb|L0.287| " - - "L0.130[85772,85772] 7ns 0b |L0.130| " - - "L0.236[85773,92926] 7ns 330kb |L0.236| " - - "L0.237[92927,109041] 7ns 743kb |---L0.237----| " + - "L0.287[76881,85771] 7ns 410kb|L0.287| " + - "L0.130[85772,85772] 7ns 47b |L0.130| " + - "L0.236[85773,92924] 7ns 330kb |L0.236| " + - "L0.237[92925,109041] 7ns 743kb |---L0.237----| " - "L0.188[109042,114328] 7ns 244kb |L0.188| " - - "L0.145[114329,114329] 7ns 0b |L0.145| " - - "L0.146[114330,135300] 7ns 967kb |-----L0.146------| " - - "L0.108[135301,142885] 7ns 350kb |L0.108| " - - "L0.157[142886,142886] 7ns 0b |L0.157| " - - "L0.307[142887,149665] 7ns 312kb |L0.307| " - - "L0.308[149666,156350] 7ns 308kb |L0.308| " - - "L0.262[156351,160867] 7ns 208kb |L0.262| " - - "L1 " - - "L1.279[76883,114329] 6ns 10mb|-------------L1.279--------------| " + - "L0.145[114329,114329] 7ns 47b |L0.145| " + - "L0.146[114330,135298] 7ns 967kb |-----L0.146------| " + - "L0.108[135299,142885] 7ns 350kb |L0.108| " + - "L0.157[142886,142886] 7ns 47b |L0.157| " + - "L0.307[142887,149666] 7ns 313kb |L0.307| " + - "L0.308[149667,156351] 7ns 308kb |L0.308| " + - "L0.262[156352,160867] 7ns 208kb |L0.262| " + - "L1 " + - "L1.279[76881,114329] 6ns 10mb|-------------L1.279--------------| " - "L1.303[114330,142886] 6ns 8mb |---------L1.303----------| " - - "L1.304[142887,149665] 6ns 2mb |L1.304| " - - "L1.305[149666,171442] 6ns 6mb |------L1.305------| " + - "L1.304[142887,149666] 6ns 2mb |L1.304| " + - "L1.305[149667,171442] 6ns 6mb |------L1.305------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[76883,108723] 7ns 10mb|------------L1.?------------| " - - "L1.?[108724,140563] 7ns 10mb |------------L1.?------------| " - - "L1.?[140564,171442] 7ns 10mb |-----------L1.?------------| " + - "L1.?[76881,108721] 7ns 10mb|------------L1.?------------| " + - "L1.?[108722,140561] 7ns 10mb |------------L1.?------------| " + - "L1.?[140562,171442] 7ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 16 files: L0.108, L0.130, L0.145, L0.146, L0.157, L0.188, L0.236, L0.237, L0.262, L1.279, L0.287, L1.303, L1.304, L1.305, L0.307, L0.308" - " Creating 3 files" - - "**** Simulation run 142, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 
1 Input Files, 10mb total:" + - "**** Simulation run 142, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.354[76883,108723] 7ns |-----------------------------------------L1.354-----------------------------------------|" + - "L1.354[76881,108721] 7ns |-----------------------------------------L1.354-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[76883,108402] 7ns 10mb|-----------------------------------------L1.?------------------------------------------| " - - "L1.?[108403,108723] 7ns 103kb |L1.?|" - - "**** Simulation run 143, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 1 Input Files, 743kb total:" + - "L1.?[76881,108401] 7ns 10mb|-----------------------------------------L1.?------------------------------------------| " + - "L1.?[108402,108721] 7ns 103kb |L1.?|" + - "**** Simulation run 143, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 1 Input Files, 743kb total:" - "L0, all files 743kb " - - "L0.241[92927,109041] 8ns |-----------------------------------------L0.241-----------------------------------------|" + - "L0.241[92925,109041] 8ns |-----------------------------------------L0.241-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 743kb total:" - "L0 " - - "L0.?[92927,108402] 8ns 714kb|----------------------------------------L0.?----------------------------------------| " - - "L0.?[108403,109041] 8ns 29kb |L0.?|" - - "**** Simulation run 144, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 1 Input Files, 743kb total:" + - "L0.?[92925,108401] 8ns 714kb|----------------------------------------L0.?----------------------------------------| " + - "L0.?[108402,109041] 8ns 30kb |L0.?|" + - "**** Simulation run 144, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 1 Input Files, 743kb total:" - "L0, all files 743kb " - - "L0.245[92927,109041] 9ns |-----------------------------------------L0.245-----------------------------------------|" + - "L0.245[92925,109041] 9ns |-----------------------------------------L0.245-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 743kb total:" - "L0 " - - "L0.?[92927,108402] 9ns 714kb|----------------------------------------L0.?----------------------------------------| " - - "L0.?[108403,109041] 9ns 29kb |L0.?|" - - "**** Simulation run 145, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 1 Input Files, 743kb total:" + - "L0.?[92925,108401] 9ns 714kb|----------------------------------------L0.?----------------------------------------| " + - "L0.?[108402,109041] 9ns 30kb |L0.?|" + - "**** Simulation run 145, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 
1 Input Files, 743kb total:" - "L0, all files 743kb " - - "L0.249[92927,109041] 10ns|-----------------------------------------L0.249-----------------------------------------|" + - "L0.249[92925,109041] 10ns|-----------------------------------------L0.249-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 743kb total:" - "L0 " - - "L0.?[92927,108402] 10ns 714kb|----------------------------------------L0.?----------------------------------------| " - - "L0.?[108403,109041] 10ns 29kb |L0.?|" + - "L0.?[92925,108401] 10ns 714kb|----------------------------------------L0.?----------------------------------------| " + - "L0.?[108402,109041] 10ns 30kb |L0.?|" - "**** Simulation run 146, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.355[108724,140563] 7ns|-----------------------------------------L1.355-----------------------------------------|" + - "L1.355[108722,140561] 7ns|-----------------------------------------L1.355-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[108724,139921] 7ns 10mb|-----------------------------------------L1.?-----------------------------------------| " - - "L1.?[139922,140563] 7ns 206kb |L1.?|" + - "L1.?[108722,139921] 7ns 10mb|-----------------------------------------L1.?-----------------------------------------| " + - "L1.?[139922,140561] 7ns 206kb |L1.?|" - "**** Simulation run 147, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 1 Input Files, 350kb total:" - "L0, all files 350kb " - - "L0.112[135301,142885] 8ns|-----------------------------------------L0.112-----------------------------------------|" + - "L0.112[135299,142885] 8ns|-----------------------------------------L0.112-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 350kb total:" - "L0 " - - "L0.?[135301,139921] 8ns 213kb|------------------------L0.?------------------------| " + - "L0.?[135299,139921] 8ns 213kb|------------------------L0.?------------------------| " - "L0.?[139922,142885] 8ns 137kb |--------------L0.?---------------| " - "**** Simulation run 148, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 1 Input Files, 350kb total:" - "L0, all files 350kb " - - "L0.116[135301,142885] 9ns|-----------------------------------------L0.116-----------------------------------------|" + - "L0.116[135299,142885] 9ns|-----------------------------------------L0.116-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 350kb total:" - "L0 " - - "L0.?[135301,139921] 9ns 213kb|------------------------L0.?------------------------| " + - "L0.?[135299,139921] 9ns 213kb|------------------------L0.?------------------------| " - "L0.?[139922,142885] 9ns 137kb |--------------L0.?---------------| " - "**** Simulation run 149, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 
1 Input Files, 350kb total:" - "L0, all files 350kb " - - "L0.120[135301,142885] 10ns|-----------------------------------------L0.120-----------------------------------------|" + - "L0.120[135299,142885] 10ns|-----------------------------------------L0.120-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 350kb total:" - "L0 " - - "L0.?[135301,139921] 10ns 213kb|------------------------L0.?------------------------| " + - "L0.?[135299,139921] 10ns 213kb|------------------------L0.?------------------------| " - "L0.?[139922,142885] 10ns 137kb |--------------L0.?---------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.112, L0.116, L0.120, L0.241, L0.245, L0.249, L1.354, L1.355" - " Creating 16 files" - - "**** Simulation run 150, type=split(ReduceOverlap)(split_times=[108723]). 1 Input Files, 29kb total:" - - "L0, all files 29kb " - - "L0.360[108403,109041] 8ns|-----------------------------------------L0.360-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 29kb total:" - - "L0 " - - "L0.?[108403,108723] 8ns 15kb|-------------------L0.?--------------------| " - - "L0.?[108724,109041] 8ns 15kb |-------------------L0.?-------------------| " - - "**** Simulation run 151, type=split(ReduceOverlap)(split_times=[140563]). 1 Input Files, 137kb total:" + - "**** Simulation run 150, type=split(ReduceOverlap)(split_times=[108721]). 1 Input Files, 30kb total:" + - "L0, all files 30kb " + - "L0.360[108402,109041] 8ns|-----------------------------------------L0.360-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30kb total:" + - "L0, all files 15kb " + - "L0.?[108402,108721] 8ns |-------------------L0.?-------------------| " + - "L0.?[108722,109041] 8ns |-------------------L0.?-------------------| " + - "**** Simulation run 151, type=split(ReduceOverlap)(split_times=[140561]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - "L0.368[139922,142885] 8ns|-----------------------------------------L0.368-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[139922,140563] 8ns 30kb|------L0.?-------| " - - "L0.?[140564,142885] 8ns 107kb |--------------------------------L0.?--------------------------------| " - - "**** Simulation run 152, type=split(ReduceOverlap)(split_times=[108723]). 1 Input Files, 29kb total:" - - "L0, all files 29kb " - - "L0.362[108403,109041] 9ns|-----------------------------------------L0.362-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 29kb total:" - - "L0 " - - "L0.?[108403,108723] 9ns 15kb|-------------------L0.?--------------------| " - - "L0.?[108724,109041] 9ns 15kb |-------------------L0.?-------------------| " - - "**** Simulation run 153, type=split(ReduceOverlap)(split_times=[140563]). 1 Input Files, 137kb total:" + - "L0.?[139922,140561] 8ns 30kb|------L0.?-------| " + - "L0.?[140562,142885] 8ns 107kb |--------------------------------L0.?--------------------------------| " + - "**** Simulation run 152, type=split(ReduceOverlap)(split_times=[108721]). 
1 Input Files, 30kb total:" + - "L0, all files 30kb " + - "L0.362[108402,109041] 9ns|-----------------------------------------L0.362-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30kb total:" + - "L0, all files 15kb " + - "L0.?[108402,108721] 9ns |-------------------L0.?-------------------| " + - "L0.?[108722,109041] 9ns |-------------------L0.?-------------------| " + - "**** Simulation run 153, type=split(ReduceOverlap)(split_times=[140561]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - "L0.370[139922,142885] 9ns|-----------------------------------------L0.370-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[139922,140563] 9ns 30kb|------L0.?-------| " - - "L0.?[140564,142885] 9ns 107kb |--------------------------------L0.?--------------------------------| " - - "**** Simulation run 154, type=split(ReduceOverlap)(split_times=[108723]). 1 Input Files, 29kb total:" - - "L0, all files 29kb " - - "L0.364[108403,109041] 10ns|-----------------------------------------L0.364-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 29kb total:" - - "L0 " - - "L0.?[108403,108723] 10ns 15kb|-------------------L0.?--------------------| " - - "L0.?[108724,109041] 10ns 15kb |-------------------L0.?-------------------| " - - "**** Simulation run 155, type=split(ReduceOverlap)(split_times=[140563]). 1 Input Files, 137kb total:" + - "L0.?[139922,140561] 9ns 30kb|------L0.?-------| " + - "L0.?[140562,142885] 9ns 107kb |--------------------------------L0.?--------------------------------| " + - "**** Simulation run 154, type=split(ReduceOverlap)(split_times=[108721]). 1 Input Files, 30kb total:" + - "L0, all files 30kb " + - "L0.364[108402,109041] 10ns|-----------------------------------------L0.364-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30kb total:" + - "L0, all files 15kb " + - "L0.?[108402,108721] 10ns |-------------------L0.?-------------------| " + - "L0.?[108722,109041] 10ns |-------------------L0.?-------------------| " + - "**** Simulation run 155, type=split(ReduceOverlap)(split_times=[140561]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - "L0.372[139922,142885] 10ns|-----------------------------------------L0.372-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[139922,140563] 10ns 30kb|------L0.?-------| " - - "L0.?[140564,142885] 10ns 107kb |--------------------------------L0.?--------------------------------| " + - "L0.?[139922,140561] 10ns 30kb|------L0.?-------| " + - "L0.?[140562,142885] 10ns 107kb |--------------------------------L0.?--------------------------------| " - "Committing partition 1:" - " Soft Deleting 6 files: L0.360, L0.362, L0.364, L0.368, L0.370, L0.372" - " Creating 12 files" - "**** Simulation run 156, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[170977]). 
8 Input Files, 20mb total:" - "L0 " - "L0.190[160868,171442] 7ns 488kb |----L0.190----| " - - "L0.171[171443,171443] 7ns 0b |L0.171| " - - "L0.309[171444,185000] 7ns 625kb |------L0.309------| " - - "L0.310[185001,198370] 7ns 617kb |------L0.310------| " - - "L0.264[198371,200000] 7ns 75kb |L0.264|" - - "L1 " - - "L1.356[140564,171442] 7ns 10mb|-------------------L1.356-------------------| " - - "L1.306[171443,185000] 6ns 4mb |------L1.306------| " - - "L1.302[185001,200000] 6ns 4mb |-------L1.302-------| " + - "L0.171[171443,171443] 7ns 47b |L0.171| " + - "L0.309[171444,185002] 7ns 625kb |------L0.309------| " + - "L0.310[185003,198372] 7ns 616kb |------L0.310------| " + - "L0.264[198373,200000] 7ns 75kb |L0.264|" + - "L1 " + - "L1.356[140562,171442] 7ns 10mb|-------------------L1.356-------------------| " + - "L1.306[171443,185002] 6ns 4mb |------L1.306------| " + - "L1.302[185003,200000] 6ns 4mb |-------L1.302-------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 20mb total:" - "L1 " - - "L1.?[140564,170977] 7ns 10mb|--------------------L1.?--------------------| " + - "L1.?[140562,170977] 7ns 10mb|--------------------L1.?--------------------| " - "L1.?[170978,200000] 7ns 10mb |------------------L1.?-------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.171, L0.190, L0.264, L1.302, L1.306, L0.309, L0.310, L1.356" @@ -1725,160 +1723,160 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "Committing partition 1:" - " Soft Deleting 3 files: L0.194, L0.198, L0.202" - " Creating 6 files" - - "**** Simulation run 160, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[28630, 57160]). 17 Input Files, 27mb total:" - - "L0 " - - "L0.328[100,25694] 8ns 1mb|-----------L0.328-----------| " - - "L0.329[25695,28657] 8ns 137kb |L0.329| " - - "L0.342[28658,32834] 8ns 193kb |L0.342| " - - "L0.343[32835,38176] 8ns 246kb |L0.343| " - - "L0.288[38177,38491] 8ns 14kb |L0.288| " - - "L0.289[38492,46513] 8ns 370kb |L0.289-| " - - "L0.336[46514,51288] 8ns 220kb |L0.336| " - - "L0.337[51289,57214] 8ns 273kb |L0.337| " - - "L0.344[57215,65568] 8ns 385kb |L0.344-| " - - "L0.345[65569,67700] 8ns 98kb |L0.345| " - - "L0.224[67701,76252] 8ns 394kb |-L0.224-| " - - "L0.290[76253,76882] 8ns 29kb |L0.290|" - - "L1 " - - "L1.326[100,25694] 7ns 8mb|-----------L1.326-----------| " - - "L1.327[25695,32834] 7ns 2mb |L1.327| " - - "L1.334[32835,51288] 7ns 6mb |------L1.334-------| " - - "L1.335[51289,65568] 7ns 4mb |----L1.335----| " - - "L1.325[65569,76882] 7ns 3mb |--L1.325---| " + - "**** Simulation run 160, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[28628, 57156]). 
17 Input Files, 27mb total:" + - "L0 " + - "L0.328[100,25693] 8ns 1mb|----------L0.328-----------| " + - "L0.329[25694,28657] 8ns 137kb |L0.329| " + - "L0.342[28658,32832] 8ns 192kb |L0.342| " + - "L0.343[32833,38176] 8ns 246kb |L0.343| " + - "L0.288[38177,38490] 8ns 14kb |L0.288| " + - "L0.289[38491,46512] 8ns 370kb |L0.289-| " + - "L0.336[46513,51286] 8ns 220kb |L0.336| " + - "L0.337[51287,57214] 8ns 273kb |L0.337| " + - "L0.344[57215,65564] 8ns 385kb |L0.344-| " + - "L0.345[65565,67699] 8ns 98kb |L0.345| " + - "L0.224[67700,76252] 8ns 394kb |-L0.224-| " + - "L0.290[76253,76880] 8ns 29kb |L0.290|" + - "L1 " + - "L1.326[100,25693] 7ns 8mb|----------L1.326-----------| " + - "L1.327[25694,32832] 7ns 2mb |L1.327| " + - "L1.334[32833,51286] 7ns 6mb |------L1.334-------| " + - "L1.335[51287,65564] 7ns 4mb |----L1.335----| " + - "L1.325[65565,76880] 7ns 3mb |--L1.325---| " - "**** 3 Output Files (parquet_file_id not yet assigned), 27mb total:" - "L1 " - - "L1.?[100,28630] 8ns 10mb |-------------L1.?--------------| " - - "L1.?[28631,57160] 8ns 10mb |-------------L1.?--------------| " - - "L1.?[57161,76882] 8ns 7mb |--------L1.?---------| " + - "L1.?[100,28628] 8ns 10mb |-------------L1.?--------------| " + - "L1.?[28629,57156] 8ns 10mb |-------------L1.?--------------| " + - "L1.?[57157,76880] 8ns 7mb |--------L1.?---------| " - "Committing partition 1:" - " Soft Deleting 17 files: L0.224, L0.288, L0.289, L0.290, L1.325, L1.326, L1.327, L0.328, L0.329, L1.334, L1.335, L0.336, L0.337, L0.342, L0.343, L0.344, L0.345" - " Creating 3 files" - - "**** Simulation run 161, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 10mb total:" + - "**** Simulation run 161, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.393[100,28630] 8ns |-----------------------------------------L1.393-----------------------------------------|" + - "L1.393[100,28628] 8ns |-----------------------------------------L1.393-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[100,25694] 8ns 9mb |-------------------------------------L1.?-------------------------------------| " - - "L1.?[25695,28630] 8ns 1mb |-L1.?--| " - - "**** Simulation run 162, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 10mb total:" + - "L1.?[100,25693] 8ns 9mb |-------------------------------------L1.?-------------------------------------| " + - "L1.?[25694,28628] 8ns 1mb |-L1.?--| " + - "**** Simulation run 162, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.394[28631,57160] 8ns |-----------------------------------------L1.394-----------------------------------------|" + - "L1.394[28629,57156] 8ns |-----------------------------------------L1.394-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[28631,51288] 8ns 8mb|--------------------------------L1.?---------------------------------| " - - "L1.?[51289,57160] 8ns 2mb |------L1.?------| " + - "L1.?[28629,51286] 8ns 8mb|--------------------------------L1.?---------------------------------| " + - "L1.?[51287,57156] 8ns 2mb |------L1.?------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.393, L1.394" - " Creating 4 files" - - "**** Simulation run 163, type=split(ReduceOverlap)(split_times=[28630]). 
1 Input Files, 137kb total:" + - "**** Simulation run 163, type=split(ReduceOverlap)(split_times=[28628]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - - "L0.331[25695,28657] 9ns |-----------------------------------------L0.331-----------------------------------------|" + - "L0.331[25694,28657] 9ns |-----------------------------------------L0.331-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[25695,28630] 9ns 135kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[28631,28657] 9ns 1kb |L0.?|" - - "**** Simulation run 164, type=split(ReduceOverlap)(split_times=[57160]). 1 Input Files, 273kb total:" + - "L0.?[25694,28628] 9ns 135kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[28629,28657] 9ns 1kb |L0.?|" + - "**** Simulation run 164, type=split(ReduceOverlap)(split_times=[57156]). 1 Input Files, 273kb total:" - "L0, all files 273kb " - - "L0.339[51289,57214] 9ns |-----------------------------------------L0.339-----------------------------------------|" + - "L0.339[51287,57214] 9ns |-----------------------------------------L0.339-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 273kb total:" - "L0 " - - "L0.?[51289,57160] 9ns 271kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[57161,57214] 9ns 2kb |L0.?|" - - "**** Simulation run 165, type=split(ReduceOverlap)(split_times=[28630]). 1 Input Files, 137kb total:" + - "L0.?[51287,57156] 9ns 271kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[57157,57214] 9ns 3kb |L0.?|" + - "**** Simulation run 165, type=split(ReduceOverlap)(split_times=[28628]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - - "L0.333[25695,28657] 10ns |-----------------------------------------L0.333-----------------------------------------|" + - "L0.333[25694,28657] 10ns |-----------------------------------------L0.333-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[25695,28630] 10ns 135kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[28631,28657] 10ns 1kb |L0.?|" - - "**** Simulation run 166, type=split(ReduceOverlap)(split_times=[57160]). 1 Input Files, 273kb total:" + - "L0.?[25694,28628] 10ns 135kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[28629,28657] 10ns 1kb |L0.?|" + - "**** Simulation run 166, type=split(ReduceOverlap)(split_times=[57156]). 
1 Input Files, 273kb total:" - "L0, all files 273kb " - - "L0.341[51289,57214] 10ns |-----------------------------------------L0.341-----------------------------------------|" + - "L0.341[51287,57214] 10ns |-----------------------------------------L0.341-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 273kb total:" - "L0 " - - "L0.?[51289,57160] 10ns 271kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[57161,57214] 10ns 2kb |L0.?|" + - "L0.?[51287,57156] 10ns 271kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[57157,57214] 10ns 3kb |L0.?|" - "Committing partition 1:" - " Soft Deleting 4 files: L0.331, L0.333, L0.339, L0.341" - " Creating 8 files" - - "**** Simulation run 167, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[104731, 132579]). 15 Input Files, 23mb total:" - - "L0 " - - "L0.291[76883,85771] 8ns 410kb|--L0.291--| " - - "L0.132[85772,85772] 8ns 0b |L0.132| " - - "L0.240[85773,92926] 8ns 330kb |-L0.240-| " - - "L0.359[92927,108402] 8ns 714kb |------L0.359-------| " - - "L0.373[108403,108723] 8ns 15kb |L0.373| " - - "L0.374[108724,109041] 8ns 15kb |L0.374| " + - "**** Simulation run 167, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[104729, 132577]). 15 Input Files, 23mb total:" + - "L0 " + - "L0.291[76881,85771] 8ns 410kb|--L0.291--| " + - "L0.132[85772,85772] 8ns 47b |L0.132| " + - "L0.240[85773,92924] 8ns 330kb |-L0.240-| " + - "L0.359[92925,108401] 8ns 714kb |------L0.359-------| " + - "L0.373[108402,108721] 8ns 15kb |L0.373| " + - "L0.374[108722,109041] 8ns 15kb |L0.374| " - "L0.192[109042,114328] 8ns 244kb |L0.192| " - - "L0.147[114329,114329] 8ns 0b |L0.147| " - - "L0.148[114330,135300] 8ns 967kb |----------L0.148-----------| " - - "L0.367[135301,139921] 8ns 213kb |L0.367|" - - "L0.375[139922,140563] 8ns 30kb |L0.375|" - - "L1 " - - "L1.357[76883,108402] 7ns 10mb|------------------L1.357------------------| " - - "L1.358[108403,108723] 7ns 103kb |L1.358| " - - "L1.365[108724,139921] 7ns 10mb |------------------L1.365------------------| " - - "L1.366[139922,140563] 7ns 206kb |L1.366|" + - "L0.147[114329,114329] 8ns 47b |L0.147| " + - "L0.148[114330,135298] 8ns 967kb |----------L0.148-----------| " + - "L0.367[135299,139921] 8ns 213kb |L0.367|" + - "L0.375[139922,140561] 8ns 30kb |L0.375|" + - "L1 " + - "L1.357[76881,108401] 7ns 10mb|------------------L1.357------------------| " + - "L1.358[108402,108721] 7ns 103kb |L1.358| " + - "L1.365[108722,139921] 7ns 10mb |------------------L1.365------------------| " + - "L1.366[139922,140561] 7ns 206kb |L1.366|" - "**** 3 Output Files (parquet_file_id not yet assigned), 23mb total:" - "L1 " - - "L1.?[76883,104731] 8ns 10mb|----------------L1.?-----------------| " - - "L1.?[104732,132579] 8ns 10mb |----------------L1.?-----------------| " - - "L1.?[132580,140563] 8ns 3mb |--L1.?---| " + - "L1.?[76881,104729] 8ns 10mb|----------------L1.?-----------------| " + - "L1.?[104730,132577] 8ns 10mb |----------------L1.?-----------------| " + - "L1.?[132578,140561] 8ns 3mb |--L1.?---| " - "Committing partition 1:" - " Soft Deleting 15 files: L0.132, L0.147, L0.148, L0.192, L0.240, L0.291, L1.357, L1.358, L0.359, L1.365, L1.366, L0.367, L0.373, L0.374, L0.375" - " Creating 3 files" - - "**** Simulation run 168, type=split(ReduceOverlap)(split_times=[104731]). 
1 Input Files, 714kb total:" + - "**** Simulation run 168, type=split(ReduceOverlap)(split_times=[104729]). 1 Input Files, 714kb total:" - "L0, all files 714kb " - - "L0.361[92927,108402] 9ns |-----------------------------------------L0.361-----------------------------------------|" + - "L0.361[92925,108401] 9ns |-----------------------------------------L0.361-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 714kb total:" - "L0 " - - "L0.?[92927,104731] 9ns 544kb|-------------------------------L0.?-------------------------------| " - - "L0.?[104732,108402] 9ns 169kb |-------L0.?--------| " - - "**** Simulation run 169, type=split(ReduceOverlap)(split_times=[132579]). 1 Input Files, 967kb total:" + - "L0.?[92925,104729] 9ns 544kb|-------------------------------L0.?-------------------------------| " + - "L0.?[104730,108401] 9ns 169kb |-------L0.?--------| " + - "**** Simulation run 169, type=split(ReduceOverlap)(split_times=[132577]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.150[114330,135300] 9ns|-----------------------------------------L0.150-----------------------------------------|" + - "L0.150[114330,135298] 9ns|-----------------------------------------L0.150-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114330,132579] 9ns 841kb|------------------------------------L0.?------------------------------------| " - - "L0.?[132580,135300] 9ns 125kb |--L0.?---| " - - "**** Simulation run 170, type=split(ReduceOverlap)(split_times=[104731]). 1 Input Files, 714kb total:" + - "L0.?[114330,132577] 9ns 841kb|------------------------------------L0.?------------------------------------| " + - "L0.?[132578,135298] 9ns 125kb |--L0.?---| " + - "**** Simulation run 170, type=split(ReduceOverlap)(split_times=[104729]). 1 Input Files, 714kb total:" - "L0, all files 714kb " - - "L0.363[92927,108402] 10ns|-----------------------------------------L0.363-----------------------------------------|" + - "L0.363[92925,108401] 10ns|-----------------------------------------L0.363-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 714kb total:" - "L0 " - - "L0.?[92927,104731] 10ns 544kb|-------------------------------L0.?-------------------------------| " - - "L0.?[104732,108402] 10ns 169kb |-------L0.?--------| " - - "**** Simulation run 171, type=split(ReduceOverlap)(split_times=[132579]). 1 Input Files, 967kb total:" + - "L0.?[92925,104729] 10ns 544kb|-------------------------------L0.?-------------------------------| " + - "L0.?[104730,108401] 10ns 169kb |-------L0.?--------| " + - "**** Simulation run 171, type=split(ReduceOverlap)(split_times=[132577]). 
1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.152[114330,135300] 10ns|-----------------------------------------L0.152-----------------------------------------|" + - "L0.152[114330,135298] 10ns|-----------------------------------------L0.152-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114330,132579] 10ns 841kb|------------------------------------L0.?------------------------------------| " - - "L0.?[132580,135300] 10ns 125kb |--L0.?---| " + - "L0.?[114330,132577] 10ns 841kb|------------------------------------L0.?------------------------------------| " + - "L0.?[132578,135298] 10ns 125kb |--L0.?---| " - "Committing partition 1:" - " Soft Deleting 4 files: L0.150, L0.152, L0.361, L0.363" - " Creating 8 files" - - "**** Simulation run 172, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[167314, 194064]). 13 Input Files, 22mb total:" + - "**** Simulation run 172, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[167314, 194066]). 13 Input Files, 22mb total:" - "L0 " - - "L0.376[140564,142885] 8ns 107kb|L0.376| " - - "L0.159[142886,142886] 8ns 0b |L0.159| " - - "L0.311[142887,149665] 8ns 312kb |-L0.311-| " - - "L0.312[149666,156350] 8ns 308kb |-L0.312-| " - - "L0.266[156351,160867] 8ns 208kb |L0.266| " + - "L0.376[140562,142885] 8ns 107kb|L0.376| " + - "L0.159[142886,142886] 8ns 47b |L0.159| " + - "L0.311[142887,149666] 8ns 313kb |-L0.311-| " + - "L0.312[149667,156351] 8ns 308kb |-L0.312-| " + - "L0.266[156352,160867] 8ns 208kb |L0.266| " - "L0.387[160868,170977] 8ns 466kb |---L0.387----| " - "L0.388[170978,171442] 8ns 21kb |L0.388| " - - "L0.173[171443,171443] 8ns 0b |L0.173| " - - "L0.313[171444,185000] 8ns 625kb |------L0.313------| " - - "L0.314[185001,198370] 8ns 617kb |------L0.314------| " - - "L0.268[198371,200000] 8ns 75kb |L0.268|" + - "L0.173[171443,171443] 8ns 47b |L0.173| " + - "L0.313[171444,185002] 8ns 625kb |------L0.313------| " + - "L0.314[185003,198372] 8ns 616kb |------L0.314------| " + - "L0.268[198373,200000] 8ns 75kb |L0.268|" - "L1 " - - "L1.385[140564,170977] 7ns 10mb|-------------------L1.385-------------------| " + - "L1.385[140562,170977] 7ns 10mb|-------------------L1.385-------------------| " - "L1.386[170978,200000] 7ns 10mb |-----------------L1.386------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L1 " - - "L1.?[140564,167314] 8ns 10mb|-----------------L1.?-----------------| " - - "L1.?[167315,194064] 8ns 10mb |-----------------L1.?-----------------| " - - "L1.?[194065,200000] 8ns 2mb |-L1.?-| " + - "L1.?[140562,167314] 8ns 10mb|-----------------L1.?-----------------| " + - "L1.?[167315,194066] 8ns 10mb |-----------------L1.?-----------------| " + - "L1.?[194067,200000] 8ns 2mb |-L1.?-| " - "Committing partition 1:" - " Soft Deleting 13 files: L0.159, L0.173, L0.266, L0.268, L0.311, L0.312, L0.313, L0.314, L0.376, L1.385, L1.386, L0.387, L0.388" - " Creating 3 files" @@ -1889,13 +1887,13 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L0 " - "L0.?[160868,167314] 9ns 297kb|-------------------------L0.?--------------------------| " - "L0.?[167315,170977] 9ns 169kb |-------------L0.?-------------| " - - "**** Simulation run 174, type=split(ReduceOverlap)(split_times=[194064]). 
1 Input Files, 617kb total:" - - "L0, all files 617kb " - - "L0.318[185001,198370] 9ns|-----------------------------------------L0.318-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 617kb total:" + - "**** Simulation run 174, type=split(ReduceOverlap)(split_times=[194066]). 1 Input Files, 616kb total:" + - "L0, all files 616kb " + - "L0.318[185003,198372] 9ns|-----------------------------------------L0.318-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 616kb total:" - "L0 " - - "L0.?[185001,194064] 9ns 418kb|---------------------------L0.?----------------------------| " - - "L0.?[194065,198370] 9ns 199kb |-----------L0.?-----------| " + - "L0.?[185003,194066] 9ns 418kb|---------------------------L0.?----------------------------| " + - "L0.?[194067,198372] 9ns 199kb |-----------L0.?-----------| " - "**** Simulation run 175, type=split(ReduceOverlap)(split_times=[167314]). 1 Input Files, 466kb total:" - "L0, all files 466kb " - "L0.391[160868,170977] 10ns|-----------------------------------------L0.391-----------------------------------------|" @@ -1903,345 +1901,345 @@ async fn all_overlapping_l0_max_input_bytes_per_partition() { - "L0 " - "L0.?[160868,167314] 10ns 297kb|-------------------------L0.?--------------------------| " - "L0.?[167315,170977] 10ns 169kb |-------------L0.?-------------| " - - "**** Simulation run 176, type=split(ReduceOverlap)(split_times=[194064]). 1 Input Files, 617kb total:" - - "L0, all files 617kb " - - "L0.322[185001,198370] 10ns|-----------------------------------------L0.322-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 617kb total:" + - "**** Simulation run 176, type=split(ReduceOverlap)(split_times=[194066]). 1 Input Files, 616kb total:" + - "L0, all files 616kb " + - "L0.322[185003,198372] 10ns|-----------------------------------------L0.322-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 616kb total:" - "L0 " - - "L0.?[185001,194064] 10ns 418kb|---------------------------L0.?----------------------------| " - - "L0.?[194065,198370] 10ns 199kb |-----------L0.?-----------| " + - "L0.?[185003,194066] 10ns 418kb|---------------------------L0.?----------------------------| " + - "L0.?[194067,198372] 10ns 199kb |-----------L0.?-----------| " - "Committing partition 1:" - " Soft Deleting 4 files: L0.318, L0.322, L0.389, L0.391" - " Creating 8 files" - - "**** Simulation run 177, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[25731, 51362]). 
17 Input Files, 30mb total:" - - "L0 " - - "L0.330[100,25694] 9ns 1mb|-----------L0.330-----------| " - - "L0.400[25695,28630] 9ns 135kb |L0.400| " - - "L0.401[28631,28657] 9ns 1kb |L0.401| " - - "L0.346[28658,32834] 9ns 193kb |L0.346| " - - "L0.347[32835,38176] 9ns 246kb |L0.347| " - - "L0.292[38177,38491] 9ns 14kb |L0.292| " - - "L0.293[38492,46513] 9ns 370kb |L0.293-| " - - "L0.338[46514,51288] 9ns 220kb |L0.338| " - - "L0.402[51289,57160] 9ns 271kb |L0.402| " - - "L0.403[57161,57214] 9ns 2kb |L0.403| " - - "L0.348[57215,65568] 9ns 385kb |L0.348-| " - - "L0.349[65569,67700] 9ns 98kb |L0.349| " - - "L1 " - - "L1.396[100,25694] 8ns 9mb|-----------L1.396-----------| " - - "L1.397[25695,28630] 8ns 1mb |L1.397| " - - "L1.398[28631,51288] 8ns 8mb |---------L1.398---------| " - - "L1.399[51289,57160] 8ns 2mb |L1.399| " - - "L1.395[57161,76882] 8ns 7mb |-------L1.395--------| " + - "**** Simulation run 177, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[25730, 51360]). 17 Input Files, 30mb total:" + - "L0 " + - "L0.330[100,25693] 9ns 1mb|----------L0.330-----------| " + - "L0.400[25694,28628] 9ns 135kb |L0.400| " + - "L0.401[28629,28657] 9ns 1kb |L0.401| " + - "L0.346[28658,32832] 9ns 192kb |L0.346| " + - "L0.347[32833,38176] 9ns 246kb |L0.347| " + - "L0.292[38177,38490] 9ns 14kb |L0.292| " + - "L0.293[38491,46512] 9ns 370kb |L0.293-| " + - "L0.338[46513,51286] 9ns 220kb |L0.338| " + - "L0.402[51287,57156] 9ns 271kb |L0.402| " + - "L0.403[57157,57214] 9ns 3kb |L0.403| " + - "L0.348[57215,65564] 9ns 385kb |L0.348-| " + - "L0.349[65565,67699] 9ns 98kb |L0.349| " + - "L1 " + - "L1.396[100,25693] 8ns 9mb|----------L1.396-----------| " + - "L1.397[25694,28628] 8ns 1mb |L1.397| " + - "L1.398[28629,51286] 8ns 8mb |---------L1.398---------| " + - "L1.399[51287,57156] 8ns 2mb |L1.399| " + - "L1.395[57157,76880] 8ns 7mb |-------L1.395--------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[100,25731] 9ns 10mb |------------L1.?------------| " - - "L1.?[25732,51362] 9ns 10mb |------------L1.?------------| " - - "L1.?[51363,76882] 9ns 10mb |-----------L1.?------------| " + - "L1.?[100,25730] 9ns 10mb |------------L1.?------------| " + - "L1.?[25731,51360] 9ns 10mb |------------L1.?------------| " + - "L1.?[51361,76880] 9ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 17 files: L0.292, L0.293, L0.330, L0.338, L0.346, L0.347, L0.348, L0.349, L1.395, L1.396, L1.397, L1.398, L1.399, L0.400, L0.401, L0.402, L0.403" - " Creating 3 files" - - "**** Simulation run 178, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 10mb total:" + - "**** Simulation run 178, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.430[100,25731] 9ns |-----------------------------------------L1.430-----------------------------------------|" + - "L1.430[100,25730] 9ns |-----------------------------------------L1.430-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[100,25694] 9ns 10mb |-----------------------------------------L1.?------------------------------------------| " - - "L1.?[25695,25731] 9ns 15kb |L1.?|" - - "**** Simulation run 179, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 
1 Input Files, 10mb total:" + - "L1.?[100,25693] 9ns 10mb |-----------------------------------------L1.?------------------------------------------| " + - "L1.?[25694,25730] 9ns 15kb |L1.?|" + - "**** Simulation run 179, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.431[25732,51362] 9ns |-----------------------------------------L1.431-----------------------------------------|" + - "L1.431[25731,51360] 9ns |-----------------------------------------L1.431-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[25732,51288] 9ns 10mb|-----------------------------------------L1.?------------------------------------------| " - - "L1.?[51289,51362] 9ns 30kb |L1.?|" + - "L1.?[25731,51286] 9ns 10mb|-----------------------------------------L1.?------------------------------------------| " + - "L1.?[51287,51360] 9ns 30kb |L1.?|" - "Committing partition 1:" - " Soft Deleting 2 files: L1.430, L1.431" - " Creating 4 files" - - "**** Simulation run 180, type=split(ReduceOverlap)(split_times=[25731]). 1 Input Files, 135kb total:" + - "**** Simulation run 180, type=split(ReduceOverlap)(split_times=[25730]). 1 Input Files, 135kb total:" - "L0, all files 135kb " - - "L0.404[25695,28630] 10ns |----------------------------------------L0.404-----------------------------------------| " + - "L0.404[25694,28628] 10ns |-----------------------------------------L0.404-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 135kb total:" - "L0 " - - "L0.?[25695,25731] 10ns 2kb|L0.?| " - - "L0.?[25732,28630] 10ns 134kb |-----------------------------------------L0.?-----------------------------------------| " - - "**** Simulation run 181, type=split(ReduceOverlap)(split_times=[51362]). 1 Input Files, 271kb total:" + - "L0.?[25694,25730] 10ns 2kb|L0.?| " + - "L0.?[25731,28628] 10ns 134kb |-----------------------------------------L0.?-----------------------------------------| " + - "**** Simulation run 181, type=split(ReduceOverlap)(split_times=[51360]). 1 Input Files, 271kb total:" - "L0, all files 271kb " - - "L0.406[51289,57160] 10ns |-----------------------------------------L0.406-----------------------------------------|" + - "L0.406[51287,57156] 10ns |-----------------------------------------L0.406-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 271kb total:" - "L0 " - - "L0.?[51289,51362] 10ns 3kb|L0.?| " - - "L0.?[51363,57160] 10ns 267kb |-----------------------------------------L0.?-----------------------------------------| " + - "L0.?[51287,51360] 10ns 3kb|L0.?| " + - "L0.?[51361,57156] 10ns 267kb |-----------------------------------------L0.?-----------------------------------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.404, L0.406" - " Creating 4 files" - - "**** Simulation run 182, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[76043, 100723]). 8 Input Files, 22mb total:" + - "**** Simulation run 182, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[76041, 100721]). 
8 Input Files, 22mb total:" - "L0 " - - "L0.226[67701,76252] 9ns 394kb |---L0.226---| " - - "L0.294[76253,76882] 9ns 29kb |L0.294| " - - "L0.295[76883,85771] 9ns 410kb |---L0.295---| " - - "L0.134[85772,85772] 9ns 0b |L0.134| " - - "L0.244[85773,92926] 9ns 330kb |--L0.244--| " - - "L0.411[92927,104731] 9ns 544kb |-----L0.411------| " + - "L0.226[67700,76252] 9ns 394kb |---L0.226---| " + - "L0.294[76253,76880] 9ns 29kb |L0.294| " + - "L0.295[76881,85771] 9ns 410kb |---L0.295---| " + - "L0.134[85772,85772] 9ns 47b |L0.134| " + - "L0.244[85773,92924] 9ns 330kb |--L0.244--| " + - "L0.411[92925,104729] 9ns 544kb |-----L0.411------| " - "L1 " - - "L1.432[51363,76882] 9ns 10mb|-----------------L1.432------------------| " - - "L1.408[76883,104731] 8ns 10mb |-------------------L1.408-------------------| " + - "L1.432[51361,76880] 9ns 10mb|-----------------L1.432------------------| " + - "L1.408[76881,104729] 8ns 10mb |-------------------L1.408-------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L1 " - - "L1.?[51363,76043] 9ns 10mb|-----------------L1.?------------------| " - - "L1.?[76044,100723] 9ns 10mb |-----------------L1.?------------------| " - - "L1.?[100724,104731] 9ns 2mb |L1.?| " + - "L1.?[51361,76041] 9ns 10mb|-----------------L1.?------------------| " + - "L1.?[76042,100721] 9ns 10mb |-----------------L1.?------------------| " + - "L1.?[100722,104729] 9ns 2mb |L1.?| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.134, L0.226, L0.244, L0.294, L0.295, L1.408, L0.411, L1.432" - " Creating 3 files" - - "**** Simulation run 183, type=split(ReduceOverlap)(split_times=[76043]). 1 Input Files, 394kb total:" + - "**** Simulation run 183, type=split(ReduceOverlap)(split_times=[76041]). 1 Input Files, 394kb total:" - "L0, all files 394kb " - - "L0.228[67701,76252] 10ns |-----------------------------------------L0.228-----------------------------------------|" + - "L0.228[67700,76252] 10ns |-----------------------------------------L0.228-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 394kb total:" - "L0 " - - "L0.?[67701,76043] 10ns 385kb|----------------------------------------L0.?-----------------------------------------| " - - "L0.?[76044,76252] 10ns 10kb |L0.?|" - - "**** Simulation run 184, type=split(ReduceOverlap)(split_times=[100723]). 1 Input Files, 544kb total:" + - "L0.?[67700,76041] 10ns 385kb|----------------------------------------L0.?-----------------------------------------| " + - "L0.?[76042,76252] 10ns 10kb |L0.?|" + - "**** Simulation run 184, type=split(ReduceOverlap)(split_times=[100721]). 
1 Input Files, 544kb total:" - "L0, all files 544kb " - - "L0.415[92927,104731] 10ns|-----------------------------------------L0.415-----------------------------------------|" + - "L0.415[92925,104729] 10ns|-----------------------------------------L0.415-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 544kb total:" - "L0 " - - "L0.?[92927,100723] 10ns 359kb|--------------------------L0.?---------------------------| " - - "L0.?[100724,104731] 10ns 185kb |------------L0.?------------| " + - "L0.?[92925,100721] 10ns 359kb|--------------------------L0.?---------------------------| " + - "L0.?[100722,104729] 10ns 185kb |------------L0.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.228, L0.415" - " Creating 4 files" - - "**** Simulation run 185, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[129098, 153464]). 18 Input Files, 26mb total:" + - "**** Simulation run 185, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[129096, 153462]). 18 Input Files, 26mb total:" - "L0 " - - "L0.412[104732,108402] 9ns 169kb|L0.412| " - - "L0.377[108403,108723] 9ns 15kb |L0.377| " - - "L0.378[108724,109041] 9ns 15kb |L0.378| " + - "L0.412[104730,108401] 9ns 169kb|L0.412| " + - "L0.377[108402,108721] 9ns 15kb |L0.377| " + - "L0.378[108722,109041] 9ns 15kb |L0.378| " - "L0.196[109042,114328] 9ns 244kb |L0.196| " - - "L0.149[114329,114329] 9ns 0b |L0.149| " - - "L0.413[114330,132579] 9ns 841kb |---------L0.413---------| " - - "L0.414[132580,135300] 9ns 125kb |L0.414| " - - "L0.369[135301,139921] 9ns 213kb |L0.369| " - - "L0.379[139922,140563] 9ns 30kb |L0.379| " - - "L0.380[140564,142885] 9ns 107kb |L0.380| " - - "L0.161[142886,142886] 9ns 0b |L0.161| " - - "L0.315[142887,149665] 9ns 312kb |L0.315-| " - - "L0.316[149666,156350] 9ns 308kb |L0.316-| " - - "L0.270[156351,160867] 9ns 208kb |L0.270| " + - "L0.149[114329,114329] 9ns 47b |L0.149| " + - "L0.413[114330,132577] 9ns 841kb |---------L0.413---------| " + - "L0.414[132578,135298] 9ns 125kb |L0.414| " + - "L0.369[135299,139921] 9ns 213kb |L0.369| " + - "L0.379[139922,140561] 9ns 30kb |L0.379| " + - "L0.380[140562,142885] 9ns 107kb |L0.380| " + - "L0.161[142886,142886] 9ns 47b |L0.161| " + - "L0.315[142887,149666] 9ns 313kb |L0.315-| " + - "L0.316[149667,156351] 9ns 308kb |L0.316-| " + - "L0.270[156352,160867] 9ns 208kb |L0.270| " - "L0.422[160868,167314] 9ns 297kb |L0.422-| " - "L1 " - - "L1.409[104732,132579] 8ns 10mb|----------------L1.409----------------| " - - "L1.410[132580,140563] 8ns 3mb |-L1.410--| " - - "L1.419[140564,167314] 8ns 10mb |---------------L1.419---------------| " + - "L1.409[104730,132577] 8ns 10mb|----------------L1.409----------------| " + - "L1.410[132578,140561] 8ns 3mb |-L1.410--| " + - "L1.419[140562,167314] 8ns 10mb |---------------L1.419---------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 26mb total:" - "L1 " - - "L1.?[104732,129098] 9ns 10mb|--------------L1.?---------------| " - - "L1.?[129099,153464] 9ns 10mb |--------------L1.?---------------| " - - "L1.?[153465,167314] 9ns 6mb |------L1.?-------| " + - "L1.?[104730,129096] 9ns 10mb|--------------L1.?---------------| " + - "L1.?[129097,153462] 9ns 10mb |--------------L1.?---------------| " + - "L1.?[153463,167314] 9ns 6mb |------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 18 files: L0.149, L0.161, L0.196, L0.270, L0.315, L0.316, L0.369, L0.377, L0.378, L0.379, L0.380, L1.409, L1.410, L0.412, 
L0.413, L0.414, L1.419, L0.422" - " Creating 3 files" - - "**** Simulation run 186, type=split(ReduceOverlap)(split_times=[129098]). 1 Input Files, 841kb total:" + - "**** Simulation run 186, type=split(ReduceOverlap)(split_times=[129096]). 1 Input Files, 841kb total:" - "L0, all files 841kb " - - "L0.417[114330,132579] 10ns|-----------------------------------------L0.417-----------------------------------------|" + - "L0.417[114330,132577] 10ns|-----------------------------------------L0.417-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 841kb total:" - "L0 " - - "L0.?[114330,129098] 10ns 681kb|---------------------------------L0.?---------------------------------| " - - "L0.?[129099,132579] 10ns 160kb |-----L0.?------| " - - "**** Simulation run 187, type=split(ReduceOverlap)(split_times=[153464]). 1 Input Files, 308kb total:" + - "L0.?[114330,129096] 10ns 681kb|---------------------------------L0.?---------------------------------| " + - "L0.?[129097,132577] 10ns 160kb |-----L0.?------| " + - "**** Simulation run 187, type=split(ReduceOverlap)(split_times=[153462]). 1 Input Files, 308kb total:" - "L0, all files 308kb " - - "L0.320[149666,156350] 10ns|-----------------------------------------L0.320-----------------------------------------|" + - "L0.320[149667,156351] 10ns|-----------------------------------------L0.320-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 308kb total:" - "L0 " - - "L0.?[149666,153464] 10ns 175kb|----------------------L0.?-----------------------| " - - "L0.?[153465,156350] 10ns 133kb |----------------L0.?----------------| " + - "L0.?[149667,153462] 10ns 175kb|----------------------L0.?-----------------------| " + - "L0.?[153463,156351] 10ns 133kb |----------------L0.?----------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.320, L0.417" - " Creating 4 files" - - "**** Simulation run 188, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[191189]). 9 Input Files, 14mb total:" + - "**** Simulation run 188, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[191191]). 
9 Input Files, 14mb total:" - "L0 " - "L0.423[167315,170977] 9ns 169kb|-L0.423-| " - "L0.390[170978,171442] 9ns 21kb |L0.390| " - - "L0.175[171443,171443] 9ns 0b |L0.175| " - - "L0.317[171444,185000] 9ns 625kb |--------------L0.317---------------| " - - "L0.424[185001,194064] 9ns 418kb |--------L0.424--------| " - - "L0.425[194065,198370] 9ns 199kb |-L0.425--| " - - "L0.272[198371,200000] 9ns 75kb |L0.272|" - - "L1 " - - "L1.420[167315,194064] 8ns 10mb|--------------------------------L1.420---------------------------------| " - - "L1.421[194065,200000] 8ns 2mb |----L1.421----| " + - "L0.175[171443,171443] 9ns 47b |L0.175| " + - "L0.317[171444,185002] 9ns 625kb |--------------L0.317---------------| " + - "L0.424[185003,194066] 9ns 418kb |--------L0.424--------| " + - "L0.425[194067,198372] 9ns 199kb |-L0.425--| " + - "L0.272[198373,200000] 9ns 75kb |L0.272|" + - "L1 " + - "L1.420[167315,194066] 8ns 10mb|--------------------------------L1.420---------------------------------| " + - "L1.421[194067,200000] 8ns 2mb |----L1.421----| " - "**** 2 Output Files (parquet_file_id not yet assigned), 14mb total:" - "L1 " - - "L1.?[167315,191189] 9ns 10mb|-----------------------------L1.?------------------------------| " - - "L1.?[191190,200000] 9ns 4mb |---------L1.?---------| " + - "L1.?[167315,191191] 9ns 10mb|-----------------------------L1.?------------------------------| " + - "L1.?[191192,200000] 9ns 4mb |---------L1.?---------| " - "Committing partition 1:" - " Soft Deleting 9 files: L0.175, L0.272, L0.317, L0.390, L1.420, L1.421, L0.423, L0.424, L0.425" - " Creating 2 files" - - "**** Simulation run 189, type=split(ReduceOverlap)(split_times=[191189]). 1 Input Files, 418kb total:" + - "**** Simulation run 189, type=split(ReduceOverlap)(split_times=[191191]). 1 Input Files, 418kb total:" - "L0, all files 418kb " - - "L0.428[185001,194064] 10ns|-----------------------------------------L0.428-----------------------------------------|" + - "L0.428[185003,194066] 10ns|-----------------------------------------L0.428-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 418kb total:" - "L0 " - - "L0.?[185001,191189] 10ns 285kb|---------------------------L0.?----------------------------| " - - "L0.?[191190,194064] 10ns 133kb |-----------L0.?-----------| " + - "L0.?[185003,191191] 10ns 285kb|---------------------------L0.?----------------------------| " + - "L0.?[191192,194066] 10ns 133kb |-----------L0.?-----------| " - "Committing partition 1:" - " Soft Deleting 1 files: L0.428" - " Creating 2 files" - - "**** Simulation run 190, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[23080, 46060]). 
14 Input Files, 22mb total:" - - "L0 " - - "L0.332[100,25694] 10ns 1mb|------------------L0.332------------------| " - - "L0.437[25695,25731] 10ns 2kb |L0.437| " - - "L0.438[25732,28630] 10ns 134kb |L0.438| " - - "L0.405[28631,28657] 10ns 1kb |L0.405| " - - "L0.350[28658,32834] 10ns 193kb |L0.350| " - - "L0.351[32835,38176] 10ns 246kb |L0.351-| " - - "L0.296[38177,38491] 10ns 14kb |L0.296| " - - "L0.297[38492,46513] 10ns 370kb |---L0.297---| " - - "L0.340[46514,51288] 10ns 220kb |L0.340| " - - "L0.439[51289,51362] 10ns 3kb |L0.439|" - - "L1 " - - "L1.433[100,25694] 9ns 10mb|------------------L1.433------------------| " - - "L1.434[25695,25731] 9ns 15kb |L1.434| " - - "L1.435[25732,51288] 9ns 10mb |------------------L1.435------------------| " - - "L1.436[51289,51362] 9ns 30kb |L1.436|" + - "**** Simulation run 190, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[23078, 46056]). 14 Input Files, 22mb total:" + - "L0 " + - "L0.332[100,25693] 10ns 1mb|------------------L0.332------------------| " + - "L0.437[25694,25730] 10ns 2kb |L0.437| " + - "L0.438[25731,28628] 10ns 134kb |L0.438| " + - "L0.405[28629,28657] 10ns 1kb |L0.405| " + - "L0.350[28658,32832] 10ns 192kb |L0.350| " + - "L0.351[32833,38176] 10ns 246kb |L0.351-| " + - "L0.296[38177,38490] 10ns 14kb |L0.296| " + - "L0.297[38491,46512] 10ns 370kb |---L0.297---| " + - "L0.340[46513,51286] 10ns 220kb |L0.340| " + - "L0.439[51287,51360] 10ns 3kb |L0.439|" + - "L1 " + - "L1.433[100,25693] 9ns 10mb|------------------L1.433------------------| " + - "L1.434[25694,25730] 9ns 15kb |L1.434| " + - "L1.435[25731,51286] 9ns 10mb |------------------L1.435------------------| " + - "L1.436[51287,51360] 9ns 30kb |L1.436|" - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L1 " - - "L1.?[100,23080] 10ns 10mb|-----------------L1.?-----------------| " - - "L1.?[23081,46060] 10ns 10mb |-----------------L1.?-----------------| " - - "L1.?[46061,51362] 10ns 2mb |-L1.?--| " + - "L1.?[100,23078] 10ns 10mb|-----------------L1.?-----------------| " + - "L1.?[23079,46056] 10ns 10mb |-----------------L1.?-----------------| " + - "L1.?[46057,51360] 10ns 2mb |-L1.?--| " - "Committing partition 1:" - " Soft Deleting 14 files: L0.296, L0.297, L0.332, L0.340, L0.350, L0.351, L0.405, L1.433, L1.434, L1.435, L1.436, L0.437, L0.438, L0.439" - " Creating 3 files" - - "**** Simulation run 191, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[73575, 95787]). 15 Input Files, 24mb total:" - - "L0 " - - "L0.440[51363,57160] 10ns 267kb|L0.440-| " - - "L0.407[57161,57214] 10ns 2kb |L0.407| " - - "L0.352[57215,65568] 10ns 385kb |---L0.352---| " - - "L0.353[65569,67700] 10ns 98kb |L0.353| " - - "L0.444[67701,76043] 10ns 385kb |---L0.444---| " - - "L0.445[76044,76252] 10ns 10kb |L0.445| " - - "L0.298[76253,76882] 10ns 29kb |L0.298| " - - "L0.299[76883,85771] 10ns 410kb |---L0.299---| " - - "L0.136[85772,85772] 10ns 0b |L0.136| " - - "L0.248[85773,92926] 10ns 330kb |--L0.248--| " - - "L0.446[92927,100723] 10ns 359kb |--L0.446---| " - - "L0.447[100724,104731] 10ns 185kb |L0.447|" - - "L1 " - - "L1.441[51363,76043] 9ns 10mb|----------------L1.441-----------------| " - - "L1.442[76044,100723] 9ns 10mb |----------------L1.442-----------------| " - - "L1.443[100724,104731] 9ns 2mb |L1.443|" + - "**** Simulation run 191, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[73573, 95785]). 
15 Input Files, 24mb total:" + - "L0 " + - "L0.440[51361,57156] 10ns 267kb|L0.440-| " + - "L0.407[57157,57214] 10ns 3kb |L0.407| " + - "L0.352[57215,65564] 10ns 385kb |---L0.352---| " + - "L0.353[65565,67699] 10ns 98kb |L0.353| " + - "L0.444[67700,76041] 10ns 385kb |---L0.444---| " + - "L0.445[76042,76252] 10ns 10kb |L0.445| " + - "L0.298[76253,76880] 10ns 29kb |L0.298| " + - "L0.299[76881,85771] 10ns 410kb |---L0.299---| " + - "L0.136[85772,85772] 10ns 47b |L0.136| " + - "L0.248[85773,92924] 10ns 330kb |--L0.248--| " + - "L0.446[92925,100721] 10ns 359kb |--L0.446---| " + - "L0.447[100722,104729] 10ns 185kb |L0.447|" + - "L1 " + - "L1.441[51361,76041] 9ns 10mb|----------------L1.441-----------------| " + - "L1.442[76042,100721] 9ns 10mb |----------------L1.442-----------------| " + - "L1.443[100722,104729] 9ns 2mb |L1.443|" - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L1 " - - "L1.?[51363,73575] 10ns 10mb|---------------L1.?----------------| " - - "L1.?[73576,95787] 10ns 10mb |---------------L1.?----------------| " - - "L1.?[95788,104731] 10ns 4mb |----L1.?-----| " + - "L1.?[51361,73573] 10ns 10mb|---------------L1.?----------------| " + - "L1.?[73574,95785] 10ns 10mb |---------------L1.?----------------| " + - "L1.?[95786,104729] 10ns 4mb |----L1.?-----| " - "Committing partition 1:" - " Soft Deleting 15 files: L0.136, L0.248, L0.298, L0.299, L0.352, L0.353, L0.407, L0.440, L1.441, L1.442, L1.443, L0.444, L0.445, L0.446, L0.447" - " Creating 3 files" - - "**** Simulation run 192, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[126689, 148646]). 20 Input Files, 29mb total:" + - "**** Simulation run 192, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[126688, 148646]). 
20 Input Files, 29mb total:" - "L0 " - - "L0.416[104732,108402] 10ns 169kb|L0.416| " - - "L0.381[108403,108723] 10ns 15kb |L0.381| " - - "L0.382[108724,109041] 10ns 15kb |L0.382| " + - "L0.416[104730,108401] 10ns 169kb|L0.416| " + - "L0.381[108402,108721] 10ns 15kb |L0.381| " + - "L0.382[108722,109041] 10ns 15kb |L0.382| " - "L0.200[109042,114328] 10ns 244kb |L0.200| " - - "L0.151[114329,114329] 10ns 0b |L0.151| " - - "L0.451[114330,129098] 10ns 681kb |------L0.451-------| " - - "L0.452[129099,132579] 10ns 160kb |L0.452| " - - "L0.418[132580,135300] 10ns 125kb |L0.418| " - - "L0.371[135301,139921] 10ns 213kb |L0.371| " - - "L0.383[139922,140563] 10ns 30kb |L0.383| " - - "L0.384[140564,142885] 10ns 107kb |L0.384| " - - "L0.163[142886,142886] 10ns 0b |L0.163| " - - "L0.319[142887,149665] 10ns 312kb |L0.319-| " - - "L0.453[149666,153464] 10ns 175kb |L0.453| " - - "L0.454[153465,156350] 10ns 133kb |L0.454| " - - "L0.274[156351,160867] 10ns 208kb |L0.274| " + - "L0.151[114329,114329] 10ns 47b |L0.151| " + - "L0.451[114330,129096] 10ns 681kb |------L0.451-------| " + - "L0.452[129097,132577] 10ns 160kb |L0.452| " + - "L0.418[132578,135298] 10ns 125kb |L0.418| " + - "L0.371[135299,139921] 10ns 213kb |L0.371| " + - "L0.383[139922,140561] 10ns 30kb |L0.383| " + - "L0.384[140562,142885] 10ns 107kb |L0.384| " + - "L0.163[142886,142886] 10ns 47b |L0.163| " + - "L0.319[142887,149666] 10ns 313kb |L0.319-| " + - "L0.453[149667,153462] 10ns 175kb |L0.453| " + - "L0.454[153463,156351] 10ns 133kb |L0.454| " + - "L0.274[156352,160867] 10ns 208kb |L0.274| " - "L0.426[160868,167314] 10ns 297kb |L0.426-| " - "L1 " - - "L1.448[104732,129098] 9ns 10mb|-------------L1.448--------------| " - - "L1.449[129099,153464] 9ns 10mb |-------------L1.449--------------| " - - "L1.450[153465,167314] 9ns 6mb |-----L1.450------| " + - "L1.448[104730,129096] 9ns 10mb|-------------L1.448--------------| " + - "L1.449[129097,153462] 9ns 10mb |-------------L1.449--------------| " + - "L1.450[153463,167314] 9ns 6mb |-----L1.450------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 29mb total:" - "L1 " - - "L1.?[104732,126689] 10ns 10mb|------------L1.?-------------| " - - "L1.?[126690,148646] 10ns 10mb |------------L1.?-------------| " + - "L1.?[104730,126688] 10ns 10mb|------------L1.?-------------| " + - "L1.?[126689,148646] 10ns 10mb |------------L1.?-------------| " - "L1.?[148647,167314] 10ns 9mb |----------L1.?----------| " - "Committing partition 1:" - " Soft Deleting 20 files: L0.151, L0.163, L0.200, L0.274, L0.319, L0.371, L0.381, L0.382, L0.383, L0.384, L0.416, L0.418, L0.426, L1.448, L1.449, L1.450, L0.451, L0.452, L0.453, L0.454" - " Creating 3 files" - - "**** Simulation run 193, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[188871]). 10 Input Files, 15mb total:" - - "L0 " - - "L0.276[198371,200000] 10ns 75kb |L0.276|" - - "L0.429[194065,198370] 10ns 199kb |-L0.429--| " - - "L0.458[191190,194064] 10ns 133kb |L0.458| " - - "L0.457[185001,191189] 10ns 285kb |----L0.457-----| " - - "L0.321[171444,185000] 10ns 625kb |--------------L0.321---------------| " - - "L0.177[171443,171443] 10ns 0b |L0.177| " + - "**** Simulation run 193, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[188874]). 
10 Input Files, 15mb total:" + - "L0 " + - "L0.276[198373,200000] 10ns 75kb |L0.276|" + - "L0.429[194067,198372] 10ns 199kb |-L0.429--| " + - "L0.458[191192,194066] 10ns 133kb |L0.458| " + - "L0.457[185003,191191] 10ns 285kb |----L0.457-----| " + - "L0.321[171444,185002] 10ns 625kb |--------------L0.321---------------| " + - "L0.177[171443,171443] 10ns 47b |L0.177| " - "L0.392[170978,171442] 10ns 21kb |L0.392| " - "L0.427[167315,170977] 10ns 169kb|-L0.427-| " - "L1 " - - "L1.456[191190,200000] 9ns 4mb |--------L1.456--------| " - - "L1.455[167315,191189] 9ns 10mb|----------------------------L1.455-----------------------------| " + - "L1.456[191192,200000] 9ns 4mb |--------L1.456--------| " + - "L1.455[167315,191191] 9ns 10mb|----------------------------L1.455-----------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 15mb total:" - "L1 " - - "L1.?[167315,188871] 10ns 10mb|--------------------------L1.?---------------------------| " - - "L1.?[188872,200000] 10ns 5mb |------------L1.?------------| " + - "L1.?[167315,188874] 10ns 10mb|--------------------------L1.?---------------------------| " + - "L1.?[188875,200000] 10ns 5mb |------------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 10 files: L0.177, L0.276, L0.321, L0.392, L0.427, L0.429, L1.455, L1.456, L0.457, L0.458" - " Creating 2 files" - - "**** Simulation run 194, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[45717, 68353]). 3 Input Files, 22mb total:" + - "**** Simulation run 194, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[45714, 68349]). 3 Input Files, 22mb total:" - "L1 " - - "L1.460[23081,46060] 10ns 10mb|----------------L1.460----------------| " - - "L1.461[46061,51362] 10ns 2mb |L1.461-| " - - "L1.462[51363,73575] 10ns 10mb |---------------L1.462----------------| " + - "L1.460[23079,46056] 10ns 10mb|----------------L1.460----------------| " + - "L1.461[46057,51360] 10ns 2mb |L1.461-| " + - "L1.462[51361,73573] 10ns 10mb |---------------L1.462----------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L2 " - - "L2.?[23081,45717] 10ns 10mb|-----------------L2.?-----------------| " - - "L2.?[45718,68353] 10ns 10mb |-----------------L2.?-----------------| " - - "L2.?[68354,73575] 10ns 2mb |-L2.?--| " + - "L2.?[23079,45714] 10ns 10mb|-----------------L2.?-----------------| " + - "L2.?[45715,68349] 10ns 10mb |-----------------L2.?-----------------| " + - "L2.?[68350,73573] 10ns 2mb |-L2.?--| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.460, L1.461, L1.462" - " Upgrading 1 files level to CompactionLevel::L2: L1.459" - " Creating 3 files" - - "**** Simulation run 195, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[95682, 117788]). 3 Input Files, 24mb total:" + - "**** Simulation run 195, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[117786, 139786]). 
3 Input Files, 24mb total:" - "L1 " - - "L1.463[73576,95787] 10ns 10mb|--------------L1.463---------------| " - - "L1.464[95788,104731] 10ns 4mb |---L1.464----| " - - "L1.465[104732,126689] 10ns 10mb |--------------L1.465---------------| " + - "L1.464[95786,104729] 10ns 4mb|---L1.464----| " + - "L1.465[104730,126688] 10ns 10mb |--------------L1.465---------------| " + - "L1.466[126689,148646] 10ns 10mb |--------------L1.466---------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L2 " - - "L2.?[73576,95682] 10ns 10mb|---------------L2.?----------------| " - - "L2.?[95683,117788] 10ns 10mb |---------------L2.?----------------| " - - "L2.?[117789,126689] 10ns 4mb |----L2.?-----| " + - "L2.?[95786,117786] 10ns 10mb|---------------L2.?----------------| " + - "L2.?[117787,139786] 10ns 10mb |---------------L2.?----------------| " + - "L2.?[139787,148646] 10ns 4mb |----L2.?-----| " - "Committing partition 1:" - - " Soft Deleting 3 files: L1.463, L1.464, L1.465" + - " Soft Deleting 3 files: L1.464, L1.465, L1.466" + - " Upgrading 1 files level to CompactionLevel::L2: L1.463" - " Creating 3 files" - - "**** Simulation run 196, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[148507, 170324]). 3 Input Files, 29mb total:" + - "**** Simulation run 196, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[170349, 192051]). 3 Input Files, 24mb total:" - "L1 " - - "L1.466[126690,148646] 10ns 10mb|-----------L1.466------------| " - - "L1.467[148647,167314] 10ns 9mb |---------L1.467----------| " - - "L1.468[167315,188871] 10ns 10mb |-----------L1.468------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 29mb total:" + - "L1.469[188875,200000] 10ns 5mb |-----L1.469------| " + - "L1.467[148647,167314] 10ns 9mb|------------L1.467------------| " + - "L1.468[167315,188874] 10ns 10mb |--------------L1.468---------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L2 " - - "L2.?[126690,148507] 10ns 10mb|------------L2.?-------------| " - - "L2.?[148508,170324] 10ns 10mb |------------L2.?-------------| " - - "L2.?[170325,188871] 10ns 9mb |----------L2.?----------| " + - "L2.?[148647,170349] 10ns 10mb|----------------L2.?----------------| " + - "L2.?[170350,192051] 10ns 10mb |----------------L2.?----------------| " + - "L2.?[192052,200000] 10ns 4mb |---L2.?----| " - "Committing partition 1:" - - " Soft Deleting 3 files: L1.466, L1.467, L1.468" + - " Soft Deleting 3 files: L1.467, L1.468, L1.469" - " Creating 3 files" - - "**** Final Output Files (989mb written)" - - "L1 " - - "L1.469[188872,200000] 10ns 5mb |L1.469|" + - "**** Final Output Files (984mb written)" - "L2 " - - "L2.459[100,23080] 10ns 10mb|-L2.459-| " - - "L2.470[23081,45717] 10ns 10mb |-L2.470-| " - - "L2.471[45718,68353] 10ns 10mb |-L2.471-| " - - "L2.472[68354,73575] 10ns 2mb |L2.472| " - - "L2.473[73576,95682] 10ns 10mb |L2.473-| " - - "L2.474[95683,117788] 10ns 10mb |L2.474-| " - - "L2.475[117789,126689] 10ns 4mb |L2.475| " - - "L2.476[126690,148507] 10ns 10mb |L2.476-| " - - "L2.477[148508,170324] 10ns 10mb |L2.477-| " - - "L2.478[170325,188871] 10ns 9mb |L2.478| " + - "L2.459[100,23078] 10ns 10mb|-L2.459-| " + - "L2.463[73574,95785] 10ns 10mb |L2.463-| " + - "L2.470[23079,45714] 10ns 10mb |-L2.470-| " + - "L2.471[45715,68349] 10ns 10mb |-L2.471-| " + - "L2.472[68350,73573] 10ns 2mb |L2.472| " + - "L2.473[95786,117786] 10ns 10mb |L2.473-| " + - "L2.474[117787,139786] 10ns 10mb |L2.474-| " + - 
"L2.475[139787,148646] 10ns 4mb |L2.475| " + - "L2.476[148647,170349] 10ns 10mb |L2.476-| " + - "L2.477[170350,192051] 10ns 10mb |L2.477-| " + - "L2.478[192052,200000] 10ns 4mb |L2.478|" "### ); } @@ -2400,7 +2398,7 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "Committing partition 1:" - " Soft Deleting 10 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10" - " Creating 70 files" - - "**** Simulation run 10, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[67700, 135300]). 23 Input Files, 30mb total:" + - "**** Simulation run 10, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[67699, 135298]). 23 Input Files, 30mb total:" - "L0 " - "L0.11[100,28657] 1ns 1mb |--L0.11---| " - "L0.12[28658,57214] 1ns 1mb |--L0.12---| " @@ -2427,155 +2425,155 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L0.33[28658,57214] 4ns 1mb |--L0.33---| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[100,67700] 4ns 10mb |------------L1.?------------| " - - "L1.?[67701,135300] 4ns 10mb |------------L1.?------------| " - - "L1.?[135301,200000] 4ns 10mb |-----------L1.?------------| " + - "L1.?[100,67699] 4ns 10mb |------------L1.?------------| " + - "L1.?[67700,135298] 4ns 10mb |------------L1.?------------| " + - "L1.?[135299,200000] 4ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 23 files: L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33" - " Creating 3 files" - "**** Simulation run 11, type=split(HighL0OverlapTotalBacklog)(split_times=[28657, 57214]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.81[100,67700] 4ns |-----------------------------------------L1.81------------------------------------------|" + - "L1.81[100,67699] 4ns |-----------------------------------------L1.81------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[100,28657] 4ns 4mb |----------------L1.?----------------| " - "L1.?[28658,57214] 4ns 4mb |----------------L1.?----------------| " - - "L1.?[57215,67700] 4ns 2mb |---L1.?----| " + - "L1.?[57215,67699] 4ns 2mb |---L1.?----| " - "**** Simulation run 12, type=split(HighL0OverlapTotalBacklog)(split_times=[85771, 114328]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.82[67701,135300] 4ns |-----------------------------------------L1.82------------------------------------------|" + - "L1.82[67700,135298] 4ns |-----------------------------------------L1.82------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[67701,85771] 4ns 3mb|---------L1.?---------| " + - "L1.?[67700,85771] 4ns 3mb|---------L1.?---------| " - "L1.?[85772,114328] 4ns 4mb |----------------L1.?----------------| " - - "L1.?[114329,135300] 4ns 3mb |----------L1.?-----------| " + - "L1.?[114329,135298] 4ns 3mb |----------L1.?-----------| " - "**** Simulation run 13, type=split(HighL0OverlapTotalBacklog)(split_times=[142885, 171442]). 
1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.83[135301,200000] 4ns |-----------------------------------------L1.83------------------------------------------|" + - "L1.83[135299,200000] 4ns |-----------------------------------------L1.83------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[135301,142885] 4ns 1mb|--L1.?--| " + - "L1.?[135299,142885] 4ns 1mb|--L1.?--| " - "L1.?[142886,171442] 4ns 4mb |----------------L1.?-----------------| " - "L1.?[171443,200000] 4ns 4mb |----------------L1.?-----------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.81, L1.82, L1.83" - " Creating 9 files" - - "**** Simulation run 14, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "**** Simulation run 14, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.34[57215,85771] 4ns |-----------------------------------------L0.34------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 4ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 4ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 15, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 4ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 4ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 15, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.36[114329,142885] 4ns |-----------------------------------------L0.36------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 4ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 4ns 350kb |--------L0.?---------| " - - "**** Simulation run 16, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 4ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 4ns 350kb |--------L0.?---------| " + - "**** Simulation run 16, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.41[57215,85771] 5ns |-----------------------------------------L0.41------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 5ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 5ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 17, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 5ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 5ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 17, type=split(ReduceOverlap)(split_times=[135298]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.43[114329,142885] 5ns |-----------------------------------------L0.43------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 5ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 5ns 350kb |--------L0.?---------| " - - "**** Simulation run 18, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 5ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 5ns 350kb |--------L0.?---------| " + - "**** Simulation run 18, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.48[57215,85771] 6ns |-----------------------------------------L0.48------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 6ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 6ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 19, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 6ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 6ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 19, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.50[114329,142885] 6ns |-----------------------------------------L0.50------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 6ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 6ns 350kb |--------L0.?---------| " - - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 6ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 6ns 350kb |--------L0.?---------| " + - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.55[57215,85771] 7ns |-----------------------------------------L0.55------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 7ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 7ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 21, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 7ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 7ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 21, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.57[114329,142885] 7ns |-----------------------------------------L0.57------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 7ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 7ns 350kb |--------L0.?---------| " - - "**** Simulation run 22, type=split(ReduceOverlap)(split_times=[67700]). 
1 Input Files, 1mb total:" + - "L0.?[114329,135298] 7ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 7ns 350kb |--------L0.?---------| " + - "**** Simulation run 22, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.62[57215,85771] 8ns |-----------------------------------------L0.62------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 8ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 8ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 8ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 8ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.64[114329,142885] 8ns |-----------------------------------------L0.64------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 8ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 8ns 350kb |--------L0.?---------| " - - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 8ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 8ns 350kb |--------L0.?---------| " + - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[67699]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.69[57215,85771] 9ns |-----------------------------------------L0.69------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 9ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 9ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 9ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 9ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.71[114329,142885] 9ns |-----------------------------------------L0.71------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 9ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 9ns 350kb |--------L0.?---------| " - - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[67700]). 1 Input Files, 1mb total:" + - "L0.?[114329,135298] 9ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 9ns 350kb |--------L0.?---------| " + - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[67699]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.76[57215,85771] 10ns |-----------------------------------------L0.76------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[57215,67700] 10ns 483kb|-------------L0.?--------------| " - - "L0.?[67701,85771] 10ns 833kb |-------------------------L0.?-------------------------| " - - "**** Simulation run 27, type=split(ReduceOverlap)(split_times=[135300]). 1 Input Files, 1mb total:" + - "L0.?[57215,67699] 10ns 483kb|-------------L0.?--------------| " + - "L0.?[67700,85771] 10ns 833kb |-------------------------L0.?-------------------------| " + - "**** Simulation run 27, type=split(ReduceOverlap)(split_times=[135298]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.78[114329,142885] 10ns|-----------------------------------------L0.78------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[114329,135300] 10ns 967kb|------------------------------L0.?------------------------------| " - - "L0.?[135301,142885] 10ns 350kb |--------L0.?---------| " + - "L0.?[114329,135298] 10ns 967kb|------------------------------L0.?------------------------------| " + - "L0.?[135299,142885] 10ns 350kb |--------L0.?---------| " - "Committing partition 1:" - " Soft Deleting 14 files: L0.34, L0.36, L0.41, L0.43, L0.48, L0.50, L0.55, L0.57, L0.62, L0.64, L0.69, L0.71, L0.76, L0.78" - " Creating 28 files" - "**** Simulation run 28, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[109041, 160867]). 14 Input Files, 28mb total:" - "L0 " - - "L0.93[57215,67700] 4ns 483kb|L0.93| " - - "L0.94[67701,85771] 4ns 833kb |--L0.94--| " + - "L0.93[57215,67699] 4ns 483kb|L0.93| " + - "L0.94[67700,85771] 4ns 833kb |--L0.94--| " - "L0.35[85772,114328] 4ns 1mb |-----L0.35-----| " - - "L0.95[114329,135300] 4ns 967kb |---L0.95---| " - - "L0.96[135301,142885] 4ns 350kb |L0.96| " + - "L0.95[114329,135298] 4ns 967kb |---L0.95---| " + - "L0.96[135299,142885] 4ns 350kb |L0.96| " - "L0.37[142886,171442] 4ns 1mb |-----L0.37-----| " - "L0.38[171443,200000] 4ns 1mb |-----L0.38------|" - "L1 " - - "L1.86[57215,67700] 4ns 2mb|L1.86| " - - "L1.87[67701,85771] 4ns 3mb |--L1.87--| " + - "L1.86[57215,67699] 4ns 2mb|L1.86| " + - "L1.87[67700,85771] 4ns 3mb |--L1.87--| " - "L1.88[85772,114328] 4ns 4mb |-----L1.88-----| " - - "L1.89[114329,135300] 4ns 3mb |---L1.89---| " - - "L1.90[135301,142885] 4ns 1mb |L1.90| " + - "L1.89[114329,135298] 4ns 3mb |---L1.89---| " + - "L1.90[135299,142885] 4ns 1mb |L1.90| " - "L1.91[142886,171442] 4ns 4mb |-----L1.91-----| " - "L1.92[171443,200000] 4ns 4mb |-----L1.92------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 28mb total:" @@ -2598,42 +2596,42 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L0.42[85772,114328] 5ns |-----------------------------------------L0.42------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 5ns 0b |L0.?| " + - "L0.?[85772,85772] 5ns 47b|L0.?| " - "L0.?[85773,114328] 5ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 31, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.49[85772,114328] 6ns |-----------------------------------------L0.49------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 6ns 0b |L0.?| " + - "L0.?[85772,85772] 6ns 47b|L0.?| " - "L0.?[85773,114328] 6ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 32, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.56[85772,114328] 7ns |-----------------------------------------L0.56------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 7ns 0b |L0.?| " + - "L0.?[85772,85772] 7ns 47b|L0.?| " - "L0.?[85773,114328] 7ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 33, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.63[85772,114328] 8ns |-----------------------------------------L0.63------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 8ns 0b |L0.?| " + - "L0.?[85772,85772] 8ns 47b|L0.?| " - "L0.?[85773,114328] 8ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 34, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.70[85772,114328] 9ns |-----------------------------------------L0.70------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 9ns 0b |L0.?| " + - "L0.?[85772,85772] 9ns 47b|L0.?| " - "L0.?[85773,114328] 9ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 35, type=split(HighL0OverlapTotalBacklog)(split_times=[85772]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.77[85772,114328] 10ns |-----------------------------------------L0.77------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85772,85772] 10ns 0b|L0.?| " + - "L0.?[85772,85772] 10ns 47b|L0.?| " - "L0.?[85773,114328] 10ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 36, type=split(HighL0OverlapTotalBacklog)(split_times=[114329, 142886]). 1 Input Files, 10mb total:" - "L1, all files 10mb " @@ -2645,87 +2643,87 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L1.?[142887,160867] 4ns 3mb |------------L1.?-------------| " - "**** Simulation run 37, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 
1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.99[114329,135300] 5ns |-----------------------------------------L0.99------------------------------------------|" + - "L0.99[114329,135298] 5ns |-----------------------------------------L0.99------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 5ns 0b|L0.?| " - - "L0.?[114330,135300] 5ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 5ns 47b|L0.?| " + - "L0.?[114330,135298] 5ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 38, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.103[114329,135300] 6ns|-----------------------------------------L0.103-----------------------------------------|" + - "L0.103[114329,135298] 6ns|-----------------------------------------L0.103-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 6ns 0b|L0.?| " - - "L0.?[114330,135300] 6ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 6ns 47b|L0.?| " + - "L0.?[114330,135298] 6ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 39, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.107[114329,135300] 7ns|-----------------------------------------L0.107-----------------------------------------|" + - "L0.107[114329,135298] 7ns|-----------------------------------------L0.107-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 7ns 0b|L0.?| " - - "L0.?[114330,135300] 7ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 7ns 47b|L0.?| " + - "L0.?[114330,135298] 7ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 40, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.111[114329,135300] 8ns|-----------------------------------------L0.111-----------------------------------------|" + - "L0.111[114329,135298] 8ns|-----------------------------------------L0.111-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 8ns 0b|L0.?| " - - "L0.?[114330,135300] 8ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 8ns 47b|L0.?| " + - "L0.?[114330,135298] 8ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 41, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 
1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.115[114329,135300] 9ns|-----------------------------------------L0.115-----------------------------------------|" + - "L0.115[114329,135298] 9ns|-----------------------------------------L0.115-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 9ns 0b|L0.?| " - - "L0.?[114330,135300] 9ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 9ns 47b|L0.?| " + - "L0.?[114330,135298] 9ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 42, type=split(HighL0OverlapTotalBacklog)(split_times=[114329]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.119[114329,135300] 10ns|-----------------------------------------L0.119-----------------------------------------|" + - "L0.119[114329,135298] 10ns|-----------------------------------------L0.119-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114329,114329] 10ns 0b|L0.?| " - - "L0.?[114330,135300] 10ns 967kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[114329,114329] 10ns 47b|L0.?| " + - "L0.?[114330,135298] 10ns 967kb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 43, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.44[142886,171442] 5ns |-----------------------------------------L0.44------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 5ns 0b|L0.?| " + - "L0.?[142886,142886] 5ns 47b|L0.?| " - "L0.?[142887,171442] 5ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 44, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.51[142886,171442] 6ns |-----------------------------------------L0.51------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 6ns 0b|L0.?| " + - "L0.?[142886,142886] 6ns 47b|L0.?| " - "L0.?[142887,171442] 6ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 45, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.58[142886,171442] 7ns |-----------------------------------------L0.58------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 7ns 0b|L0.?| " + - "L0.?[142886,142886] 7ns 47b|L0.?| " - "L0.?[142887,171442] 7ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 46, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.65[142886,171442] 8ns |-----------------------------------------L0.65------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 8ns 0b|L0.?| " + - "L0.?[142886,142886] 8ns 47b|L0.?| " - "L0.?[142887,171442] 8ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 47, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.72[142886,171442] 9ns |-----------------------------------------L0.72------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 9ns 0b|L0.?| " + - "L0.?[142886,142886] 9ns 47b|L0.?| " - "L0.?[142887,171442] 9ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 48, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.79[142886,171442] 10ns|-----------------------------------------L0.79------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[142886,142886] 10ns 0b|L0.?| " + - "L0.?[142886,142886] 10ns 47b|L0.?| " - "L0.?[142887,171442] 10ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 49, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 8mb total:" - "L1, all files 8mb " @@ -2739,42 +2737,42 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L0.45[171443,200000] 5ns |-----------------------------------------L0.45------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 5ns 0b|L0.?| " + - "L0.?[171443,171443] 5ns 47b|L0.?| " - "L0.?[171444,200000] 5ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 51, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.52[171443,200000] 6ns |-----------------------------------------L0.52------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 6ns 0b|L0.?| " + - "L0.?[171443,171443] 6ns 47b|L0.?| " - "L0.?[171444,200000] 6ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 52, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.59[171443,200000] 7ns |-----------------------------------------L0.59------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 7ns 0b|L0.?| " + - "L0.?[171443,171443] 7ns 47b|L0.?| " - "L0.?[171444,200000] 7ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 53, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.66[171443,200000] 8ns |-----------------------------------------L0.66------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 8ns 0b|L0.?| " + - "L0.?[171443,171443] 8ns 47b|L0.?| " - "L0.?[171444,200000] 8ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 54, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.73[171443,200000] 9ns |-----------------------------------------L0.73------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 9ns 0b|L0.?| " + - "L0.?[171443,171443] 9ns 47b|L0.?| " - "L0.?[171444,200000] 9ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "**** Simulation run 55, type=split(HighL0OverlapTotalBacklog)(split_times=[171443]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.80[171443,200000] 10ns|-----------------------------------------L0.80------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171443,171443] 10ns 0b|L0.?| " + - "L0.?[171443,171443] 10ns 47b|L0.?| " - "L0.?[171444,200000] 10ns 1mb|-----------------------------------------L0.?------------------------------------------| " - "Committing partition 1:" - " Soft Deleting 27 files: L0.42, L0.44, L0.45, L0.49, L0.51, L0.52, L0.56, L0.58, L0.59, L0.63, L0.65, L0.66, L0.70, L0.72, L0.73, L0.77, L0.79, L0.80, L0.99, L0.103, L0.107, L0.111, L0.115, L0.119, L1.121, L1.122, L1.123" @@ -2866,16 +2864,16 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "Committing partition 1:" - " Soft Deleting 12 files: L0.127, L0.129, L0.131, L0.133, L0.135, L0.137, L0.154, L0.156, L0.158, L0.160, L0.162, L0.164" - " Creating 24 files" - - "**** Simulation run 68, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[46513, 92926]). 13 Input Files, 25mb total:" + - "**** Simulation run 68, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[46512, 92924]). 
13 Input Files, 25mb total:" - "L0 " - "L0.39[100,28657] 5ns 1mb |-------L0.39--------| " - "L0.40[28658,57214] 5ns 1mb |-------L0.40--------| " - - "L0.97[57215,67700] 5ns 483kb |L0.97-| " - - "L0.98[67701,85771] 5ns 833kb |---L0.98----| " - - "L0.126[85772,85772] 5ns 0b |L0.126| " + - "L0.97[57215,67699] 5ns 483kb |L0.97-| " + - "L0.98[67700,85771] 5ns 833kb |---L0.98----| " + - "L0.126[85772,85772] 5ns 47b |L0.126| " - "L0.179[85773,109041] 5ns 1mb |-----L0.179-----| " - "L0.180[109042,114328] 5ns 244kb |L0.180|" - - "L0.141[114329,114329] 5ns 0b |L0.141|" + - "L0.141[114329,114329] 5ns 47b |L0.141|" - "L1 " - "L1.84[100,28657] 4ns 4mb |-------L1.84--------| " - "L1.85[28658,57214] 4ns 4mb |-------L1.85--------| " @@ -2884,19 +2882,19 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L1.138[109042,114329] 4ns 1mb |L1.138|" - "**** 3 Output Files (parquet_file_id not yet assigned), 25mb total:" - "L1 " - - "L1.?[100,46513] 5ns 10mb |---------------L1.?---------------| " - - "L1.?[46514,92926] 5ns 10mb |---------------L1.?---------------| " - - "L1.?[92927,114329] 5ns 5mb |-----L1.?-----| " + - "L1.?[100,46512] 5ns 10mb |---------------L1.?---------------| " + - "L1.?[46513,92924] 5ns 10mb |---------------L1.?---------------| " + - "L1.?[92925,114329] 5ns 5mb |-----L1.?-----| " - "Committing partition 1:" - " Soft Deleting 13 files: L0.39, L0.40, L1.84, L1.85, L0.97, L0.98, L1.124, L1.125, L0.126, L1.138, L0.141, L0.179, L0.180" - " Creating 3 files" - "**** Simulation run 69, type=split(HighL0OverlapTotalBacklog)(split_times=[38176]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.203[100,46513] 5ns |----------------------------------------L1.203-----------------------------------------| " + - "L1.203[100,46512] 5ns |-----------------------------------------L1.203-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[100,38176] 5ns 8mb |---------------------------------L1.?----------------------------------| " - - "L1.?[38177,46513] 5ns 2mb |-----L1.?-----| " + - "L1.?[38177,46512] 5ns 2mb |-----L1.?-----| " - "**** Simulation run 70, type=split(HighL0OverlapTotalBacklog)(split_times=[38176]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.47[28658,57214] 6ns |-----------------------------------------L0.47------------------------------------------|" @@ -2934,130 +2932,130 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L0.?[38177,57214] 10ns 878kb |--------------------------L0.?---------------------------| " - "**** Simulation run 75, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.204[46514,92926] 5ns |-----------------------------------------L1.204-----------------------------------------|" + - "L1.204[46513,92924] 5ns |-----------------------------------------L1.204-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[46514,76252] 5ns 6mb|-------------------------L1.?--------------------------| " - - "L1.?[76253,92926] 5ns 4mb |-------------L1.?-------------| " + - "L1.?[46513,76252] 5ns 6mb|-------------------------L1.?--------------------------| " + - "L1.?[76253,92924] 5ns 4mb |-------------L1.?-------------| " - "**** Simulation run 76, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 
1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.102[67701,85771] 6ns |-----------------------------------------L0.102-----------------------------------------|" + - "L0.102[67700,85771] 6ns |-----------------------------------------L0.102-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 6ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 6ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 6ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 77, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.106[67701,85771] 7ns |-----------------------------------------L0.106-----------------------------------------|" + - "L0.106[67700,85771] 7ns |-----------------------------------------L0.106-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 7ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 7ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 7ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 78, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.110[67701,85771] 8ns |-----------------------------------------L0.110-----------------------------------------|" + - "L0.110[67700,85771] 8ns |-----------------------------------------L0.110-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 8ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 8ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 8ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 79, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.114[67701,85771] 9ns |-----------------------------------------L0.114-----------------------------------------|" + - "L0.114[67700,85771] 9ns |-----------------------------------------L0.114-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 9ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 9ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 9ns 439kb |--------------------L0.?---------------------| " - "**** Simulation run 80, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 
1 Input Files, 833kb total:" - "L0, all files 833kb " - - "L0.118[67701,85771] 10ns |-----------------------------------------L0.118-----------------------------------------|" + - "L0.118[67700,85771] 10ns |-----------------------------------------L0.118-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 833kb total:" - "L0 " - - "L0.?[67701,76252] 10ns 394kb|------------------L0.?------------------| " + - "L0.?[67700,76252] 10ns 394kb|------------------L0.?------------------| " - "L0.?[76253,85771] 10ns 439kb |--------------------L0.?---------------------| " - "Committing partition 1:" - " Soft Deleting 12 files: L0.47, L0.54, L0.61, L0.68, L0.75, L0.102, L0.106, L0.110, L0.114, L0.118, L1.203, L1.204" - " Creating 24 files" - - "**** Simulation run 81, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "**** Simulation run 81, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.209[38177,57214] 6ns |-----------------------------------------L0.209-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 6ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 6ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 82, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 6ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 6ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 82, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.183[85773,109041] 6ns |----------------------------------------L0.183-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 6ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 6ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 83, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 6ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 6ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 83, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.211[38177,57214] 7ns |-----------------------------------------L0.211-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 7ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 7ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 84, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 7ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 7ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 84, type=split(ReduceOverlap)(split_times=[92924]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.187[85773,109041] 7ns |----------------------------------------L0.187-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 7ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 7ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 85, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 7ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 7ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 85, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.213[38177,57214] 8ns |-----------------------------------------L0.213-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 8ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 8ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 86, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 8ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 8ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 86, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.191[85773,109041] 8ns |----------------------------------------L0.191-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 8ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 8ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 87, type=split(ReduceOverlap)(split_times=[46513]). 1 Input Files, 878kb total:" + - "L0.?[85773,92924] 8ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 8ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 87, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.215[38177,57214] 9ns |-----------------------------------------L0.215-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 9ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 9ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 88, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 9ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 9ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 88, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.195[85773,109041] 9ns |----------------------------------------L0.195-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 9ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 9ns 743kb |----------------------------L0.?----------------------------| " - - "**** Simulation run 89, type=split(ReduceOverlap)(split_times=[46513]). 
1 Input Files, 878kb total:" + - "L0.?[85773,92924] 9ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 9ns 743kb |----------------------------L0.?----------------------------| " + - "**** Simulation run 89, type=split(ReduceOverlap)(split_times=[46512]). 1 Input Files, 878kb total:" - "L0, all files 878kb " - "L0.217[38177,57214] 10ns |-----------------------------------------L0.217-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 878kb total:" - "L0 " - - "L0.?[38177,46513] 10ns 384kb|----------------L0.?-----------------| " - - "L0.?[46514,57214] 10ns 493kb |----------------------L0.?----------------------| " - - "**** Simulation run 90, type=split(ReduceOverlap)(split_times=[92926]). 1 Input Files, 1mb total:" + - "L0.?[38177,46512] 10ns 384kb|----------------L0.?-----------------| " + - "L0.?[46513,57214] 10ns 493kb |----------------------L0.?----------------------| " + - "**** Simulation run 90, type=split(ReduceOverlap)(split_times=[92924]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.199[85773,109041] 10ns|----------------------------------------L0.199-----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[85773,92926] 10ns 330kb|----------L0.?-----------| " - - "L0.?[92927,109041] 10ns 743kb |----------------------------L0.?----------------------------| " + - "L0.?[85773,92924] 10ns 330kb|----------L0.?-----------| " + - "L0.?[92925,109041] 10ns 743kb |----------------------------L0.?----------------------------| " - "Committing partition 1:" - " Soft Deleting 10 files: L0.183, L0.187, L0.191, L0.195, L0.199, L0.209, L0.211, L0.213, L0.215, L0.217" - " Creating 20 files" - - "**** Simulation run 91, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[156350, 198370]). 11 Input Files, 20mb total:" + - "**** Simulation run 91, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[156351, 198372]). 
11 Input Files, 20mb total:" - "L0 " - - "L0.142[114330,135300] 5ns 967kb|-------L0.142-------| " - - "L0.100[135301,142885] 5ns 350kb |L0.100| " - - "L0.153[142886,142886] 5ns 0b |L0.153| " + - "L0.142[114330,135298] 5ns 967kb|-------L0.142-------| " + - "L0.100[135299,142885] 5ns 350kb |L0.100| " + - "L0.153[142886,142886] 5ns 47b |L0.153| " - "L0.181[142887,160867] 5ns 829kb |-----L0.181-----| " - "L0.182[160868,171442] 5ns 488kb |-L0.182--| " - - "L0.167[171443,171443] 5ns 0b |L0.167| " + - "L0.167[171443,171443] 5ns 47b |L0.167| " - "L0.168[171444,200000] 5ns 1mb |----------L0.168-----------| " - "L1 " - "L1.139[114330,142886] 4ns 6mb|----------L1.139-----------| " @@ -3066,579 +3064,579 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L1.166[171444,200000] 4ns 6mb |----------L1.166-----------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 20mb total:" - "L1 " - - "L1.?[114330,156350] 5ns 10mb|-------------------L1.?-------------------| " - - "L1.?[156351,198370] 5ns 10mb |-------------------L1.?-------------------| " - - "L1.?[198371,200000] 5ns 397kb |L1.?|" + - "L1.?[114330,156351] 5ns 10mb|-------------------L1.?-------------------| " + - "L1.?[156352,198372] 5ns 10mb |-------------------L1.?-------------------| " + - "L1.?[198373,200000] 5ns 397kb |L1.?|" - "Committing partition 1:" - " Soft Deleting 11 files: L0.100, L1.139, L1.140, L0.142, L0.153, L1.165, L1.166, L0.167, L0.168, L0.181, L0.182" - " Creating 3 files" - "**** Simulation run 92, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.250[114330,156350] 5ns|-----------------------------------------L1.250-----------------------------------------|" + - "L1.250[114330,156351] 5ns|-----------------------------------------L1.250-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[114330,142886] 5ns 7mb|---------------------------L1.?----------------------------| " - - "L1.?[142887,156350] 5ns 3mb |-----------L1.?-----------| " + - "L1.?[142887,156351] 5ns 3mb |-----------L1.?-----------| " - "**** Simulation run 93, type=split(HighL0OverlapTotalBacklog)(split_times=[171442]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.251[156351,198370] 5ns|-----------------------------------------L1.251-----------------------------------------|" + - "L1.251[156352,198372] 5ns|-----------------------------------------L1.251-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[156351,171442] 5ns 4mb|-------------L1.?-------------| " - - "L1.?[171443,198370] 5ns 6mb |-------------------------L1.?--------------------------| " + - "L1.?[156352,171442] 5ns 4mb|-------------L1.?-------------| " + - "L1.?[171443,198372] 5ns 6mb |-------------------------L1.?--------------------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.250, L1.251" - " Creating 4 files" - - "**** Simulation run 94, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "**** Simulation run 94, type=split(ReduceOverlap)(split_times=[156351]). 
1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.185[142887,160867] 6ns|-----------------------------------------L0.185-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 6ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 6ns 208kb |--------L0.?--------| " - - "**** Simulation run 95, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 6ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 6ns 208kb |--------L0.?--------| " + - "**** Simulation run 95, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.170[171444,200000] 6ns|-----------------------------------------L0.170-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 6ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 6ns 75kb |L0.?|" - - "**** Simulation run 96, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 6ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 6ns 75kb |L0.?|" + - "**** Simulation run 96, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.189[142887,160867] 7ns|-----------------------------------------L0.189-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 7ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 7ns 208kb |--------L0.?--------| " - - "**** Simulation run 97, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 7ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 7ns 208kb |--------L0.?--------| " + - "**** Simulation run 97, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.172[171444,200000] 7ns|-----------------------------------------L0.172-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 7ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 7ns 75kb |L0.?|" - - "**** Simulation run 98, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 7ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 7ns 75kb |L0.?|" + - "**** Simulation run 98, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.193[142887,160867] 8ns|-----------------------------------------L0.193-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 8ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 8ns 208kb |--------L0.?--------| " - - "**** Simulation run 99, type=split(ReduceOverlap)(split_times=[198370]). 
1 Input Files, 1mb total:" + - "L0.?[142887,156351] 8ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 8ns 208kb |--------L0.?--------| " + - "**** Simulation run 99, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.174[171444,200000] 8ns|-----------------------------------------L0.174-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 8ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 8ns 75kb |L0.?|" - - "**** Simulation run 100, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 8ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 8ns 75kb |L0.?|" + - "**** Simulation run 100, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.197[142887,160867] 9ns|-----------------------------------------L0.197-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 9ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 9ns 208kb |--------L0.?--------| " - - "**** Simulation run 101, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 9ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 9ns 208kb |--------L0.?--------| " + - "**** Simulation run 101, type=split(ReduceOverlap)(split_times=[198372]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.176[171444,200000] 9ns|-----------------------------------------L0.176-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 9ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 9ns 75kb |L0.?|" - - "**** Simulation run 102, type=split(ReduceOverlap)(split_times=[156350]). 1 Input Files, 829kb total:" + - "L0.?[171444,198372] 9ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 9ns 75kb |L0.?|" + - "**** Simulation run 102, type=split(ReduceOverlap)(split_times=[156351]). 1 Input Files, 829kb total:" - "L0, all files 829kb " - "L0.201[142887,160867] 10ns|-----------------------------------------L0.201-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 829kb total:" - "L0 " - - "L0.?[142887,156350] 10ns 621kb|------------------------------L0.?-------------------------------| " - - "L0.?[156351,160867] 10ns 208kb |--------L0.?--------| " - - "**** Simulation run 103, type=split(ReduceOverlap)(split_times=[198370]). 1 Input Files, 1mb total:" + - "L0.?[142887,156351] 10ns 621kb|------------------------------L0.?-------------------------------| " + - "L0.?[156352,160867] 10ns 208kb |--------L0.?--------| " + - "**** Simulation run 103, type=split(ReduceOverlap)(split_times=[198372]). 
1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.178[171444,200000] 10ns|-----------------------------------------L0.178-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,198370] 10ns 1mb|---------------------------------------L0.?---------------------------------------| " - - "L0.?[198371,200000] 10ns 75kb |L0.?|" + - "L0.?[171444,198372] 10ns 1mb|---------------------------------------L0.?---------------------------------------| " + - "L0.?[198373,200000] 10ns 75kb |L0.?|" - "Committing partition 1:" - " Soft Deleting 10 files: L0.170, L0.172, L0.174, L0.176, L0.178, L0.185, L0.189, L0.193, L0.197, L0.201" - " Creating 20 files" - - "**** Simulation run 104, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[38491, 76882]). 17 Input Files, 30mb total:" + - "**** Simulation run 104, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[38490, 76880]). 17 Input Files, 30mb total:" - "L0 " - "L0.46[100,28657] 6ns 1mb |-------L0.46--------| " - "L0.208[28658,38176] 6ns 439kb |L0.208| " - - "L0.230[38177,46513] 6ns 384kb |L0.230| " - - "L0.231[46514,57214] 6ns 493kb |L0.231| " - - "L0.101[57215,67700] 6ns 483kb |L0.101| " - - "L0.220[67701,76252] 6ns 394kb |L0.220| " + - "L0.230[38177,46512] 6ns 384kb |L0.230| " + - "L0.231[46513,57214] 6ns 493kb |L0.231| " + - "L0.101[57215,67699] 6ns 483kb |L0.101| " + - "L0.220[67700,76252] 6ns 394kb |L0.220| " - "L0.221[76253,85771] 6ns 439kb |L0.221| " - - "L0.128[85772,85772] 6ns 0b |L0.128| " - - "L0.232[85773,92926] 6ns 330kb |L0.232| " - - "L0.233[92927,109041] 6ns 743kb |--L0.233--| " + - "L0.128[85772,85772] 6ns 47b |L0.128| " + - "L0.232[85773,92924] 6ns 330kb |L0.232| " + - "L0.233[92925,109041] 6ns 743kb |--L0.233--| " - "L0.184[109042,114328] 6ns 244kb |L0.184|" - - "L0.143[114329,114329] 6ns 0b |L0.143|" + - "L0.143[114329,114329] 6ns 47b |L0.143|" - "L1 " - "L1.206[100,38176] 5ns 8mb|----------L1.206-----------| " - - "L1.207[38177,46513] 5ns 2mb |L1.207| " - - "L1.218[46514,76252] 5ns 6mb |-------L1.218--------| " - - "L1.219[76253,92926] 5ns 4mb |--L1.219---| " - - "L1.205[92927,114329] 5ns 5mb |----L1.205----| " + - "L1.207[38177,46512] 5ns 2mb |L1.207| " + - "L1.218[46513,76252] 5ns 6mb |-------L1.218--------| " + - "L1.219[76253,92924] 5ns 4mb |--L1.219---| " + - "L1.205[92925,114329] 5ns 5mb |----L1.205----| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[100,38491] 6ns 10mb |------------L1.?------------| " - - "L1.?[38492,76882] 6ns 10mb |------------L1.?------------| " - - "L1.?[76883,114329] 6ns 10mb |-----------L1.?------------| " + - "L1.?[100,38490] 6ns 10mb |------------L1.?------------| " + - "L1.?[38491,76880] 6ns 10mb |------------L1.?------------| " + - "L1.?[76881,114329] 6ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 17 files: L0.46, L0.101, L0.128, L0.143, L0.184, L1.205, L1.206, L1.207, L0.208, L1.218, L1.219, L0.220, L0.221, L0.230, L0.231, L0.232, L0.233" - " Creating 3 files" - "**** Simulation run 105, type=split(HighL0OverlapTotalBacklog)(split_times=[38176]). 
1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.277[100,38491] 6ns |-----------------------------------------L1.277-----------------------------------------|" + - "L1.277[100,38490] 6ns |-----------------------------------------L1.277-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[100,38176] 6ns 10mb |-----------------------------------------L1.?------------------------------------------| " - - "L1.?[38177,38491] 6ns 84kb |L1.?|" + - "L1.?[38177,38490] 6ns 84kb |L1.?|" - "**** Simulation run 106, type=split(HighL0OverlapTotalBacklog)(split_times=[76252]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.278[38492,76882] 6ns |-----------------------------------------L1.278-----------------------------------------|" + - "L1.278[38491,76880] 6ns |-----------------------------------------L1.278-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[38492,76252] 6ns 10mb|-----------------------------------------L1.?-----------------------------------------| " - - "L1.?[76253,76882] 6ns 168kb |L1.?|" + - "L1.?[38491,76252] 6ns 10mb|-----------------------------------------L1.?-----------------------------------------| " + - "L1.?[76253,76880] 6ns 168kb |L1.?|" - "Committing partition 1:" - " Soft Deleting 2 files: L1.277, L1.278" - " Creating 4 files" - - "**** Simulation run 107, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "**** Simulation run 107, type=split(ReduceOverlap)(split_times=[38490]). 1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.234[38177,46513] 7ns |-----------------------------------------L0.234-----------------------------------------|" + - "L0.234[38177,46512] 7ns |-----------------------------------------L0.234-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 7ns 14kb|L0.?| " - - "L0.?[38492,46513] 7ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 108, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 7ns 14kb|L0.?| " + - "L0.?[38491,46512] 7ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 108, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.223[76253,85771] 7ns |-----------------------------------------L0.223-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 7ns 29kb|L0.?| " - - "L0.?[76883,85771] 7ns 410kb |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 109, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "L0.?[76253,76880] 7ns 29kb|L0.?| " + - "L0.?[76881,85771] 7ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "**** Simulation run 109, type=split(ReduceOverlap)(split_times=[38490]). 
1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.238[38177,46513] 8ns |-----------------------------------------L0.238-----------------------------------------|" + - "L0.238[38177,46512] 8ns |-----------------------------------------L0.238-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 8ns 14kb|L0.?| " - - "L0.?[38492,46513] 8ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 110, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 8ns 14kb|L0.?| " + - "L0.?[38491,46512] 8ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 110, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.225[76253,85771] 8ns |-----------------------------------------L0.225-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 8ns 29kb|L0.?| " - - "L0.?[76883,85771] 8ns 410kb |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 111, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "L0.?[76253,76880] 8ns 29kb|L0.?| " + - "L0.?[76881,85771] 8ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "**** Simulation run 111, type=split(ReduceOverlap)(split_times=[38490]). 1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.242[38177,46513] 9ns |-----------------------------------------L0.242-----------------------------------------|" + - "L0.242[38177,46512] 9ns |-----------------------------------------L0.242-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 9ns 14kb|L0.?| " - - "L0.?[38492,46513] 9ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 112, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 9ns 14kb|L0.?| " + - "L0.?[38491,46512] 9ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 112, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.227[76253,85771] 9ns |-----------------------------------------L0.227-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 9ns 29kb|L0.?| " - - "L0.?[76883,85771] 9ns 410kb |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 113, type=split(ReduceOverlap)(split_times=[38491]). 1 Input Files, 384kb total:" + - "L0.?[76253,76880] 9ns 29kb|L0.?| " + - "L0.?[76881,85771] 9ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "**** Simulation run 113, type=split(ReduceOverlap)(split_times=[38490]). 
1 Input Files, 384kb total:" - "L0, all files 384kb " - - "L0.246[38177,46513] 10ns |-----------------------------------------L0.246-----------------------------------------|" + - "L0.246[38177,46512] 10ns |-----------------------------------------L0.246-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 384kb total:" - "L0 " - - "L0.?[38177,38491] 10ns 14kb|L0.?| " - - "L0.?[38492,46513] 10ns 370kb |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 114, type=split(ReduceOverlap)(split_times=[76882]). 1 Input Files, 439kb total:" + - "L0.?[38177,38490] 10ns 14kb|L0.?| " + - "L0.?[38491,46512] 10ns 370kb |----------------------------------------L0.?----------------------------------------| " + - "**** Simulation run 114, type=split(ReduceOverlap)(split_times=[76880]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.229[76253,85771] 10ns |-----------------------------------------L0.229-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[76253,76882] 10ns 29kb|L0.?| " - - "L0.?[76883,85771] 10ns 410kb |---------------------------------------L0.?---------------------------------------| " + - "L0.?[76253,76880] 10ns 29kb|L0.?| " + - "L0.?[76881,85771] 10ns 410kb |---------------------------------------L0.?---------------------------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.223, L0.225, L0.227, L0.229, L0.234, L0.238, L0.242, L0.246" - " Creating 16 files" - - "**** Simulation run 115, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[149665, 185000]). 14 Input Files, 24mb total:" + - "**** Simulation run 115, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[149666, 185002]). 
14 Input Files, 24mb total:" - "L0 " - - "L0.144[114330,135300] 6ns 967kb|-------L0.144-------| " - - "L0.104[135301,142885] 6ns 350kb |L0.104| " - - "L0.155[142886,142886] 6ns 0b |L0.155| " - - "L0.257[142887,156350] 6ns 621kb |---L0.257---| " - - "L0.258[156351,160867] 6ns 208kb |L0.258| " + - "L0.144[114330,135298] 6ns 967kb|-------L0.144-------| " + - "L0.104[135299,142885] 6ns 350kb |L0.104| " + - "L0.155[142886,142886] 6ns 47b |L0.155| " + - "L0.257[142887,156351] 6ns 621kb |---L0.257---| " + - "L0.258[156352,160867] 6ns 208kb |L0.258| " - "L0.186[160868,171442] 6ns 488kb |-L0.186--| " - - "L0.169[171443,171443] 6ns 0b |L0.169| " - - "L0.259[171444,198370] 6ns 1mb |----------L0.259----------| " - - "L0.260[198371,200000] 6ns 75kb |L0.260|" + - "L0.169[171443,171443] 6ns 47b |L0.169| " + - "L0.259[171444,198372] 6ns 1mb |----------L0.259----------| " + - "L0.260[198373,200000] 6ns 75kb |L0.260|" - "L1 " - "L1.253[114330,142886] 5ns 7mb|----------L1.253-----------| " - - "L1.254[142887,156350] 5ns 3mb |---L1.254---| " - - "L1.255[156351,171442] 5ns 4mb |---L1.255----| " - - "L1.256[171443,198370] 5ns 6mb |----------L1.256----------| " - - "L1.252[198371,200000] 5ns 397kb |L1.252|" + - "L1.254[142887,156351] 5ns 3mb |---L1.254---| " + - "L1.255[156352,171442] 5ns 4mb |---L1.255----| " + - "L1.256[171443,198372] 5ns 6mb |----------L1.256----------| " + - "L1.252[198373,200000] 5ns 397kb |L1.252|" - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L1 " - - "L1.?[114330,149665] 6ns 10mb|---------------L1.?----------------| " - - "L1.?[149666,185000] 6ns 10mb |---------------L1.?----------------| " - - "L1.?[185001,200000] 6ns 4mb |----L1.?-----| " + - "L1.?[114330,149666] 6ns 10mb|---------------L1.?----------------| " + - "L1.?[149667,185002] 6ns 10mb |---------------L1.?----------------| " + - "L1.?[185003,200000] 6ns 4mb |----L1.?-----| " - "Committing partition 1:" - " Soft Deleting 14 files: L0.104, L0.144, L0.155, L0.169, L0.186, L1.252, L1.253, L1.254, L1.255, L1.256, L0.257, L0.258, L0.259, L0.260" - " Creating 3 files" - "**** Simulation run 116, type=split(HighL0OverlapTotalBacklog)(split_times=[142886]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.300[114330,149665] 6ns|-----------------------------------------L1.300-----------------------------------------|" + - "L1.300[114330,149666] 6ns|-----------------------------------------L1.300-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - "L1.?[114330,142886] 6ns 8mb|---------------------------------L1.?---------------------------------| " - - "L1.?[142887,149665] 6ns 2mb |-----L1.?------| " + - "L1.?[142887,149666] 6ns 2mb |-----L1.?------| " - "**** Simulation run 117, type=split(HighL0OverlapTotalBacklog)(split_times=[171442]). 
1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.301[149666,185000] 6ns|-----------------------------------------L1.301-----------------------------------------|" + - "L1.301[149667,185002] 6ns|-----------------------------------------L1.301-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[149666,171442] 6ns 6mb|------------------------L1.?-------------------------| " - - "L1.?[171443,185000] 6ns 4mb |--------------L1.?--------------| " + - "L1.?[149667,171442] 6ns 6mb|------------------------L1.?-------------------------| " + - "L1.?[171443,185002] 6ns 4mb |--------------L1.?--------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.300, L1.301" - " Creating 4 files" - - "**** Simulation run 118, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "**** Simulation run 118, type=split(ReduceOverlap)(split_times=[149666]). 1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.261[142887,156350] 7ns|----------------------------------------L0.261-----------------------------------------| " + - "L0.261[142887,156351] 7ns|-----------------------------------------L0.261-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 7ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 7ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 119, type=split(ReduceOverlap)(split_times=[185000]). 1 Input Files, 1mb total:" + - "L0.?[142887,149666] 7ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 7ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 119, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.263[171444,198370] 7ns|----------------------------------------L0.263-----------------------------------------| " + - "L0.263[171444,198372] 7ns|-----------------------------------------L0.263-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 7ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 7ns 617kb |-------------------L0.?-------------------| " - - "**** Simulation run 120, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "L0.?[171444,185002] 7ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 7ns 616kb |-------------------L0.?-------------------| " + - "**** Simulation run 120, type=split(ReduceOverlap)(split_times=[149666]). 1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.265[142887,156350] 8ns|----------------------------------------L0.265-----------------------------------------| " + - "L0.265[142887,156351] 8ns|-----------------------------------------L0.265-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 8ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 8ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 121, type=split(ReduceOverlap)(split_times=[185000]). 
1 Input Files, 1mb total:" + - "L0.?[142887,149666] 8ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 8ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 121, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.267[171444,198370] 8ns|----------------------------------------L0.267-----------------------------------------| " + - "L0.267[171444,198372] 8ns|-----------------------------------------L0.267-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 8ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 8ns 617kb |-------------------L0.?-------------------| " - - "**** Simulation run 122, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "L0.?[171444,185002] 8ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 8ns 616kb |-------------------L0.?-------------------| " + - "**** Simulation run 122, type=split(ReduceOverlap)(split_times=[149666]). 1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.269[142887,156350] 9ns|----------------------------------------L0.269-----------------------------------------| " + - "L0.269[142887,156351] 9ns|-----------------------------------------L0.269-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 9ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 9ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 123, type=split(ReduceOverlap)(split_times=[185000]). 1 Input Files, 1mb total:" + - "L0.?[142887,149666] 9ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 9ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 123, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.271[171444,198370] 9ns|----------------------------------------L0.271-----------------------------------------| " + - "L0.271[171444,198372] 9ns|-----------------------------------------L0.271-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 9ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 9ns 617kb |-------------------L0.?-------------------| " - - "**** Simulation run 124, type=split(ReduceOverlap)(split_times=[149665]). 1 Input Files, 621kb total:" + - "L0.?[171444,185002] 9ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 9ns 616kb |-------------------L0.?-------------------| " + - "**** Simulation run 124, type=split(ReduceOverlap)(split_times=[149666]). 
1 Input Files, 621kb total:" - "L0, all files 621kb " - - "L0.273[142887,156350] 10ns|----------------------------------------L0.273-----------------------------------------| " + - "L0.273[142887,156351] 10ns|-----------------------------------------L0.273-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 621kb total:" - "L0 " - - "L0.?[142887,149665] 10ns 312kb|-------------------L0.?--------------------| " - - "L0.?[149666,156350] 10ns 308kb |-------------------L0.?-------------------| " - - "**** Simulation run 125, type=split(ReduceOverlap)(split_times=[185000]). 1 Input Files, 1mb total:" + - "L0.?[142887,149666] 10ns 313kb|-------------------L0.?--------------------| " + - "L0.?[149667,156351] 10ns 308kb |-------------------L0.?-------------------| " + - "**** Simulation run 125, type=split(ReduceOverlap)(split_times=[185002]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - - "L0.275[171444,198370] 10ns|----------------------------------------L0.275-----------------------------------------| " + - "L0.275[171444,198372] 10ns|-----------------------------------------L0.275-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[171444,185000] 10ns 625kb|-------------------L0.?--------------------| " - - "L0.?[185001,198370] 10ns 617kb |-------------------L0.?-------------------| " + - "L0.?[171444,185002] 10ns 625kb|-------------------L0.?--------------------| " + - "L0.?[185003,198372] 10ns 616kb |-------------------L0.?-------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.261, L0.263, L0.265, L0.267, L0.269, L0.271, L0.273, L0.275" - " Creating 16 files" - - "**** Simulation run 126, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[32834, 65568]). 12 Input Files, 23mb total:" + - "**** Simulation run 126, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[32832, 65564]). 
12 Input Files, 23mb total:" - "L0 " - "L0.53[100,28657] 7ns 1mb |-------------L0.53-------------| " - "L0.210[28658,38176] 7ns 439kb |-L0.210--| " - - "L0.284[38177,38491] 7ns 14kb |L0.284| " - - "L0.285[38492,46513] 7ns 370kb |L0.285-| " - - "L0.235[46514,57214] 7ns 493kb |--L0.235--| " - - "L0.105[57215,67700] 7ns 483kb |--L0.105--| " - - "L0.222[67701,76252] 7ns 394kb |-L0.222-| " - - "L0.286[76253,76882] 7ns 29kb |L0.286|" + - "L0.284[38177,38490] 7ns 14kb |L0.284| " + - "L0.285[38491,46512] 7ns 370kb |L0.285-| " + - "L0.235[46513,57214] 7ns 493kb |--L0.235--| " + - "L0.105[57215,67699] 7ns 483kb |--L0.105--| " + - "L0.222[67700,76252] 7ns 394kb |-L0.222-| " + - "L0.286[76253,76880] 7ns 29kb |L0.286|" - "L1 " - "L1.280[100,38176] 6ns 10mb|------------------L1.280------------------| " - - "L1.281[38177,38491] 6ns 84kb |L1.281| " - - "L1.282[38492,76252] 6ns 10mb |------------------L1.282------------------| " - - "L1.283[76253,76882] 6ns 168kb |L1.283|" + - "L1.281[38177,38490] 6ns 84kb |L1.281| " + - "L1.282[38491,76252] 6ns 10mb |------------------L1.282------------------| " + - "L1.283[76253,76880] 6ns 168kb |L1.283|" - "**** 3 Output Files (parquet_file_id not yet assigned), 23mb total:" - "L1 " - - "L1.?[100,32834] 7ns 10mb |----------------L1.?----------------| " - - "L1.?[32835,65568] 7ns 10mb |----------------L1.?----------------| " - - "L1.?[65569,76882] 7ns 3mb |---L1.?----| " + - "L1.?[100,32832] 7ns 10mb |----------------L1.?----------------| " + - "L1.?[32833,65564] 7ns 10mb |----------------L1.?----------------| " + - "L1.?[65565,76880] 7ns 3mb |---L1.?----| " - "Committing partition 1:" - " Soft Deleting 12 files: L0.53, L0.105, L0.210, L0.222, L0.235, L1.280, L1.281, L1.282, L1.283, L0.284, L0.285, L0.286" - " Creating 3 files" - - "**** Simulation run 127, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 10mb total:" + - "**** Simulation run 127, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.323[100,32834] 7ns |-----------------------------------------L1.323-----------------------------------------|" + - "L1.323[100,32832] 7ns |-----------------------------------------L1.323-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[100,25694] 7ns 8mb |--------------------------------L1.?--------------------------------| " - - "L1.?[25695,32834] 7ns 2mb |------L1.?-------| " - - "**** Simulation run 128, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 1mb total:" + - "L1.?[100,25693] 7ns 8mb |--------------------------------L1.?--------------------------------| " + - "L1.?[25694,32832] 7ns 2mb |------L1.?-------| " + - "**** Simulation run 128, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.60[100,28657] 8ns |-----------------------------------------L0.60------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[100,25694] 8ns 1mb |-------------------------------------L0.?-------------------------------------| " - - "L0.?[25695,28657] 8ns 137kb |-L0.?--| " - - "**** Simulation run 129, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 
1 Input Files, 1mb total:" + - "L0.?[100,25693] 8ns 1mb |-------------------------------------L0.?-------------------------------------| " + - "L0.?[25694,28657] 8ns 137kb |-L0.?--| " + - "**** Simulation run 129, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.67[100,28657] 9ns |-----------------------------------------L0.67------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[100,25694] 9ns 1mb |-------------------------------------L0.?-------------------------------------| " - - "L0.?[25695,28657] 9ns 137kb |-L0.?--| " - - "**** Simulation run 130, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 1mb total:" + - "L0.?[100,25693] 9ns 1mb |-------------------------------------L0.?-------------------------------------| " + - "L0.?[25694,28657] 9ns 137kb |-L0.?--| " + - "**** Simulation run 130, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 1mb total:" - "L0, all files 1mb " - "L0.74[100,28657] 10ns |-----------------------------------------L0.74------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 1mb total:" - "L0 " - - "L0.?[100,25694] 10ns 1mb |-------------------------------------L0.?-------------------------------------| " - - "L0.?[25695,28657] 10ns 137kb |-L0.?--| " - - "**** Simulation run 131, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 10mb total:" + - "L0.?[100,25693] 10ns 1mb |-------------------------------------L0.?-------------------------------------| " + - "L0.?[25694,28657] 10ns 137kb |-L0.?--| " + - "**** Simulation run 131, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.324[32835,65568] 7ns |-----------------------------------------L1.324-----------------------------------------|" + - "L1.324[32833,65564] 7ns |-----------------------------------------L1.324-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[32835,51288] 7ns 6mb|----------------------L1.?----------------------| " - - "L1.?[51289,65568] 7ns 4mb |----------------L1.?-----------------| " - - "**** Simulation run 132, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 493kb total:" + - "L1.?[32833,51286] 7ns 6mb|----------------------L1.?----------------------| " + - "L1.?[51287,65564] 7ns 4mb |----------------L1.?-----------------| " + - "**** Simulation run 132, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 493kb total:" - "L0, all files 493kb " - - "L0.239[46514,57214] 8ns |-----------------------------------------L0.239-----------------------------------------|" + - "L0.239[46513,57214] 8ns |-----------------------------------------L0.239-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 493kb total:" - "L0 " - - "L0.?[46514,51288] 8ns 220kb|-----------------L0.?-----------------| " - - "L0.?[51289,57214] 8ns 273kb |---------------------L0.?----------------------| " - - "**** Simulation run 133, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 
1 Input Files, 493kb total:" + - "L0.?[46513,51286] 8ns 220kb|-----------------L0.?-----------------| " + - "L0.?[51287,57214] 8ns 273kb |---------------------L0.?----------------------| " + - "**** Simulation run 133, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 493kb total:" - "L0, all files 493kb " - - "L0.243[46514,57214] 9ns |-----------------------------------------L0.243-----------------------------------------|" + - "L0.243[46513,57214] 9ns |-----------------------------------------L0.243-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 493kb total:" - "L0 " - - "L0.?[46514,51288] 9ns 220kb|-----------------L0.?-----------------| " - - "L0.?[51289,57214] 9ns 273kb |---------------------L0.?----------------------| " - - "**** Simulation run 134, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 493kb total:" + - "L0.?[46513,51286] 9ns 220kb|-----------------L0.?-----------------| " + - "L0.?[51287,57214] 9ns 273kb |---------------------L0.?----------------------| " + - "**** Simulation run 134, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 493kb total:" - "L0, all files 493kb " - - "L0.247[46514,57214] 10ns |-----------------------------------------L0.247-----------------------------------------|" + - "L0.247[46513,57214] 10ns |-----------------------------------------L0.247-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 493kb total:" - "L0 " - - "L0.?[46514,51288] 10ns 220kb|-----------------L0.?-----------------| " - - "L0.?[51289,57214] 10ns 273kb |---------------------L0.?----------------------| " + - "L0.?[46513,51286] 10ns 220kb|-----------------L0.?-----------------| " + - "L0.?[51287,57214] 10ns 273kb |---------------------L0.?----------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.60, L0.67, L0.74, L0.239, L0.243, L0.247, L1.323, L1.324" - " Creating 16 files" - - "**** Simulation run 135, type=split(ReduceOverlap)(split_times=[32834]). 1 Input Files, 439kb total:" + - "**** Simulation run 135, type=split(ReduceOverlap)(split_times=[32832]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.212[28658,38176] 8ns |-----------------------------------------L0.212-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[28658,32834] 8ns 193kb|----------------L0.?-----------------| " - - "L0.?[32835,38176] 8ns 246kb |----------------------L0.?----------------------| " - - "**** Simulation run 136, type=split(ReduceOverlap)(split_times=[65568]). 1 Input Files, 483kb total:" + - "L0.?[28658,32832] 8ns 192kb|----------------L0.?-----------------| " + - "L0.?[32833,38176] 8ns 246kb |----------------------L0.?----------------------| " + - "**** Simulation run 136, type=split(ReduceOverlap)(split_times=[65564]). 
1 Input Files, 483kb total:" - "L0, all files 483kb " - - "L0.109[57215,67700] 8ns |-----------------------------------------L0.109-----------------------------------------|" + - "L0.109[57215,67699] 8ns |-----------------------------------------L0.109-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 483kb total:" - "L0 " - - "L0.?[57215,65568] 8ns 385kb|--------------------------------L0.?---------------------------------| " - - "L0.?[65569,67700] 8ns 98kb |------L0.?------| " - - "**** Simulation run 137, type=split(ReduceOverlap)(split_times=[32834]). 1 Input Files, 439kb total:" + - "L0.?[57215,65564] 8ns 385kb|--------------------------------L0.?---------------------------------| " + - "L0.?[65565,67699] 8ns 98kb |------L0.?------| " + - "**** Simulation run 137, type=split(ReduceOverlap)(split_times=[32832]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.214[28658,38176] 9ns |-----------------------------------------L0.214-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[28658,32834] 9ns 193kb|----------------L0.?-----------------| " - - "L0.?[32835,38176] 9ns 246kb |----------------------L0.?----------------------| " - - "**** Simulation run 138, type=split(ReduceOverlap)(split_times=[65568]). 1 Input Files, 483kb total:" + - "L0.?[28658,32832] 9ns 192kb|----------------L0.?-----------------| " + - "L0.?[32833,38176] 9ns 246kb |----------------------L0.?----------------------| " + - "**** Simulation run 138, type=split(ReduceOverlap)(split_times=[65564]). 1 Input Files, 483kb total:" - "L0, all files 483kb " - - "L0.113[57215,67700] 9ns |-----------------------------------------L0.113-----------------------------------------|" + - "L0.113[57215,67699] 9ns |-----------------------------------------L0.113-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 483kb total:" - "L0 " - - "L0.?[57215,65568] 9ns 385kb|--------------------------------L0.?---------------------------------| " - - "L0.?[65569,67700] 9ns 98kb |------L0.?------| " - - "**** Simulation run 139, type=split(ReduceOverlap)(split_times=[32834]). 1 Input Files, 439kb total:" + - "L0.?[57215,65564] 9ns 385kb|--------------------------------L0.?---------------------------------| " + - "L0.?[65565,67699] 9ns 98kb |------L0.?------| " + - "**** Simulation run 139, type=split(ReduceOverlap)(split_times=[32832]). 1 Input Files, 439kb total:" - "L0, all files 439kb " - "L0.216[28658,38176] 10ns |-----------------------------------------L0.216-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 439kb total:" - "L0 " - - "L0.?[28658,32834] 10ns 193kb|----------------L0.?-----------------| " - - "L0.?[32835,38176] 10ns 246kb |----------------------L0.?----------------------| " - - "**** Simulation run 140, type=split(ReduceOverlap)(split_times=[65568]). 1 Input Files, 483kb total:" + - "L0.?[28658,32832] 10ns 192kb|----------------L0.?-----------------| " + - "L0.?[32833,38176] 10ns 246kb |----------------------L0.?----------------------| " + - "**** Simulation run 140, type=split(ReduceOverlap)(split_times=[65564]). 
1 Input Files, 483kb total:" - "L0, all files 483kb " - - "L0.117[57215,67700] 10ns |-----------------------------------------L0.117-----------------------------------------|" + - "L0.117[57215,67699] 10ns |-----------------------------------------L0.117-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 483kb total:" - "L0 " - - "L0.?[57215,65568] 10ns 385kb|--------------------------------L0.?---------------------------------| " - - "L0.?[65569,67700] 10ns 98kb |------L0.?------| " + - "L0.?[57215,65564] 10ns 385kb|--------------------------------L0.?---------------------------------| " + - "L0.?[65565,67699] 10ns 98kb |------L0.?------| " - "Committing partition 1:" - " Soft Deleting 6 files: L0.109, L0.113, L0.117, L0.212, L0.214, L0.216" - " Creating 12 files" - - "**** Simulation run 141, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[108723, 140563]). 16 Input Files, 30mb total:" + - "**** Simulation run 141, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[108721, 140561]). 16 Input Files, 30mb total:" - "L0 " - - "L0.287[76883,85771] 7ns 410kb|L0.287| " - - "L0.130[85772,85772] 7ns 0b |L0.130| " - - "L0.236[85773,92926] 7ns 330kb |L0.236| " - - "L0.237[92927,109041] 7ns 743kb |---L0.237----| " + - "L0.287[76881,85771] 7ns 410kb|L0.287| " + - "L0.130[85772,85772] 7ns 47b |L0.130| " + - "L0.236[85773,92924] 7ns 330kb |L0.236| " + - "L0.237[92925,109041] 7ns 743kb |---L0.237----| " - "L0.188[109042,114328] 7ns 244kb |L0.188| " - - "L0.145[114329,114329] 7ns 0b |L0.145| " - - "L0.146[114330,135300] 7ns 967kb |-----L0.146------| " - - "L0.108[135301,142885] 7ns 350kb |L0.108| " - - "L0.157[142886,142886] 7ns 0b |L0.157| " - - "L0.307[142887,149665] 7ns 312kb |L0.307| " - - "L0.308[149666,156350] 7ns 308kb |L0.308| " - - "L0.262[156351,160867] 7ns 208kb |L0.262| " - - "L1 " - - "L1.279[76883,114329] 6ns 10mb|-------------L1.279--------------| " + - "L0.145[114329,114329] 7ns 47b |L0.145| " + - "L0.146[114330,135298] 7ns 967kb |-----L0.146------| " + - "L0.108[135299,142885] 7ns 350kb |L0.108| " + - "L0.157[142886,142886] 7ns 47b |L0.157| " + - "L0.307[142887,149666] 7ns 313kb |L0.307| " + - "L0.308[149667,156351] 7ns 308kb |L0.308| " + - "L0.262[156352,160867] 7ns 208kb |L0.262| " + - "L1 " + - "L1.279[76881,114329] 6ns 10mb|-------------L1.279--------------| " - "L1.303[114330,142886] 6ns 8mb |---------L1.303----------| " - - "L1.304[142887,149665] 6ns 2mb |L1.304| " - - "L1.305[149666,171442] 6ns 6mb |------L1.305------| " + - "L1.304[142887,149666] 6ns 2mb |L1.304| " + - "L1.305[149667,171442] 6ns 6mb |------L1.305------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[76883,108723] 7ns 10mb|------------L1.?------------| " - - "L1.?[108724,140563] 7ns 10mb |------------L1.?------------| " - - "L1.?[140564,171442] 7ns 10mb |-----------L1.?------------| " + - "L1.?[76881,108721] 7ns 10mb|------------L1.?------------| " + - "L1.?[108722,140561] 7ns 10mb |------------L1.?------------| " + - "L1.?[140562,171442] 7ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 16 files: L0.108, L0.130, L0.145, L0.146, L0.157, L0.188, L0.236, L0.237, L0.262, L1.279, L0.287, L1.303, L1.304, L1.305, L0.307, L0.308" - " Creating 3 files" - - "**** Simulation run 142, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 
1 Input Files, 10mb total:" + - "**** Simulation run 142, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.354[76883,108723] 7ns |-----------------------------------------L1.354-----------------------------------------|" + - "L1.354[76881,108721] 7ns |-----------------------------------------L1.354-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[76883,108402] 7ns 10mb|-----------------------------------------L1.?------------------------------------------| " - - "L1.?[108403,108723] 7ns 103kb |L1.?|" - - "**** Simulation run 143, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 1 Input Files, 743kb total:" + - "L1.?[76881,108401] 7ns 10mb|-----------------------------------------L1.?------------------------------------------| " + - "L1.?[108402,108721] 7ns 103kb |L1.?|" + - "**** Simulation run 143, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 1 Input Files, 743kb total:" - "L0, all files 743kb " - - "L0.241[92927,109041] 8ns |-----------------------------------------L0.241-----------------------------------------|" + - "L0.241[92925,109041] 8ns |-----------------------------------------L0.241-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 743kb total:" - "L0 " - - "L0.?[92927,108402] 8ns 714kb|----------------------------------------L0.?----------------------------------------| " - - "L0.?[108403,109041] 8ns 29kb |L0.?|" - - "**** Simulation run 144, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 1 Input Files, 743kb total:" + - "L0.?[92925,108401] 8ns 714kb|----------------------------------------L0.?----------------------------------------| " + - "L0.?[108402,109041] 8ns 30kb |L0.?|" + - "**** Simulation run 144, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 1 Input Files, 743kb total:" - "L0, all files 743kb " - - "L0.245[92927,109041] 9ns |-----------------------------------------L0.245-----------------------------------------|" + - "L0.245[92925,109041] 9ns |-----------------------------------------L0.245-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 743kb total:" - "L0 " - - "L0.?[92927,108402] 9ns 714kb|----------------------------------------L0.?----------------------------------------| " - - "L0.?[108403,109041] 9ns 29kb |L0.?|" - - "**** Simulation run 145, type=split(HighL0OverlapTotalBacklog)(split_times=[108402]). 1 Input Files, 743kb total:" + - "L0.?[92925,108401] 9ns 714kb|----------------------------------------L0.?----------------------------------------| " + - "L0.?[108402,109041] 9ns 30kb |L0.?|" + - "**** Simulation run 145, type=split(HighL0OverlapTotalBacklog)(split_times=[108401]). 
1 Input Files, 743kb total:" - "L0, all files 743kb " - - "L0.249[92927,109041] 10ns|-----------------------------------------L0.249-----------------------------------------|" + - "L0.249[92925,109041] 10ns|-----------------------------------------L0.249-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 743kb total:" - "L0 " - - "L0.?[92927,108402] 10ns 714kb|----------------------------------------L0.?----------------------------------------| " - - "L0.?[108403,109041] 10ns 29kb |L0.?|" + - "L0.?[92925,108401] 10ns 714kb|----------------------------------------L0.?----------------------------------------| " + - "L0.?[108402,109041] 10ns 30kb |L0.?|" - "**** Simulation run 146, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.355[108724,140563] 7ns|-----------------------------------------L1.355-----------------------------------------|" + - "L1.355[108722,140561] 7ns|-----------------------------------------L1.355-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[108724,139921] 7ns 10mb|-----------------------------------------L1.?-----------------------------------------| " - - "L1.?[139922,140563] 7ns 206kb |L1.?|" + - "L1.?[108722,139921] 7ns 10mb|-----------------------------------------L1.?-----------------------------------------| " + - "L1.?[139922,140561] 7ns 206kb |L1.?|" - "**** Simulation run 147, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 1 Input Files, 350kb total:" - "L0, all files 350kb " - - "L0.112[135301,142885] 8ns|-----------------------------------------L0.112-----------------------------------------|" + - "L0.112[135299,142885] 8ns|-----------------------------------------L0.112-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 350kb total:" - "L0 " - - "L0.?[135301,139921] 8ns 213kb|------------------------L0.?------------------------| " + - "L0.?[135299,139921] 8ns 213kb|------------------------L0.?------------------------| " - "L0.?[139922,142885] 8ns 137kb |--------------L0.?---------------| " - "**** Simulation run 148, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 1 Input Files, 350kb total:" - "L0, all files 350kb " - - "L0.116[135301,142885] 9ns|-----------------------------------------L0.116-----------------------------------------|" + - "L0.116[135299,142885] 9ns|-----------------------------------------L0.116-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 350kb total:" - "L0 " - - "L0.?[135301,139921] 9ns 213kb|------------------------L0.?------------------------| " + - "L0.?[135299,139921] 9ns 213kb|------------------------L0.?------------------------| " - "L0.?[139922,142885] 9ns 137kb |--------------L0.?---------------| " - "**** Simulation run 149, type=split(HighL0OverlapTotalBacklog)(split_times=[139921]). 
1 Input Files, 350kb total:" - "L0, all files 350kb " - - "L0.120[135301,142885] 10ns|-----------------------------------------L0.120-----------------------------------------|" + - "L0.120[135299,142885] 10ns|-----------------------------------------L0.120-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 350kb total:" - "L0 " - - "L0.?[135301,139921] 10ns 213kb|------------------------L0.?------------------------| " + - "L0.?[135299,139921] 10ns 213kb|------------------------L0.?------------------------| " - "L0.?[139922,142885] 10ns 137kb |--------------L0.?---------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.112, L0.116, L0.120, L0.241, L0.245, L0.249, L1.354, L1.355" - " Creating 16 files" - - "**** Simulation run 150, type=split(ReduceOverlap)(split_times=[108723]). 1 Input Files, 29kb total:" - - "L0, all files 29kb " - - "L0.360[108403,109041] 8ns|-----------------------------------------L0.360-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 29kb total:" - - "L0 " - - "L0.?[108403,108723] 8ns 15kb|-------------------L0.?--------------------| " - - "L0.?[108724,109041] 8ns 15kb |-------------------L0.?-------------------| " - - "**** Simulation run 151, type=split(ReduceOverlap)(split_times=[140563]). 1 Input Files, 137kb total:" + - "**** Simulation run 150, type=split(ReduceOverlap)(split_times=[108721]). 1 Input Files, 30kb total:" + - "L0, all files 30kb " + - "L0.360[108402,109041] 8ns|-----------------------------------------L0.360-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30kb total:" + - "L0, all files 15kb " + - "L0.?[108402,108721] 8ns |-------------------L0.?-------------------| " + - "L0.?[108722,109041] 8ns |-------------------L0.?-------------------| " + - "**** Simulation run 151, type=split(ReduceOverlap)(split_times=[140561]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - "L0.368[139922,142885] 8ns|-----------------------------------------L0.368-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[139922,140563] 8ns 30kb|------L0.?-------| " - - "L0.?[140564,142885] 8ns 107kb |--------------------------------L0.?--------------------------------| " - - "**** Simulation run 152, type=split(ReduceOverlap)(split_times=[108723]). 1 Input Files, 29kb total:" - - "L0, all files 29kb " - - "L0.362[108403,109041] 9ns|-----------------------------------------L0.362-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 29kb total:" - - "L0 " - - "L0.?[108403,108723] 9ns 15kb|-------------------L0.?--------------------| " - - "L0.?[108724,109041] 9ns 15kb |-------------------L0.?-------------------| " - - "**** Simulation run 153, type=split(ReduceOverlap)(split_times=[140563]). 1 Input Files, 137kb total:" + - "L0.?[139922,140561] 8ns 30kb|------L0.?-------| " + - "L0.?[140562,142885] 8ns 107kb |--------------------------------L0.?--------------------------------| " + - "**** Simulation run 152, type=split(ReduceOverlap)(split_times=[108721]). 
1 Input Files, 30kb total:" + - "L0, all files 30kb " + - "L0.362[108402,109041] 9ns|-----------------------------------------L0.362-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30kb total:" + - "L0, all files 15kb " + - "L0.?[108402,108721] 9ns |-------------------L0.?-------------------| " + - "L0.?[108722,109041] 9ns |-------------------L0.?-------------------| " + - "**** Simulation run 153, type=split(ReduceOverlap)(split_times=[140561]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - "L0.370[139922,142885] 9ns|-----------------------------------------L0.370-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[139922,140563] 9ns 30kb|------L0.?-------| " - - "L0.?[140564,142885] 9ns 107kb |--------------------------------L0.?--------------------------------| " - - "**** Simulation run 154, type=split(ReduceOverlap)(split_times=[108723]). 1 Input Files, 29kb total:" - - "L0, all files 29kb " - - "L0.364[108403,109041] 10ns|-----------------------------------------L0.364-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 29kb total:" - - "L0 " - - "L0.?[108403,108723] 10ns 15kb|-------------------L0.?--------------------| " - - "L0.?[108724,109041] 10ns 15kb |-------------------L0.?-------------------| " - - "**** Simulation run 155, type=split(ReduceOverlap)(split_times=[140563]). 1 Input Files, 137kb total:" + - "L0.?[139922,140561] 9ns 30kb|------L0.?-------| " + - "L0.?[140562,142885] 9ns 107kb |--------------------------------L0.?--------------------------------| " + - "**** Simulation run 154, type=split(ReduceOverlap)(split_times=[108721]). 1 Input Files, 30kb total:" + - "L0, all files 30kb " + - "L0.364[108402,109041] 10ns|-----------------------------------------L0.364-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 30kb total:" + - "L0, all files 15kb " + - "L0.?[108402,108721] 10ns |-------------------L0.?-------------------| " + - "L0.?[108722,109041] 10ns |-------------------L0.?-------------------| " + - "**** Simulation run 155, type=split(ReduceOverlap)(split_times=[140561]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - "L0.372[139922,142885] 10ns|-----------------------------------------L0.372-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[139922,140563] 10ns 30kb|------L0.?-------| " - - "L0.?[140564,142885] 10ns 107kb |--------------------------------L0.?--------------------------------| " + - "L0.?[139922,140561] 10ns 30kb|------L0.?-------| " + - "L0.?[140562,142885] 10ns 107kb |--------------------------------L0.?--------------------------------| " - "Committing partition 1:" - " Soft Deleting 6 files: L0.360, L0.362, L0.364, L0.368, L0.370, L0.372" - " Creating 12 files" - "**** Simulation run 156, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[170977]). 
8 Input Files, 20mb total:" - "L0 " - "L0.190[160868,171442] 7ns 488kb |----L0.190----| " - - "L0.171[171443,171443] 7ns 0b |L0.171| " - - "L0.309[171444,185000] 7ns 625kb |------L0.309------| " - - "L0.310[185001,198370] 7ns 617kb |------L0.310------| " - - "L0.264[198371,200000] 7ns 75kb |L0.264|" - - "L1 " - - "L1.356[140564,171442] 7ns 10mb|-------------------L1.356-------------------| " - - "L1.306[171443,185000] 6ns 4mb |------L1.306------| " - - "L1.302[185001,200000] 6ns 4mb |-------L1.302-------| " + - "L0.171[171443,171443] 7ns 47b |L0.171| " + - "L0.309[171444,185002] 7ns 625kb |------L0.309------| " + - "L0.310[185003,198372] 7ns 616kb |------L0.310------| " + - "L0.264[198373,200000] 7ns 75kb |L0.264|" + - "L1 " + - "L1.356[140562,171442] 7ns 10mb|-------------------L1.356-------------------| " + - "L1.306[171443,185002] 6ns 4mb |------L1.306------| " + - "L1.302[185003,200000] 6ns 4mb |-------L1.302-------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 20mb total:" - "L1 " - - "L1.?[140564,170977] 7ns 10mb|--------------------L1.?--------------------| " + - "L1.?[140562,170977] 7ns 10mb|--------------------L1.?--------------------| " - "L1.?[170978,200000] 7ns 10mb |------------------L1.?-------------------| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.171, L0.190, L0.264, L1.302, L1.306, L0.309, L0.310, L1.356" @@ -3667,160 +3665,160 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "Committing partition 1:" - " Soft Deleting 3 files: L0.194, L0.198, L0.202" - " Creating 6 files" - - "**** Simulation run 160, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[28630, 57160]). 17 Input Files, 27mb total:" - - "L0 " - - "L0.328[100,25694] 8ns 1mb|-----------L0.328-----------| " - - "L0.329[25695,28657] 8ns 137kb |L0.329| " - - "L0.342[28658,32834] 8ns 193kb |L0.342| " - - "L0.343[32835,38176] 8ns 246kb |L0.343| " - - "L0.288[38177,38491] 8ns 14kb |L0.288| " - - "L0.289[38492,46513] 8ns 370kb |L0.289-| " - - "L0.336[46514,51288] 8ns 220kb |L0.336| " - - "L0.337[51289,57214] 8ns 273kb |L0.337| " - - "L0.344[57215,65568] 8ns 385kb |L0.344-| " - - "L0.345[65569,67700] 8ns 98kb |L0.345| " - - "L0.224[67701,76252] 8ns 394kb |-L0.224-| " - - "L0.290[76253,76882] 8ns 29kb |L0.290|" - - "L1 " - - "L1.326[100,25694] 7ns 8mb|-----------L1.326-----------| " - - "L1.327[25695,32834] 7ns 2mb |L1.327| " - - "L1.334[32835,51288] 7ns 6mb |------L1.334-------| " - - "L1.335[51289,65568] 7ns 4mb |----L1.335----| " - - "L1.325[65569,76882] 7ns 3mb |--L1.325---| " + - "**** Simulation run 160, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[28628, 57156]). 
17 Input Files, 27mb total:" + - "L0 " + - "L0.328[100,25693] 8ns 1mb|----------L0.328-----------| " + - "L0.329[25694,28657] 8ns 137kb |L0.329| " + - "L0.342[28658,32832] 8ns 192kb |L0.342| " + - "L0.343[32833,38176] 8ns 246kb |L0.343| " + - "L0.288[38177,38490] 8ns 14kb |L0.288| " + - "L0.289[38491,46512] 8ns 370kb |L0.289-| " + - "L0.336[46513,51286] 8ns 220kb |L0.336| " + - "L0.337[51287,57214] 8ns 273kb |L0.337| " + - "L0.344[57215,65564] 8ns 385kb |L0.344-| " + - "L0.345[65565,67699] 8ns 98kb |L0.345| " + - "L0.224[67700,76252] 8ns 394kb |-L0.224-| " + - "L0.290[76253,76880] 8ns 29kb |L0.290|" + - "L1 " + - "L1.326[100,25693] 7ns 8mb|----------L1.326-----------| " + - "L1.327[25694,32832] 7ns 2mb |L1.327| " + - "L1.334[32833,51286] 7ns 6mb |------L1.334-------| " + - "L1.335[51287,65564] 7ns 4mb |----L1.335----| " + - "L1.325[65565,76880] 7ns 3mb |--L1.325---| " - "**** 3 Output Files (parquet_file_id not yet assigned), 27mb total:" - "L1 " - - "L1.?[100,28630] 8ns 10mb |-------------L1.?--------------| " - - "L1.?[28631,57160] 8ns 10mb |-------------L1.?--------------| " - - "L1.?[57161,76882] 8ns 7mb |--------L1.?---------| " + - "L1.?[100,28628] 8ns 10mb |-------------L1.?--------------| " + - "L1.?[28629,57156] 8ns 10mb |-------------L1.?--------------| " + - "L1.?[57157,76880] 8ns 7mb |--------L1.?---------| " - "Committing partition 1:" - " Soft Deleting 17 files: L0.224, L0.288, L0.289, L0.290, L1.325, L1.326, L1.327, L0.328, L0.329, L1.334, L1.335, L0.336, L0.337, L0.342, L0.343, L0.344, L0.345" - " Creating 3 files" - - "**** Simulation run 161, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 10mb total:" + - "**** Simulation run 161, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.393[100,28630] 8ns |-----------------------------------------L1.393-----------------------------------------|" + - "L1.393[100,28628] 8ns |-----------------------------------------L1.393-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[100,25694] 8ns 9mb |-------------------------------------L1.?-------------------------------------| " - - "L1.?[25695,28630] 8ns 1mb |-L1.?--| " - - "**** Simulation run 162, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 1 Input Files, 10mb total:" + - "L1.?[100,25693] 8ns 9mb |-------------------------------------L1.?-------------------------------------| " + - "L1.?[25694,28628] 8ns 1mb |-L1.?--| " + - "**** Simulation run 162, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.394[28631,57160] 8ns |-----------------------------------------L1.394-----------------------------------------|" + - "L1.394[28629,57156] 8ns |-----------------------------------------L1.394-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[28631,51288] 8ns 8mb|--------------------------------L1.?---------------------------------| " - - "L1.?[51289,57160] 8ns 2mb |------L1.?------| " + - "L1.?[28629,51286] 8ns 8mb|--------------------------------L1.?---------------------------------| " + - "L1.?[51287,57156] 8ns 2mb |------L1.?------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.393, L1.394" - " Creating 4 files" - - "**** Simulation run 163, type=split(ReduceOverlap)(split_times=[28630]). 
1 Input Files, 137kb total:" + - "**** Simulation run 163, type=split(ReduceOverlap)(split_times=[28628]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - - "L0.331[25695,28657] 9ns |-----------------------------------------L0.331-----------------------------------------|" + - "L0.331[25694,28657] 9ns |-----------------------------------------L0.331-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[25695,28630] 9ns 135kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[28631,28657] 9ns 1kb |L0.?|" - - "**** Simulation run 164, type=split(ReduceOverlap)(split_times=[57160]). 1 Input Files, 273kb total:" + - "L0.?[25694,28628] 9ns 135kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[28629,28657] 9ns 1kb |L0.?|" + - "**** Simulation run 164, type=split(ReduceOverlap)(split_times=[57156]). 1 Input Files, 273kb total:" - "L0, all files 273kb " - - "L0.339[51289,57214] 9ns |-----------------------------------------L0.339-----------------------------------------|" + - "L0.339[51287,57214] 9ns |-----------------------------------------L0.339-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 273kb total:" - "L0 " - - "L0.?[51289,57160] 9ns 271kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[57161,57214] 9ns 2kb |L0.?|" - - "**** Simulation run 165, type=split(ReduceOverlap)(split_times=[28630]). 1 Input Files, 137kb total:" + - "L0.?[51287,57156] 9ns 271kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[57157,57214] 9ns 3kb |L0.?|" + - "**** Simulation run 165, type=split(ReduceOverlap)(split_times=[28628]). 1 Input Files, 137kb total:" - "L0, all files 137kb " - - "L0.333[25695,28657] 10ns |-----------------------------------------L0.333-----------------------------------------|" + - "L0.333[25694,28657] 10ns |-----------------------------------------L0.333-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 137kb total:" - "L0 " - - "L0.?[25695,28630] 10ns 135kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[28631,28657] 10ns 1kb |L0.?|" - - "**** Simulation run 166, type=split(ReduceOverlap)(split_times=[57160]). 1 Input Files, 273kb total:" + - "L0.?[25694,28628] 10ns 135kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[28629,28657] 10ns 1kb |L0.?|" + - "**** Simulation run 166, type=split(ReduceOverlap)(split_times=[57156]). 
1 Input Files, 273kb total:" - "L0, all files 273kb " - - "L0.341[51289,57214] 10ns |-----------------------------------------L0.341-----------------------------------------|" + - "L0.341[51287,57214] 10ns |-----------------------------------------L0.341-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 273kb total:" - "L0 " - - "L0.?[51289,57160] 10ns 271kb|-----------------------------------------L0.?------------------------------------------| " - - "L0.?[57161,57214] 10ns 2kb |L0.?|" + - "L0.?[51287,57156] 10ns 271kb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[57157,57214] 10ns 3kb |L0.?|" - "Committing partition 1:" - " Soft Deleting 4 files: L0.331, L0.333, L0.339, L0.341" - " Creating 8 files" - - "**** Simulation run 167, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[104731, 132579]). 15 Input Files, 23mb total:" - - "L0 " - - "L0.291[76883,85771] 8ns 410kb|--L0.291--| " - - "L0.132[85772,85772] 8ns 0b |L0.132| " - - "L0.240[85773,92926] 8ns 330kb |-L0.240-| " - - "L0.359[92927,108402] 8ns 714kb |------L0.359-------| " - - "L0.373[108403,108723] 8ns 15kb |L0.373| " - - "L0.374[108724,109041] 8ns 15kb |L0.374| " + - "**** Simulation run 167, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[104729, 132577]). 15 Input Files, 23mb total:" + - "L0 " + - "L0.291[76881,85771] 8ns 410kb|--L0.291--| " + - "L0.132[85772,85772] 8ns 47b |L0.132| " + - "L0.240[85773,92924] 8ns 330kb |-L0.240-| " + - "L0.359[92925,108401] 8ns 714kb |------L0.359-------| " + - "L0.373[108402,108721] 8ns 15kb |L0.373| " + - "L0.374[108722,109041] 8ns 15kb |L0.374| " - "L0.192[109042,114328] 8ns 244kb |L0.192| " - - "L0.147[114329,114329] 8ns 0b |L0.147| " - - "L0.148[114330,135300] 8ns 967kb |----------L0.148-----------| " - - "L0.367[135301,139921] 8ns 213kb |L0.367|" - - "L0.375[139922,140563] 8ns 30kb |L0.375|" - - "L1 " - - "L1.357[76883,108402] 7ns 10mb|------------------L1.357------------------| " - - "L1.358[108403,108723] 7ns 103kb |L1.358| " - - "L1.365[108724,139921] 7ns 10mb |------------------L1.365------------------| " - - "L1.366[139922,140563] 7ns 206kb |L1.366|" + - "L0.147[114329,114329] 8ns 47b |L0.147| " + - "L0.148[114330,135298] 8ns 967kb |----------L0.148-----------| " + - "L0.367[135299,139921] 8ns 213kb |L0.367|" + - "L0.375[139922,140561] 8ns 30kb |L0.375|" + - "L1 " + - "L1.357[76881,108401] 7ns 10mb|------------------L1.357------------------| " + - "L1.358[108402,108721] 7ns 103kb |L1.358| " + - "L1.365[108722,139921] 7ns 10mb |------------------L1.365------------------| " + - "L1.366[139922,140561] 7ns 206kb |L1.366|" - "**** 3 Output Files (parquet_file_id not yet assigned), 23mb total:" - "L1 " - - "L1.?[76883,104731] 8ns 10mb|----------------L1.?-----------------| " - - "L1.?[104732,132579] 8ns 10mb |----------------L1.?-----------------| " - - "L1.?[132580,140563] 8ns 3mb |--L1.?---| " + - "L1.?[76881,104729] 8ns 10mb|----------------L1.?-----------------| " + - "L1.?[104730,132577] 8ns 10mb |----------------L1.?-----------------| " + - "L1.?[132578,140561] 8ns 3mb |--L1.?---| " - "Committing partition 1:" - " Soft Deleting 15 files: L0.132, L0.147, L0.148, L0.192, L0.240, L0.291, L1.357, L1.358, L0.359, L1.365, L1.366, L0.367, L0.373, L0.374, L0.375" - " Creating 3 files" - - "**** Simulation run 168, type=split(ReduceOverlap)(split_times=[104731]). 
1 Input Files, 714kb total:" + - "**** Simulation run 168, type=split(ReduceOverlap)(split_times=[104729]). 1 Input Files, 714kb total:" - "L0, all files 714kb " - - "L0.361[92927,108402] 9ns |-----------------------------------------L0.361-----------------------------------------|" + - "L0.361[92925,108401] 9ns |-----------------------------------------L0.361-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 714kb total:" - "L0 " - - "L0.?[92927,104731] 9ns 544kb|-------------------------------L0.?-------------------------------| " - - "L0.?[104732,108402] 9ns 169kb |-------L0.?--------| " - - "**** Simulation run 169, type=split(ReduceOverlap)(split_times=[132579]). 1 Input Files, 967kb total:" + - "L0.?[92925,104729] 9ns 544kb|-------------------------------L0.?-------------------------------| " + - "L0.?[104730,108401] 9ns 169kb |-------L0.?--------| " + - "**** Simulation run 169, type=split(ReduceOverlap)(split_times=[132577]). 1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.150[114330,135300] 9ns|-----------------------------------------L0.150-----------------------------------------|" + - "L0.150[114330,135298] 9ns|-----------------------------------------L0.150-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114330,132579] 9ns 841kb|------------------------------------L0.?------------------------------------| " - - "L0.?[132580,135300] 9ns 125kb |--L0.?---| " - - "**** Simulation run 170, type=split(ReduceOverlap)(split_times=[104731]). 1 Input Files, 714kb total:" + - "L0.?[114330,132577] 9ns 841kb|------------------------------------L0.?------------------------------------| " + - "L0.?[132578,135298] 9ns 125kb |--L0.?---| " + - "**** Simulation run 170, type=split(ReduceOverlap)(split_times=[104729]). 1 Input Files, 714kb total:" - "L0, all files 714kb " - - "L0.363[92927,108402] 10ns|-----------------------------------------L0.363-----------------------------------------|" + - "L0.363[92925,108401] 10ns|-----------------------------------------L0.363-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 714kb total:" - "L0 " - - "L0.?[92927,104731] 10ns 544kb|-------------------------------L0.?-------------------------------| " - - "L0.?[104732,108402] 10ns 169kb |-------L0.?--------| " - - "**** Simulation run 171, type=split(ReduceOverlap)(split_times=[132579]). 1 Input Files, 967kb total:" + - "L0.?[92925,104729] 10ns 544kb|-------------------------------L0.?-------------------------------| " + - "L0.?[104730,108401] 10ns 169kb |-------L0.?--------| " + - "**** Simulation run 171, type=split(ReduceOverlap)(split_times=[132577]). 
1 Input Files, 967kb total:" - "L0, all files 967kb " - - "L0.152[114330,135300] 10ns|-----------------------------------------L0.152-----------------------------------------|" + - "L0.152[114330,135298] 10ns|-----------------------------------------L0.152-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 967kb total:" - "L0 " - - "L0.?[114330,132579] 10ns 841kb|------------------------------------L0.?------------------------------------| " - - "L0.?[132580,135300] 10ns 125kb |--L0.?---| " + - "L0.?[114330,132577] 10ns 841kb|------------------------------------L0.?------------------------------------| " + - "L0.?[132578,135298] 10ns 125kb |--L0.?---| " - "Committing partition 1:" - " Soft Deleting 4 files: L0.150, L0.152, L0.361, L0.363" - " Creating 8 files" - - "**** Simulation run 172, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[167314, 194064]). 13 Input Files, 22mb total:" + - "**** Simulation run 172, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[167314, 194066]). 13 Input Files, 22mb total:" - "L0 " - - "L0.376[140564,142885] 8ns 107kb|L0.376| " - - "L0.159[142886,142886] 8ns 0b |L0.159| " - - "L0.311[142887,149665] 8ns 312kb |-L0.311-| " - - "L0.312[149666,156350] 8ns 308kb |-L0.312-| " - - "L0.266[156351,160867] 8ns 208kb |L0.266| " + - "L0.376[140562,142885] 8ns 107kb|L0.376| " + - "L0.159[142886,142886] 8ns 47b |L0.159| " + - "L0.311[142887,149666] 8ns 313kb |-L0.311-| " + - "L0.312[149667,156351] 8ns 308kb |-L0.312-| " + - "L0.266[156352,160867] 8ns 208kb |L0.266| " - "L0.387[160868,170977] 8ns 466kb |---L0.387----| " - "L0.388[170978,171442] 8ns 21kb |L0.388| " - - "L0.173[171443,171443] 8ns 0b |L0.173| " - - "L0.313[171444,185000] 8ns 625kb |------L0.313------| " - - "L0.314[185001,198370] 8ns 617kb |------L0.314------| " - - "L0.268[198371,200000] 8ns 75kb |L0.268|" + - "L0.173[171443,171443] 8ns 47b |L0.173| " + - "L0.313[171444,185002] 8ns 625kb |------L0.313------| " + - "L0.314[185003,198372] 8ns 616kb |------L0.314------| " + - "L0.268[198373,200000] 8ns 75kb |L0.268|" - "L1 " - - "L1.385[140564,170977] 7ns 10mb|-------------------L1.385-------------------| " + - "L1.385[140562,170977] 7ns 10mb|-------------------L1.385-------------------| " - "L1.386[170978,200000] 7ns 10mb |-----------------L1.386------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L1 " - - "L1.?[140564,167314] 8ns 10mb|-----------------L1.?-----------------| " - - "L1.?[167315,194064] 8ns 10mb |-----------------L1.?-----------------| " - - "L1.?[194065,200000] 8ns 2mb |-L1.?-| " + - "L1.?[140562,167314] 8ns 10mb|-----------------L1.?-----------------| " + - "L1.?[167315,194066] 8ns 10mb |-----------------L1.?-----------------| " + - "L1.?[194067,200000] 8ns 2mb |-L1.?-| " - "Committing partition 1:" - " Soft Deleting 13 files: L0.159, L0.173, L0.266, L0.268, L0.311, L0.312, L0.313, L0.314, L0.376, L1.385, L1.386, L0.387, L0.388" - " Creating 3 files" @@ -3831,13 +3829,13 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L0 " - "L0.?[160868,167314] 9ns 297kb|-------------------------L0.?--------------------------| " - "L0.?[167315,170977] 9ns 169kb |-------------L0.?-------------| " - - "**** Simulation run 174, type=split(ReduceOverlap)(split_times=[194064]). 
1 Input Files, 617kb total:" - - "L0, all files 617kb " - - "L0.318[185001,198370] 9ns|-----------------------------------------L0.318-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 617kb total:" + - "**** Simulation run 174, type=split(ReduceOverlap)(split_times=[194066]). 1 Input Files, 616kb total:" + - "L0, all files 616kb " + - "L0.318[185003,198372] 9ns|-----------------------------------------L0.318-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 616kb total:" - "L0 " - - "L0.?[185001,194064] 9ns 418kb|---------------------------L0.?----------------------------| " - - "L0.?[194065,198370] 9ns 199kb |-----------L0.?-----------| " + - "L0.?[185003,194066] 9ns 418kb|---------------------------L0.?----------------------------| " + - "L0.?[194067,198372] 9ns 199kb |-----------L0.?-----------| " - "**** Simulation run 175, type=split(ReduceOverlap)(split_times=[167314]). 1 Input Files, 466kb total:" - "L0, all files 466kb " - "L0.391[160868,170977] 10ns|-----------------------------------------L0.391-----------------------------------------|" @@ -3845,345 +3843,345 @@ async fn all_overlapping_l0_max_input_bytes_per_partition_small_max_desired_file - "L0 " - "L0.?[160868,167314] 10ns 297kb|-------------------------L0.?--------------------------| " - "L0.?[167315,170977] 10ns 169kb |-------------L0.?-------------| " - - "**** Simulation run 176, type=split(ReduceOverlap)(split_times=[194064]). 1 Input Files, 617kb total:" - - "L0, all files 617kb " - - "L0.322[185001,198370] 10ns|-----------------------------------------L0.322-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 617kb total:" + - "**** Simulation run 176, type=split(ReduceOverlap)(split_times=[194066]). 1 Input Files, 616kb total:" + - "L0, all files 616kb " + - "L0.322[185003,198372] 10ns|-----------------------------------------L0.322-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 616kb total:" - "L0 " - - "L0.?[185001,194064] 10ns 418kb|---------------------------L0.?----------------------------| " - - "L0.?[194065,198370] 10ns 199kb |-----------L0.?-----------| " + - "L0.?[185003,194066] 10ns 418kb|---------------------------L0.?----------------------------| " + - "L0.?[194067,198372] 10ns 199kb |-----------L0.?-----------| " - "Committing partition 1:" - " Soft Deleting 4 files: L0.318, L0.322, L0.389, L0.391" - " Creating 8 files" - - "**** Simulation run 177, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[25731, 51362]). 
17 Input Files, 30mb total:" - - "L0 " - - "L0.330[100,25694] 9ns 1mb|-----------L0.330-----------| " - - "L0.400[25695,28630] 9ns 135kb |L0.400| " - - "L0.401[28631,28657] 9ns 1kb |L0.401| " - - "L0.346[28658,32834] 9ns 193kb |L0.346| " - - "L0.347[32835,38176] 9ns 246kb |L0.347| " - - "L0.292[38177,38491] 9ns 14kb |L0.292| " - - "L0.293[38492,46513] 9ns 370kb |L0.293-| " - - "L0.338[46514,51288] 9ns 220kb |L0.338| " - - "L0.402[51289,57160] 9ns 271kb |L0.402| " - - "L0.403[57161,57214] 9ns 2kb |L0.403| " - - "L0.348[57215,65568] 9ns 385kb |L0.348-| " - - "L0.349[65569,67700] 9ns 98kb |L0.349| " - - "L1 " - - "L1.396[100,25694] 8ns 9mb|-----------L1.396-----------| " - - "L1.397[25695,28630] 8ns 1mb |L1.397| " - - "L1.398[28631,51288] 8ns 8mb |---------L1.398---------| " - - "L1.399[51289,57160] 8ns 2mb |L1.399| " - - "L1.395[57161,76882] 8ns 7mb |-------L1.395--------| " + - "**** Simulation run 177, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[25730, 51360]). 17 Input Files, 30mb total:" + - "L0 " + - "L0.330[100,25693] 9ns 1mb|----------L0.330-----------| " + - "L0.400[25694,28628] 9ns 135kb |L0.400| " + - "L0.401[28629,28657] 9ns 1kb |L0.401| " + - "L0.346[28658,32832] 9ns 192kb |L0.346| " + - "L0.347[32833,38176] 9ns 246kb |L0.347| " + - "L0.292[38177,38490] 9ns 14kb |L0.292| " + - "L0.293[38491,46512] 9ns 370kb |L0.293-| " + - "L0.338[46513,51286] 9ns 220kb |L0.338| " + - "L0.402[51287,57156] 9ns 271kb |L0.402| " + - "L0.403[57157,57214] 9ns 3kb |L0.403| " + - "L0.348[57215,65564] 9ns 385kb |L0.348-| " + - "L0.349[65565,67699] 9ns 98kb |L0.349| " + - "L1 " + - "L1.396[100,25693] 8ns 9mb|----------L1.396-----------| " + - "L1.397[25694,28628] 8ns 1mb |L1.397| " + - "L1.398[28629,51286] 8ns 8mb |---------L1.398---------| " + - "L1.399[51287,57156] 8ns 2mb |L1.399| " + - "L1.395[57157,76880] 8ns 7mb |-------L1.395--------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 30mb total:" - "L1 " - - "L1.?[100,25731] 9ns 10mb |------------L1.?------------| " - - "L1.?[25732,51362] 9ns 10mb |------------L1.?------------| " - - "L1.?[51363,76882] 9ns 10mb |-----------L1.?------------| " + - "L1.?[100,25730] 9ns 10mb |------------L1.?------------| " + - "L1.?[25731,51360] 9ns 10mb |------------L1.?------------| " + - "L1.?[51361,76880] 9ns 10mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 17 files: L0.292, L0.293, L0.330, L0.338, L0.346, L0.347, L0.348, L0.349, L1.395, L1.396, L1.397, L1.398, L1.399, L0.400, L0.401, L0.402, L0.403" - " Creating 3 files" - - "**** Simulation run 178, type=split(HighL0OverlapTotalBacklog)(split_times=[25694]). 1 Input Files, 10mb total:" + - "**** Simulation run 178, type=split(HighL0OverlapTotalBacklog)(split_times=[25693]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.430[100,25731] 9ns |-----------------------------------------L1.430-----------------------------------------|" + - "L1.430[100,25730] 9ns |-----------------------------------------L1.430-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[100,25694] 9ns 10mb |-----------------------------------------L1.?------------------------------------------| " - - "L1.?[25695,25731] 9ns 15kb |L1.?|" - - "**** Simulation run 179, type=split(HighL0OverlapTotalBacklog)(split_times=[51288]). 
1 Input Files, 10mb total:" + - "L1.?[100,25693] 9ns 10mb |-----------------------------------------L1.?------------------------------------------| " + - "L1.?[25694,25730] 9ns 15kb |L1.?|" + - "**** Simulation run 179, type=split(HighL0OverlapTotalBacklog)(split_times=[51286]). 1 Input Files, 10mb total:" - "L1, all files 10mb " - - "L1.431[25732,51362] 9ns |-----------------------------------------L1.431-----------------------------------------|" + - "L1.431[25731,51360] 9ns |-----------------------------------------L1.431-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 10mb total:" - "L1 " - - "L1.?[25732,51288] 9ns 10mb|-----------------------------------------L1.?------------------------------------------| " - - "L1.?[51289,51362] 9ns 30kb |L1.?|" + - "L1.?[25731,51286] 9ns 10mb|-----------------------------------------L1.?------------------------------------------| " + - "L1.?[51287,51360] 9ns 30kb |L1.?|" - "Committing partition 1:" - " Soft Deleting 2 files: L1.430, L1.431" - " Creating 4 files" - - "**** Simulation run 180, type=split(ReduceOverlap)(split_times=[25731]). 1 Input Files, 135kb total:" + - "**** Simulation run 180, type=split(ReduceOverlap)(split_times=[25730]). 1 Input Files, 135kb total:" - "L0, all files 135kb " - - "L0.404[25695,28630] 10ns |----------------------------------------L0.404-----------------------------------------| " + - "L0.404[25694,28628] 10ns |-----------------------------------------L0.404-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 135kb total:" - "L0 " - - "L0.?[25695,25731] 10ns 2kb|L0.?| " - - "L0.?[25732,28630] 10ns 134kb |-----------------------------------------L0.?-----------------------------------------| " - - "**** Simulation run 181, type=split(ReduceOverlap)(split_times=[51362]). 1 Input Files, 271kb total:" + - "L0.?[25694,25730] 10ns 2kb|L0.?| " + - "L0.?[25731,28628] 10ns 134kb |-----------------------------------------L0.?-----------------------------------------| " + - "**** Simulation run 181, type=split(ReduceOverlap)(split_times=[51360]). 1 Input Files, 271kb total:" - "L0, all files 271kb " - - "L0.406[51289,57160] 10ns |-----------------------------------------L0.406-----------------------------------------|" + - "L0.406[51287,57156] 10ns |-----------------------------------------L0.406-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 271kb total:" - "L0 " - - "L0.?[51289,51362] 10ns 3kb|L0.?| " - - "L0.?[51363,57160] 10ns 267kb |-----------------------------------------L0.?-----------------------------------------| " + - "L0.?[51287,51360] 10ns 3kb|L0.?| " + - "L0.?[51361,57156] 10ns 267kb |-----------------------------------------L0.?-----------------------------------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.404, L0.406" - " Creating 4 files" - - "**** Simulation run 182, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[76043, 100723]). 8 Input Files, 22mb total:" + - "**** Simulation run 182, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[76041, 100721]). 
8 Input Files, 22mb total:" - "L0 " - - "L0.226[67701,76252] 9ns 394kb |---L0.226---| " - - "L0.294[76253,76882] 9ns 29kb |L0.294| " - - "L0.295[76883,85771] 9ns 410kb |---L0.295---| " - - "L0.134[85772,85772] 9ns 0b |L0.134| " - - "L0.244[85773,92926] 9ns 330kb |--L0.244--| " - - "L0.411[92927,104731] 9ns 544kb |-----L0.411------| " + - "L0.226[67700,76252] 9ns 394kb |---L0.226---| " + - "L0.294[76253,76880] 9ns 29kb |L0.294| " + - "L0.295[76881,85771] 9ns 410kb |---L0.295---| " + - "L0.134[85772,85772] 9ns 47b |L0.134| " + - "L0.244[85773,92924] 9ns 330kb |--L0.244--| " + - "L0.411[92925,104729] 9ns 544kb |-----L0.411------| " - "L1 " - - "L1.432[51363,76882] 9ns 10mb|-----------------L1.432------------------| " - - "L1.408[76883,104731] 8ns 10mb |-------------------L1.408-------------------| " + - "L1.432[51361,76880] 9ns 10mb|-----------------L1.432------------------| " + - "L1.408[76881,104729] 8ns 10mb |-------------------L1.408-------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L1 " - - "L1.?[51363,76043] 9ns 10mb|-----------------L1.?------------------| " - - "L1.?[76044,100723] 9ns 10mb |-----------------L1.?------------------| " - - "L1.?[100724,104731] 9ns 2mb |L1.?| " + - "L1.?[51361,76041] 9ns 10mb|-----------------L1.?------------------| " + - "L1.?[76042,100721] 9ns 10mb |-----------------L1.?------------------| " + - "L1.?[100722,104729] 9ns 2mb |L1.?| " - "Committing partition 1:" - " Soft Deleting 8 files: L0.134, L0.226, L0.244, L0.294, L0.295, L1.408, L0.411, L1.432" - " Creating 3 files" - - "**** Simulation run 183, type=split(ReduceOverlap)(split_times=[76043]). 1 Input Files, 394kb total:" + - "**** Simulation run 183, type=split(ReduceOverlap)(split_times=[76041]). 1 Input Files, 394kb total:" - "L0, all files 394kb " - - "L0.228[67701,76252] 10ns |-----------------------------------------L0.228-----------------------------------------|" + - "L0.228[67700,76252] 10ns |-----------------------------------------L0.228-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 394kb total:" - "L0 " - - "L0.?[67701,76043] 10ns 385kb|----------------------------------------L0.?-----------------------------------------| " - - "L0.?[76044,76252] 10ns 10kb |L0.?|" - - "**** Simulation run 184, type=split(ReduceOverlap)(split_times=[100723]). 1 Input Files, 544kb total:" + - "L0.?[67700,76041] 10ns 385kb|----------------------------------------L0.?-----------------------------------------| " + - "L0.?[76042,76252] 10ns 10kb |L0.?|" + - "**** Simulation run 184, type=split(ReduceOverlap)(split_times=[100721]). 
1 Input Files, 544kb total:" - "L0, all files 544kb " - - "L0.415[92927,104731] 10ns|-----------------------------------------L0.415-----------------------------------------|" + - "L0.415[92925,104729] 10ns|-----------------------------------------L0.415-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 544kb total:" - "L0 " - - "L0.?[92927,100723] 10ns 359kb|--------------------------L0.?---------------------------| " - - "L0.?[100724,104731] 10ns 185kb |------------L0.?------------| " + - "L0.?[92925,100721] 10ns 359kb|--------------------------L0.?---------------------------| " + - "L0.?[100722,104729] 10ns 185kb |------------L0.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.228, L0.415" - " Creating 4 files" - - "**** Simulation run 185, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[129098, 153464]). 18 Input Files, 26mb total:" + - "**** Simulation run 185, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[129096, 153462]). 18 Input Files, 26mb total:" - "L0 " - - "L0.412[104732,108402] 9ns 169kb|L0.412| " - - "L0.377[108403,108723] 9ns 15kb |L0.377| " - - "L0.378[108724,109041] 9ns 15kb |L0.378| " + - "L0.412[104730,108401] 9ns 169kb|L0.412| " + - "L0.377[108402,108721] 9ns 15kb |L0.377| " + - "L0.378[108722,109041] 9ns 15kb |L0.378| " - "L0.196[109042,114328] 9ns 244kb |L0.196| " - - "L0.149[114329,114329] 9ns 0b |L0.149| " - - "L0.413[114330,132579] 9ns 841kb |---------L0.413---------| " - - "L0.414[132580,135300] 9ns 125kb |L0.414| " - - "L0.369[135301,139921] 9ns 213kb |L0.369| " - - "L0.379[139922,140563] 9ns 30kb |L0.379| " - - "L0.380[140564,142885] 9ns 107kb |L0.380| " - - "L0.161[142886,142886] 9ns 0b |L0.161| " - - "L0.315[142887,149665] 9ns 312kb |L0.315-| " - - "L0.316[149666,156350] 9ns 308kb |L0.316-| " - - "L0.270[156351,160867] 9ns 208kb |L0.270| " + - "L0.149[114329,114329] 9ns 47b |L0.149| " + - "L0.413[114330,132577] 9ns 841kb |---------L0.413---------| " + - "L0.414[132578,135298] 9ns 125kb |L0.414| " + - "L0.369[135299,139921] 9ns 213kb |L0.369| " + - "L0.379[139922,140561] 9ns 30kb |L0.379| " + - "L0.380[140562,142885] 9ns 107kb |L0.380| " + - "L0.161[142886,142886] 9ns 47b |L0.161| " + - "L0.315[142887,149666] 9ns 313kb |L0.315-| " + - "L0.316[149667,156351] 9ns 308kb |L0.316-| " + - "L0.270[156352,160867] 9ns 208kb |L0.270| " - "L0.422[160868,167314] 9ns 297kb |L0.422-| " - "L1 " - - "L1.409[104732,132579] 8ns 10mb|----------------L1.409----------------| " - - "L1.410[132580,140563] 8ns 3mb |-L1.410--| " - - "L1.419[140564,167314] 8ns 10mb |---------------L1.419---------------| " + - "L1.409[104730,132577] 8ns 10mb|----------------L1.409----------------| " + - "L1.410[132578,140561] 8ns 3mb |-L1.410--| " + - "L1.419[140562,167314] 8ns 10mb |---------------L1.419---------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 26mb total:" - "L1 " - - "L1.?[104732,129098] 9ns 10mb|--------------L1.?---------------| " - - "L1.?[129099,153464] 9ns 10mb |--------------L1.?---------------| " - - "L1.?[153465,167314] 9ns 6mb |------L1.?-------| " + - "L1.?[104730,129096] 9ns 10mb|--------------L1.?---------------| " + - "L1.?[129097,153462] 9ns 10mb |--------------L1.?---------------| " + - "L1.?[153463,167314] 9ns 6mb |------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 18 files: L0.149, L0.161, L0.196, L0.270, L0.315, L0.316, L0.369, L0.377, L0.378, L0.379, L0.380, L1.409, L1.410, L0.412, 
L0.413, L0.414, L1.419, L0.422" - " Creating 3 files" - - "**** Simulation run 186, type=split(ReduceOverlap)(split_times=[129098]). 1 Input Files, 841kb total:" + - "**** Simulation run 186, type=split(ReduceOverlap)(split_times=[129096]). 1 Input Files, 841kb total:" - "L0, all files 841kb " - - "L0.417[114330,132579] 10ns|-----------------------------------------L0.417-----------------------------------------|" + - "L0.417[114330,132577] 10ns|-----------------------------------------L0.417-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 841kb total:" - "L0 " - - "L0.?[114330,129098] 10ns 681kb|---------------------------------L0.?---------------------------------| " - - "L0.?[129099,132579] 10ns 160kb |-----L0.?------| " - - "**** Simulation run 187, type=split(ReduceOverlap)(split_times=[153464]). 1 Input Files, 308kb total:" + - "L0.?[114330,129096] 10ns 681kb|---------------------------------L0.?---------------------------------| " + - "L0.?[129097,132577] 10ns 160kb |-----L0.?------| " + - "**** Simulation run 187, type=split(ReduceOverlap)(split_times=[153462]). 1 Input Files, 308kb total:" - "L0, all files 308kb " - - "L0.320[149666,156350] 10ns|-----------------------------------------L0.320-----------------------------------------|" + - "L0.320[149667,156351] 10ns|-----------------------------------------L0.320-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 308kb total:" - "L0 " - - "L0.?[149666,153464] 10ns 175kb|----------------------L0.?-----------------------| " - - "L0.?[153465,156350] 10ns 133kb |----------------L0.?----------------| " + - "L0.?[149667,153462] 10ns 175kb|----------------------L0.?-----------------------| " + - "L0.?[153463,156351] 10ns 133kb |----------------L0.?----------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.320, L0.417" - " Creating 4 files" - - "**** Simulation run 188, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[191189]). 9 Input Files, 14mb total:" + - "**** Simulation run 188, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[191191]). 
9 Input Files, 14mb total:" - "L0 " - "L0.423[167315,170977] 9ns 169kb|-L0.423-| " - "L0.390[170978,171442] 9ns 21kb |L0.390| " - - "L0.175[171443,171443] 9ns 0b |L0.175| " - - "L0.317[171444,185000] 9ns 625kb |--------------L0.317---------------| " - - "L0.424[185001,194064] 9ns 418kb |--------L0.424--------| " - - "L0.425[194065,198370] 9ns 199kb |-L0.425--| " - - "L0.272[198371,200000] 9ns 75kb |L0.272|" - - "L1 " - - "L1.420[167315,194064] 8ns 10mb|--------------------------------L1.420---------------------------------| " - - "L1.421[194065,200000] 8ns 2mb |----L1.421----| " + - "L0.175[171443,171443] 9ns 47b |L0.175| " + - "L0.317[171444,185002] 9ns 625kb |--------------L0.317---------------| " + - "L0.424[185003,194066] 9ns 418kb |--------L0.424--------| " + - "L0.425[194067,198372] 9ns 199kb |-L0.425--| " + - "L0.272[198373,200000] 9ns 75kb |L0.272|" + - "L1 " + - "L1.420[167315,194066] 8ns 10mb|--------------------------------L1.420---------------------------------| " + - "L1.421[194067,200000] 8ns 2mb |----L1.421----| " - "**** 2 Output Files (parquet_file_id not yet assigned), 14mb total:" - "L1 " - - "L1.?[167315,191189] 9ns 10mb|-----------------------------L1.?------------------------------| " - - "L1.?[191190,200000] 9ns 4mb |---------L1.?---------| " + - "L1.?[167315,191191] 9ns 10mb|-----------------------------L1.?------------------------------| " + - "L1.?[191192,200000] 9ns 4mb |---------L1.?---------| " - "Committing partition 1:" - " Soft Deleting 9 files: L0.175, L0.272, L0.317, L0.390, L1.420, L1.421, L0.423, L0.424, L0.425" - " Creating 2 files" - - "**** Simulation run 189, type=split(ReduceOverlap)(split_times=[191189]). 1 Input Files, 418kb total:" + - "**** Simulation run 189, type=split(ReduceOverlap)(split_times=[191191]). 1 Input Files, 418kb total:" - "L0, all files 418kb " - - "L0.428[185001,194064] 10ns|-----------------------------------------L0.428-----------------------------------------|" + - "L0.428[185003,194066] 10ns|-----------------------------------------L0.428-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 418kb total:" - "L0 " - - "L0.?[185001,191189] 10ns 285kb|---------------------------L0.?----------------------------| " - - "L0.?[191190,194064] 10ns 133kb |-----------L0.?-----------| " + - "L0.?[185003,191191] 10ns 285kb|---------------------------L0.?----------------------------| " + - "L0.?[191192,194066] 10ns 133kb |-----------L0.?-----------| " - "Committing partition 1:" - " Soft Deleting 1 files: L0.428" - " Creating 2 files" - - "**** Simulation run 190, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[23080, 46060]). 
14 Input Files, 22mb total:" - - "L0 " - - "L0.332[100,25694] 10ns 1mb|------------------L0.332------------------| " - - "L0.437[25695,25731] 10ns 2kb |L0.437| " - - "L0.438[25732,28630] 10ns 134kb |L0.438| " - - "L0.405[28631,28657] 10ns 1kb |L0.405| " - - "L0.350[28658,32834] 10ns 193kb |L0.350| " - - "L0.351[32835,38176] 10ns 246kb |L0.351-| " - - "L0.296[38177,38491] 10ns 14kb |L0.296| " - - "L0.297[38492,46513] 10ns 370kb |---L0.297---| " - - "L0.340[46514,51288] 10ns 220kb |L0.340| " - - "L0.439[51289,51362] 10ns 3kb |L0.439|" - - "L1 " - - "L1.433[100,25694] 9ns 10mb|------------------L1.433------------------| " - - "L1.434[25695,25731] 9ns 15kb |L1.434| " - - "L1.435[25732,51288] 9ns 10mb |------------------L1.435------------------| " - - "L1.436[51289,51362] 9ns 30kb |L1.436|" + - "**** Simulation run 190, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[23078, 46056]). 14 Input Files, 22mb total:" + - "L0 " + - "L0.332[100,25693] 10ns 1mb|------------------L0.332------------------| " + - "L0.437[25694,25730] 10ns 2kb |L0.437| " + - "L0.438[25731,28628] 10ns 134kb |L0.438| " + - "L0.405[28629,28657] 10ns 1kb |L0.405| " + - "L0.350[28658,32832] 10ns 192kb |L0.350| " + - "L0.351[32833,38176] 10ns 246kb |L0.351-| " + - "L0.296[38177,38490] 10ns 14kb |L0.296| " + - "L0.297[38491,46512] 10ns 370kb |---L0.297---| " + - "L0.340[46513,51286] 10ns 220kb |L0.340| " + - "L0.439[51287,51360] 10ns 3kb |L0.439|" + - "L1 " + - "L1.433[100,25693] 9ns 10mb|------------------L1.433------------------| " + - "L1.434[25694,25730] 9ns 15kb |L1.434| " + - "L1.435[25731,51286] 9ns 10mb |------------------L1.435------------------| " + - "L1.436[51287,51360] 9ns 30kb |L1.436|" - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L1 " - - "L1.?[100,23080] 10ns 10mb|-----------------L1.?-----------------| " - - "L1.?[23081,46060] 10ns 10mb |-----------------L1.?-----------------| " - - "L1.?[46061,51362] 10ns 2mb |-L1.?--| " + - "L1.?[100,23078] 10ns 10mb|-----------------L1.?-----------------| " + - "L1.?[23079,46056] 10ns 10mb |-----------------L1.?-----------------| " + - "L1.?[46057,51360] 10ns 2mb |-L1.?--| " - "Committing partition 1:" - " Soft Deleting 14 files: L0.296, L0.297, L0.332, L0.340, L0.350, L0.351, L0.405, L1.433, L1.434, L1.435, L1.436, L0.437, L0.438, L0.439" - " Creating 3 files" - - "**** Simulation run 191, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[73575, 95787]). 15 Input Files, 24mb total:" - - "L0 " - - "L0.440[51363,57160] 10ns 267kb|L0.440-| " - - "L0.407[57161,57214] 10ns 2kb |L0.407| " - - "L0.352[57215,65568] 10ns 385kb |---L0.352---| " - - "L0.353[65569,67700] 10ns 98kb |L0.353| " - - "L0.444[67701,76043] 10ns 385kb |---L0.444---| " - - "L0.445[76044,76252] 10ns 10kb |L0.445| " - - "L0.298[76253,76882] 10ns 29kb |L0.298| " - - "L0.299[76883,85771] 10ns 410kb |---L0.299---| " - - "L0.136[85772,85772] 10ns 0b |L0.136| " - - "L0.248[85773,92926] 10ns 330kb |--L0.248--| " - - "L0.446[92927,100723] 10ns 359kb |--L0.446---| " - - "L0.447[100724,104731] 10ns 185kb |L0.447|" - - "L1 " - - "L1.441[51363,76043] 9ns 10mb|----------------L1.441-----------------| " - - "L1.442[76044,100723] 9ns 10mb |----------------L1.442-----------------| " - - "L1.443[100724,104731] 9ns 2mb |L1.443|" + - "**** Simulation run 191, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[73573, 95785]). 
15 Input Files, 24mb total:" + - "L0 " + - "L0.440[51361,57156] 10ns 267kb|L0.440-| " + - "L0.407[57157,57214] 10ns 3kb |L0.407| " + - "L0.352[57215,65564] 10ns 385kb |---L0.352---| " + - "L0.353[65565,67699] 10ns 98kb |L0.353| " + - "L0.444[67700,76041] 10ns 385kb |---L0.444---| " + - "L0.445[76042,76252] 10ns 10kb |L0.445| " + - "L0.298[76253,76880] 10ns 29kb |L0.298| " + - "L0.299[76881,85771] 10ns 410kb |---L0.299---| " + - "L0.136[85772,85772] 10ns 47b |L0.136| " + - "L0.248[85773,92924] 10ns 330kb |--L0.248--| " + - "L0.446[92925,100721] 10ns 359kb |--L0.446---| " + - "L0.447[100722,104729] 10ns 185kb |L0.447|" + - "L1 " + - "L1.441[51361,76041] 9ns 10mb|----------------L1.441-----------------| " + - "L1.442[76042,100721] 9ns 10mb |----------------L1.442-----------------| " + - "L1.443[100722,104729] 9ns 2mb |L1.443|" - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L1 " - - "L1.?[51363,73575] 10ns 10mb|---------------L1.?----------------| " - - "L1.?[73576,95787] 10ns 10mb |---------------L1.?----------------| " - - "L1.?[95788,104731] 10ns 4mb |----L1.?-----| " + - "L1.?[51361,73573] 10ns 10mb|---------------L1.?----------------| " + - "L1.?[73574,95785] 10ns 10mb |---------------L1.?----------------| " + - "L1.?[95786,104729] 10ns 4mb |----L1.?-----| " - "Committing partition 1:" - " Soft Deleting 15 files: L0.136, L0.248, L0.298, L0.299, L0.352, L0.353, L0.407, L0.440, L1.441, L1.442, L1.443, L0.444, L0.445, L0.446, L0.447" - " Creating 3 files" - - "**** Simulation run 192, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[126689, 148646]). 20 Input Files, 29mb total:" + - "**** Simulation run 192, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[126688, 148646]). 
20 Input Files, 29mb total:" - "L0 " - - "L0.416[104732,108402] 10ns 169kb|L0.416| " - - "L0.381[108403,108723] 10ns 15kb |L0.381| " - - "L0.382[108724,109041] 10ns 15kb |L0.382| " + - "L0.416[104730,108401] 10ns 169kb|L0.416| " + - "L0.381[108402,108721] 10ns 15kb |L0.381| " + - "L0.382[108722,109041] 10ns 15kb |L0.382| " - "L0.200[109042,114328] 10ns 244kb |L0.200| " - - "L0.151[114329,114329] 10ns 0b |L0.151| " - - "L0.451[114330,129098] 10ns 681kb |------L0.451-------| " - - "L0.452[129099,132579] 10ns 160kb |L0.452| " - - "L0.418[132580,135300] 10ns 125kb |L0.418| " - - "L0.371[135301,139921] 10ns 213kb |L0.371| " - - "L0.383[139922,140563] 10ns 30kb |L0.383| " - - "L0.384[140564,142885] 10ns 107kb |L0.384| " - - "L0.163[142886,142886] 10ns 0b |L0.163| " - - "L0.319[142887,149665] 10ns 312kb |L0.319-| " - - "L0.453[149666,153464] 10ns 175kb |L0.453| " - - "L0.454[153465,156350] 10ns 133kb |L0.454| " - - "L0.274[156351,160867] 10ns 208kb |L0.274| " + - "L0.151[114329,114329] 10ns 47b |L0.151| " + - "L0.451[114330,129096] 10ns 681kb |------L0.451-------| " + - "L0.452[129097,132577] 10ns 160kb |L0.452| " + - "L0.418[132578,135298] 10ns 125kb |L0.418| " + - "L0.371[135299,139921] 10ns 213kb |L0.371| " + - "L0.383[139922,140561] 10ns 30kb |L0.383| " + - "L0.384[140562,142885] 10ns 107kb |L0.384| " + - "L0.163[142886,142886] 10ns 47b |L0.163| " + - "L0.319[142887,149666] 10ns 313kb |L0.319-| " + - "L0.453[149667,153462] 10ns 175kb |L0.453| " + - "L0.454[153463,156351] 10ns 133kb |L0.454| " + - "L0.274[156352,160867] 10ns 208kb |L0.274| " - "L0.426[160868,167314] 10ns 297kb |L0.426-| " - "L1 " - - "L1.448[104732,129098] 9ns 10mb|-------------L1.448--------------| " - - "L1.449[129099,153464] 9ns 10mb |-------------L1.449--------------| " - - "L1.450[153465,167314] 9ns 6mb |-----L1.450------| " + - "L1.448[104730,129096] 9ns 10mb|-------------L1.448--------------| " + - "L1.449[129097,153462] 9ns 10mb |-------------L1.449--------------| " + - "L1.450[153463,167314] 9ns 6mb |-----L1.450------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 29mb total:" - "L1 " - - "L1.?[104732,126689] 10ns 10mb|------------L1.?-------------| " - - "L1.?[126690,148646] 10ns 10mb |------------L1.?-------------| " + - "L1.?[104730,126688] 10ns 10mb|------------L1.?-------------| " + - "L1.?[126689,148646] 10ns 10mb |------------L1.?-------------| " - "L1.?[148647,167314] 10ns 9mb |----------L1.?----------| " - "Committing partition 1:" - " Soft Deleting 20 files: L0.151, L0.163, L0.200, L0.274, L0.319, L0.371, L0.381, L0.382, L0.383, L0.384, L0.416, L0.418, L0.426, L1.448, L1.449, L1.450, L0.451, L0.452, L0.453, L0.454" - " Creating 3 files" - - "**** Simulation run 193, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[188871]). 10 Input Files, 15mb total:" - - "L0 " - - "L0.276[198371,200000] 10ns 75kb |L0.276|" - - "L0.429[194065,198370] 10ns 199kb |-L0.429--| " - - "L0.458[191190,194064] 10ns 133kb |L0.458| " - - "L0.457[185001,191189] 10ns 285kb |----L0.457-----| " - - "L0.321[171444,185000] 10ns 625kb |--------------L0.321---------------| " - - "L0.177[171443,171443] 10ns 0b |L0.177| " + - "**** Simulation run 193, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[188874]). 
10 Input Files, 15mb total:" + - "L0 " + - "L0.276[198373,200000] 10ns 75kb |L0.276|" + - "L0.429[194067,198372] 10ns 199kb |-L0.429--| " + - "L0.458[191192,194066] 10ns 133kb |L0.458| " + - "L0.457[185003,191191] 10ns 285kb |----L0.457-----| " + - "L0.321[171444,185002] 10ns 625kb |--------------L0.321---------------| " + - "L0.177[171443,171443] 10ns 47b |L0.177| " - "L0.392[170978,171442] 10ns 21kb |L0.392| " - "L0.427[167315,170977] 10ns 169kb|-L0.427-| " - "L1 " - - "L1.456[191190,200000] 9ns 4mb |--------L1.456--------| " - - "L1.455[167315,191189] 9ns 10mb|----------------------------L1.455-----------------------------| " + - "L1.456[191192,200000] 9ns 4mb |--------L1.456--------| " + - "L1.455[167315,191191] 9ns 10mb|----------------------------L1.455-----------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 15mb total:" - "L1 " - - "L1.?[167315,188871] 10ns 10mb|--------------------------L1.?---------------------------| " - - "L1.?[188872,200000] 10ns 5mb |------------L1.?------------| " + - "L1.?[167315,188874] 10ns 10mb|--------------------------L1.?---------------------------| " + - "L1.?[188875,200000] 10ns 5mb |------------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 10 files: L0.177, L0.276, L0.321, L0.392, L0.427, L0.429, L1.455, L1.456, L0.457, L0.458" - " Creating 2 files" - - "**** Simulation run 194, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[45717, 68353]). 3 Input Files, 22mb total:" + - "**** Simulation run 194, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[45714, 68349]). 3 Input Files, 22mb total:" - "L1 " - - "L1.460[23081,46060] 10ns 10mb|----------------L1.460----------------| " - - "L1.461[46061,51362] 10ns 2mb |L1.461-| " - - "L1.462[51363,73575] 10ns 10mb |---------------L1.462----------------| " + - "L1.460[23079,46056] 10ns 10mb|----------------L1.460----------------| " + - "L1.461[46057,51360] 10ns 2mb |L1.461-| " + - "L1.462[51361,73573] 10ns 10mb |---------------L1.462----------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 22mb total:" - "L2 " - - "L2.?[23081,45717] 10ns 10mb|-----------------L2.?-----------------| " - - "L2.?[45718,68353] 10ns 10mb |-----------------L2.?-----------------| " - - "L2.?[68354,73575] 10ns 2mb |-L2.?--| " + - "L2.?[23079,45714] 10ns 10mb|-----------------L2.?-----------------| " + - "L2.?[45715,68349] 10ns 10mb |-----------------L2.?-----------------| " + - "L2.?[68350,73573] 10ns 2mb |-L2.?--| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.460, L1.461, L1.462" - " Upgrading 1 files level to CompactionLevel::L2: L1.459" - " Creating 3 files" - - "**** Simulation run 195, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[95682, 117788]). 3 Input Files, 24mb total:" + - "**** Simulation run 195, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[117786, 139786]). 
3 Input Files, 24mb total:" - "L1 " - - "L1.463[73576,95787] 10ns 10mb|--------------L1.463---------------| " - - "L1.464[95788,104731] 10ns 4mb |---L1.464----| " - - "L1.465[104732,126689] 10ns 10mb |--------------L1.465---------------| " + - "L1.464[95786,104729] 10ns 4mb|---L1.464----| " + - "L1.465[104730,126688] 10ns 10mb |--------------L1.465---------------| " + - "L1.466[126689,148646] 10ns 10mb |--------------L1.466---------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L2 " - - "L2.?[73576,95682] 10ns 10mb|---------------L2.?----------------| " - - "L2.?[95683,117788] 10ns 10mb |---------------L2.?----------------| " - - "L2.?[117789,126689] 10ns 4mb |----L2.?-----| " + - "L2.?[95786,117786] 10ns 10mb|---------------L2.?----------------| " + - "L2.?[117787,139786] 10ns 10mb |---------------L2.?----------------| " + - "L2.?[139787,148646] 10ns 4mb |----L2.?-----| " - "Committing partition 1:" - - " Soft Deleting 3 files: L1.463, L1.464, L1.465" + - " Soft Deleting 3 files: L1.464, L1.465, L1.466" + - " Upgrading 1 files level to CompactionLevel::L2: L1.463" - " Creating 3 files" - - "**** Simulation run 196, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[148507, 170324]). 3 Input Files, 29mb total:" + - "**** Simulation run 196, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[170349, 192051]). 3 Input Files, 24mb total:" - "L1 " - - "L1.466[126690,148646] 10ns 10mb|-----------L1.466------------| " - - "L1.467[148647,167314] 10ns 9mb |---------L1.467----------| " - - "L1.468[167315,188871] 10ns 10mb |-----------L1.468------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 29mb total:" + - "L1.469[188875,200000] 10ns 5mb |-----L1.469------| " + - "L1.467[148647,167314] 10ns 9mb|------------L1.467------------| " + - "L1.468[167315,188874] 10ns 10mb |--------------L1.468---------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 24mb total:" - "L2 " - - "L2.?[126690,148507] 10ns 10mb|------------L2.?-------------| " - - "L2.?[148508,170324] 10ns 10mb |------------L2.?-------------| " - - "L2.?[170325,188871] 10ns 9mb |----------L2.?----------| " + - "L2.?[148647,170349] 10ns 10mb|----------------L2.?----------------| " + - "L2.?[170350,192051] 10ns 10mb |----------------L2.?----------------| " + - "L2.?[192052,200000] 10ns 4mb |---L2.?----| " - "Committing partition 1:" - - " Soft Deleting 3 files: L1.466, L1.467, L1.468" + - " Soft Deleting 3 files: L1.467, L1.468, L1.469" - " Creating 3 files" - - "**** Final Output Files (989mb written)" - - "L1 " - - "L1.469[188872,200000] 10ns 5mb |L1.469|" + - "**** Final Output Files (984mb written)" - "L2 " - - "L2.459[100,23080] 10ns 10mb|-L2.459-| " - - "L2.470[23081,45717] 10ns 10mb |-L2.470-| " - - "L2.471[45718,68353] 10ns 10mb |-L2.471-| " - - "L2.472[68354,73575] 10ns 2mb |L2.472| " - - "L2.473[73576,95682] 10ns 10mb |L2.473-| " - - "L2.474[95683,117788] 10ns 10mb |L2.474-| " - - "L2.475[117789,126689] 10ns 4mb |L2.475| " - - "L2.476[126690,148507] 10ns 10mb |L2.476-| " - - "L2.477[148508,170324] 10ns 10mb |L2.477-| " - - "L2.478[170325,188871] 10ns 9mb |L2.478| " + - "L2.459[100,23078] 10ns 10mb|-L2.459-| " + - "L2.463[73574,95785] 10ns 10mb |L2.463-| " + - "L2.470[23079,45714] 10ns 10mb |-L2.470-| " + - "L2.471[45715,68349] 10ns 10mb |-L2.471-| " + - "L2.472[68350,73573] 10ns 2mb |L2.472| " + - "L2.473[95786,117786] 10ns 10mb |L2.473-| " + - "L2.474[117787,139786] 10ns 10mb |L2.474-| " + - 
"L2.475[139787,148646] 10ns 4mb |L2.475| " + - "L2.476[148647,170349] 10ns 10mb |L2.476-| " + - "L2.477[170350,192051] 10ns 10mb |L2.477-| " + - "L2.478[192052,200000] 10ns 4mb |L2.478|" "### ); } diff --git a/compactor/tests/layouts/large_files.rs b/compactor/tests/layouts/large_files.rs index 92dfbe3a69..9e7bae9fcd 100644 --- a/compactor/tests/layouts/large_files.rs +++ b/compactor/tests/layouts/large_files.rs @@ -305,31 +305,33 @@ async fn two_large_files_total_over_max_compact_size() { - "L1.4[668,1000] 9ns |------------------------------------------L1.4------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 50mb total:" - "L1 " - - "L1.?[668,668] 9ns 0b |L1.?| " + - "L1.?[668,668] 9ns 154kb |L1.?| " - "L1.?[669,1000] 9ns 50mb |-----------------------------------------L1.?------------------------------------------| " - "Committing partition 1:" - " Soft Deleting 1 files: L1.4" - " Creating 2 files" - - "**** Simulation run 3, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[335]). 3 Input Files, 200mb total:" + - "**** Simulation run 3, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[334, 667]). 3 Input Files, 200mb total:" - "L1 " - "L1.3[1,667] 9ns 100mb |-----------------------------------------L1.3------------------------------------------| " - - "L1.7[668,668] 9ns 0b |L1.7|" + - "L1.7[668,668] 9ns 154kb |L1.7|" - "L2 " - "L2.5[2,668] 8ns 100mb |-----------------------------------------L2.5------------------------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 200mb total:" + - "**** 3 Output Files (parquet_file_id not yet assigned), 200mb total:" - "L2 " - - "L2.?[1,335] 9ns 100mb |-------------------L2.?--------------------| " - - "L2.?[336,668] 9ns 100mb |-------------------L2.?-------------------| " + - "L2.?[1,334] 9ns 100mb |-------------------L2.?-------------------| " + - "L2.?[335,667] 9ns 100mb |-------------------L2.?-------------------| " + - "L2.?[668,668] 9ns 307kb |L2.?|" - "Committing partition 1:" - " Soft Deleting 3 files: L1.3, L2.5, L1.7" - - " Creating 2 files" + - " Creating 3 files" - "**** Final Output Files (550mb written)" - "L1 " - "L1.8[669,1000] 9ns 50mb |-----------L1.8------------| " - "L2 " - "L2.6[669,1000] 8ns 50mb |-----------L2.6------------| " - - "L2.9[1,335] 9ns 100mb |------------L2.9------------| " - - "L2.10[336,668] 9ns 100mb |-----------L2.10-----------| " + - "L2.9[1,334] 9ns 100mb |------------L2.9------------| " + - "L2.10[335,667] 9ns 100mb |-----------L2.10-----------| " + - "L2.11[668,668] 9ns 307kb |L2.11| " "### ); } @@ -389,7 +391,7 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range() { - "**** 2 Output Files (parquet_file_id not yet assigned), 150mb total:" - "L2 " - "L2.?[800,934] 8ns 101mb |---------------------------L2.?---------------------------| " - - "L2.?[935,1000] 8ns 50mb |-----------L2.?------------| " + - "L2.?[935,1000] 8ns 49mb |-----------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.1, L2.2" - " Creating 4 files" @@ -397,7 +399,7 @@ async fn two_large_files_total_over_max_compact_size_small_overlap_range() { - "L1 " - "L1.4[668,1000] 9ns 50mb |------------------------------------------L1.4------------------------------------------|" - "L2 " - - "L2.6[935,1000] 8ns 50mb |-----L2.6------| " + - "L2.6[935,1000] 8ns 49mb |-----L2.6------| " - "L2.5[800,934] 8ns 101mb |---------------L2.5---------------| " - "**** 
2 Output Files (parquet_file_id not yet assigned), 200mb total:" - "L2 " @@ -645,41 +647,41 @@ async fn two_large_files_total_over_max_compact_size_start_l0() { - "Committing partition 1:" - " Soft Deleting 2 files: L0.1, L1.2" - " Creating 6 files" - - "**** Simulation run 2, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[334]). 4 Input Files, 199mb total:" + - "**** Simulation run 2, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[334]). 4 Input Files, 200mb total:" - "L0 " - "L0.6[0,333] 10ns 50mb |-------------------L0.6--------------------| " - "L0.7[334,666] 10ns 50mb |-------------------L0.7-------------------| " - "L1 " - "L1.3[1,333] 9ns 50mb |-------------------L1.3-------------------| " - "L1.4[334,666] 9ns 50mb |-------------------L1.4-------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 199mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 200mb total:" - "L1 " - "L1.?[0,334] 10ns 100mb |-------------------L1.?--------------------| " - "L1.?[335,666] 10ns 99mb |-------------------L1.?-------------------| " - "Committing partition 1:" - " Soft Deleting 4 files: L1.3, L1.4, L0.6, L0.7" - " Creating 2 files" - - "**** Simulation run 3, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[933]). 2 Input Files, 101mb total:" + - "**** Simulation run 3, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[933]). 2 Input Files, 100mb total:" - "L0 " - "L0.8[667,1000] 10ns 50mb |------------------------------------------L0.8------------------------------------------|" - "L1 " - "L1.5[667,1000] 9ns 50mb |------------------------------------------L1.5------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:" + - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - "L1.?[667,933] 10ns 80mb |--------------------------------L1.?---------------------------------| " - "L1.?[934,1000] 10ns 20mb |-----L1.?------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.5, L0.8" - " Creating 2 files" - - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[668]). 3 Input Files, 200mb total:" + - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[669]). 
3 Input Files, 200mb total:" - "L1 " - "L1.10[335,666] 10ns 99mb |------------------L1.10-------------------| " - "L1.12[934,1000] 10ns 20mb |L1.12-| " - "L1.11[667,933] 10ns 80mb |--------------L1.11---------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 200mb total:" - "L2 " - - "L2.?[335,668] 10ns 100mb |-------------------L2.?--------------------| " - - "L2.?[669,1000] 10ns 100mb |-------------------L2.?-------------------| " + - "L2.?[335,669] 10ns 100mb |-------------------L2.?--------------------| " + - "L2.?[670,1000] 10ns 99mb |-------------------L2.?-------------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.10, L1.11, L1.12" - " Upgrading 1 files level to CompactionLevel::L2: L1.9" @@ -687,8 +689,8 @@ async fn two_large_files_total_over_max_compact_size_start_l0() { - "**** Final Output Files (800mb written)" - "L2 " - "L2.9[0,334] 10ns 100mb |------------L2.9------------| " - - "L2.13[335,668] 10ns 100mb |-----------L2.13-----------| " - - "L2.14[669,1000] 10ns 100mb |-----------L2.14-----------| " + - "L2.13[335,669] 10ns 100mb |-----------L2.13------------| " + - "L2.14[670,1000] 10ns 99mb |-----------L2.14-----------| " "### ); @@ -898,21 +900,21 @@ async fn target_too_large_2() { - "L1.3[1001,2000] 11ns |------------------------------------------L1.3------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 50mb total:" - "L1 " - - "L1.?[1001,1001] 11ns 0b |L1.?| " + - "L1.?[1001,1001] 11ns 51kb|L1.?| " - "L1.?[1002,2000] 11ns 50mb|-----------------------------------------L1.?------------------------------------------| " - "Committing partition 1:" - " Soft Deleting 1 files: L1.3" - " Creating 2 files" - - "**** Simulation run 2, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[685]). 3 Input Files, 146mb total:" + - "**** Simulation run 2, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[684]). 
3 Input Files, 146mb total:" - "L1 " - "L1.2[1,1000] 10ns 69mb |-----------------------------------------L1.2------------------------------------------| " - - "L1.6[1001,1001] 11ns 0b |L1.6|" + - "L1.6[1001,1001] 11ns 51kb |L1.6|" - "L2 " - "L2.4[1,1001] 5ns 77mb |------------------------------------------L2.4------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 146mb total:" - "L2 " - - "L2.?[1,685] 11ns 100mb |---------------------------L2.?----------------------------| " - - "L2.?[686,1001] 11ns 46mb |-----------L2.?-----------| " + - "L2.?[1,684] 11ns 100mb |---------------------------L2.?----------------------------| " + - "L2.?[685,1001] 11ns 46mb |-----------L2.?-----------| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.2, L2.4, L1.6" - " Creating 2 files" @@ -921,8 +923,8 @@ async fn target_too_large_2() { - "L1.7[1002,2000] 11ns 50mb |-----------L1.7------------| " - "L2 " - "L2.5[1002,3000] 5ns 155mb |--------------------------L2.5---------------------------| " - - "L2.8[1,685] 11ns 100mb |-------L2.8-------| " - - "L2.9[686,1001] 11ns 46mb |-L2.9--| " + - "L2.8[1,684] 11ns 100mb |-------L2.8-------| " + - "L2.9[685,1001] 11ns 46mb |-L2.9--| " - "WARNING: file L2.5[1002,3000] 5ns 155mb exceeds soft limit 100mb by more than 50%" "### ); @@ -1000,43 +1002,43 @@ async fn start_too_large_similar_time_range() { - "L1.5[802,1000] 9ns |-----------------------------------------L1.5------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 50mb total:" - "L1 " - - "L1.?[802,802] 9ns 0b |L1.?| " + - "L1.?[802,802] 9ns 256kb |L1.?| " - "L1.?[803,1000] 9ns 50mb |-----------------------------------------L1.?------------------------------------------| " - "**** Simulation run 3, type=split(ReduceOverlap)(split_times=[402]). 1 Input Files, 100mb total:" - "L1, all files 100mb " - "L1.4[402,801] 9ns |------------------------------------------L1.4------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - - "L1.?[402,402] 9ns 0b |L1.?| " + - "L1.?[402,402] 9ns 256kb |L1.?| " - "L1.?[403,801] 9ns 100mb |-----------------------------------------L1.?------------------------------------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L1.4, L1.5" - " Creating 4 files" - - "**** Simulation run 4, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[333, 665]). 6 Input Files, 242mb total:" + - "**** Simulation run 4, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[332, 663]). 
6 Input Files, 242mb total:" - "L1 " - "L1.3[1,401] 9ns 100mb |-------------------L1.3-------------------| " - - "L1.11[402,402] 9ns 0b |L1.11| " + - "L1.11[402,402] 9ns 256kb |L1.11| " - "L1.12[403,801] 9ns 100mb |------------------L1.12-------------------| " - - "L1.9[802,802] 9ns 0b |L1.9|" + - "L1.9[802,802] 9ns 256kb |L1.9|" - "L2 " - "L2.6[2,402] 8ns 21mb |-------------------L2.6-------------------| " - "L2.7[403,802] 8ns 21mb |-------------------L2.7-------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 242mb total:" - "L2 " - - "L2.?[1,333] 9ns 100mb |---------------L2.?----------------| " - - "L2.?[334,665] 9ns 100mb |---------------L2.?----------------| " - - "L2.?[666,802] 9ns 42mb |----L2.?-----| " + - "L2.?[1,332] 9ns 100mb |---------------L2.?----------------| " + - "L2.?[333,663] 9ns 100mb |---------------L2.?----------------| " + - "L2.?[664,802] 9ns 42mb |----L2.?-----| " - "Committing partition 1:" - " Soft Deleting 6 files: L1.3, L2.6, L2.7, L1.9, L1.11, L1.12" - " Creating 3 files" - - "**** Final Output Files (693mb written)" + - "**** Final Output Files (694mb written)" - "L1 " - "L1.10[803,1000] 9ns 50mb |-----L1.10-----| " - "L2 " - "L2.8[803,1000] 8ns 10mb |-----L2.8------| " - - "L2.13[1,333] 9ns 100mb |-----------L2.13-----------| " - - "L2.14[334,665] 9ns 100mb |-----------L2.14-----------| " - - "L2.15[666,802] 9ns 42mb |--L2.15---| " + - "L2.13[1,332] 9ns 100mb |-----------L2.13-----------| " + - "L2.14[333,663] 9ns 100mb |-----------L2.14-----------| " + - "L2.15[664,802] 9ns 42mb |--L2.15---| " "### ); } @@ -1178,7 +1180,7 @@ async fn start_too_large_small_time_range_2() { - "Committing partition 1:" - " Soft Deleting 1 files: L1.1" - " Creating 3 files" - - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[2132, 2983]). 3 Input Files, 202mb total:" + - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[2133, 2985]). 
3 Input Files, 202mb total:" - "L1 " - "L1.5[1761,2000] 1ns 50mb |---L1.5---| " - "L1.4[1281,1760] 1ns 100mb|---------L1.4----------| " @@ -1186,9 +1188,9 @@ async fn start_too_large_small_time_range_2() { - "L2.2[1600,3000] 1ns 52mb |---------------------------------L2.2----------------------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:" - "L2 " - - "L2.?[1281,2132] 1ns 100mb|-------------------L2.?-------------------| " - - "L2.?[2133,2983] 1ns 100mb |-------------------L2.?-------------------| " - - "L2.?[2984,3000] 1ns 2mb |L2.?|" + - "L2.?[1281,2133] 1ns 100mb|-------------------L2.?-------------------| " + - "L2.?[2134,2985] 1ns 100mb |-------------------L2.?-------------------| " + - "L2.?[2986,3000] 1ns 2mb |L2.?|" - "Committing partition 1:" - " Soft Deleting 3 files: L2.2, L1.4, L1.5" - " Upgrading 1 files level to CompactionLevel::L2: L1.3" @@ -1196,9 +1198,9 @@ async fn start_too_large_small_time_range_2() { - "**** Final Output Files (452mb written)" - "L2 " - "L2.3[800,1280] 1ns 100mb |------L2.3-------| " - - "L2.6[1281,2132] 1ns 100mb |--------------L2.6--------------| " - - "L2.7[2133,2983] 1ns 100mb |--------------L2.7--------------| " - - "L2.8[2984,3000] 1ns 2mb |L2.8|" + - "L2.6[1281,2133] 1ns 100mb |--------------L2.6--------------| " + - "L2.7[2134,2985] 1ns 100mb |--------------L2.7--------------| " + - "L2.8[2986,3000] 1ns 2mb |L2.8|" "### ); } @@ -1256,22 +1258,22 @@ async fn start_too_large_small_time_range_3() { - "**** 3 Output Files (parquet_file_id not yet assigned), 250mb total:" - "L1 " - "L1.?[0,120] 9ns 100mb |---------------L1.?---------------| " - - "L1.?[121,240] 9ns 99mb |--------------L1.?---------------| " - - "L1.?[241,300] 9ns 51mb |-----L1.?------| " + - "L1.?[121,240] 9ns 100mb |--------------L1.?---------------| " + - "L1.?[241,300] 9ns 50mb |-----L1.?------| " - "Committing partition 1:" - " Soft Deleting 1 files: L1.1" - " Creating 3 files" - - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[705, 1289]). 3 Input Files, 202mb total:" + - "**** Simulation run 1, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[707, 1293]). 
3 Input Files, 202mb total:" - "L1 " - - "L1.5[241,300] 9ns 51mb |L1.5| " - - "L1.4[121,240] 9ns 99mb |-L1.4--| " + - "L1.5[241,300] 9ns 50mb |L1.5| " + - "L1.4[121,240] 9ns 100mb |-L1.4--| " - "L2 " - "L2.2[200,1300] 8ns 52mb |--------------------------------------L2.2---------------------------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 202mb total:" - "L2 " - - "L2.?[121,705] 9ns 100mb |-------------------L2.?-------------------| " - - "L2.?[706,1289] 9ns 100mb |-------------------L2.?-------------------| " - - "L2.?[1290,1300] 9ns 2mb |L2.?|" + - "L2.?[121,707] 9ns 100mb |-------------------L2.?-------------------| " + - "L2.?[708,1293] 9ns 100mb |-------------------L2.?-------------------| " + - "L2.?[1294,1300] 9ns 1mb |L2.?|" - "Committing partition 1:" - " Soft Deleting 3 files: L2.2, L1.4, L1.5" - " Upgrading 1 files level to CompactionLevel::L2: L1.3" @@ -1279,9 +1281,9 @@ async fn start_too_large_small_time_range_3() { - "**** Final Output Files (452mb written)" - "L2 " - "L2.3[0,120] 9ns 100mb |-L2.3-| " - - "L2.6[121,705] 9ns 100mb |-----------------L2.6-----------------| " - - "L2.7[706,1289] 9ns 100mb |-----------------L2.7-----------------| " - - "L2.8[1290,1300] 9ns 2mb |L2.8|" + - "L2.6[121,707] 9ns 100mb |-----------------L2.6-----------------| " + - "L2.7[708,1293] 9ns 100mb |-----------------L2.7-----------------| " + - "L2.8[1294,1300] 9ns 1mb |L2.8|" "### ); } diff --git a/compactor/tests/layouts/large_overlaps.rs b/compactor/tests/layouts/large_overlaps.rs index cf12dcf57f..18eca24024 100644 --- a/compactor/tests/layouts/large_overlaps.rs +++ b/compactor/tests/layouts/large_overlaps.rs @@ -80,35 +80,35 @@ async fn one_l1_overlaps_with_many_l2s() { - "L1.?[1,100] 240s 40mb |--------------L1.?---------------| " - "L1.?[101,150] 240s 20mb |-----L1.?------| " - "L1.?[151,200] 240s 20mb |-----L1.?------| " - - "L1.?[201,250] 240s 21mb |-----L1.?------| " + - "L1.?[201,250] 240s 20mb |-----L1.?------| " - "Committing partition 1:" - " Soft Deleting 1 files: L1.4" - " Creating 4 files" - - "**** Simulation run 1, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[59, 117]). 4 Input Files, 259mb total:" + - "**** Simulation run 1, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[59, 117]). 
4 Input Files, 260mb total:" - "L1 " - "L1.6[1,100] 240s 40mb |--------------------------L1.6---------------------------| " - "L1.7[101,150] 240s 20mb |-----------L1.7------------| " - "L2 " - "L2.1[51,100] 0ns 100mb |-----------L2.1------------| " - "L2.2[101,150] 60s 100mb |-----------L2.2------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 259mb total:" + - "**** 3 Output Files (parquet_file_id not yet assigned), 260mb total:" - "L2 " - - "L2.?[1,59] 240s 101mb |--------------L2.?---------------| " - - "L2.?[60,117] 240s 99mb |--------------L2.?--------------| " - - "L2.?[118,150] 240s 59mb |------L2.?-------| " + - "L2.?[1,59] 240s 102mb |--------------L2.?---------------| " + - "L2.?[60,117] 240s 101mb |--------------L2.?--------------| " + - "L2.?[118,150] 240s 57mb |------L2.?-------| " - "Committing partition 1:" - " Soft Deleting 4 files: L2.1, L2.2, L1.6, L1.7" - " Creating 3 files" - - "**** Final Output Files (359mb written)" + - "**** Final Output Files (360mb written)" - "L1 " - "L1.5[251,500] 300s 30mb |-------------------L1.5-------------------| " - "L1.8[151,200] 240s 20mb |-L1.8-| " - - "L1.9[201,250] 240s 21mb |-L1.9-| " + - "L1.9[201,250] 240s 20mb |-L1.9-| " - "L2 " - "L2.3[151,200] 120s 70mb |-L2.3-| " - - "L2.10[1,59] 240s 101mb |-L2.10--| " - - "L2.11[60,117] 240s 99mb |-L2.11--| " - - "L2.12[118,150] 240s 59mb |L2.12| " + - "L2.10[1,59] 240s 102mb |-L2.10--| " + - "L2.11[60,117] 240s 101mb |-L2.11--| " + - "L2.12[118,150] 240s 57mb |L2.12| " "### ); } @@ -203,8 +203,8 @@ async fn many_l1_overlaps_with_many_l2s() { - "L1.6[91,105] 360s |------------------------------------------L1.6------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 13mb total:" - "L1 " - - "L1.?[91,100] 360s 8mb |-------------------------L1.?--------------------------| " - - "L1.?[101,105] 360s 5mb |---------L1.?----------| " + - "L1.?[91,100] 360s 9mb |-------------------------L1.?--------------------------| " + - "L1.?[101,105] 360s 4mb |---------L1.?----------| " - "Committing partition 1:" - " Soft Deleting 1 files: L1.6" - " Creating 2 files" @@ -212,8 +212,8 @@ async fn many_l1_overlaps_with_many_l2s() { - "L1 " - "L1.4[61,75] 240s 13mb |---L1.4---| " - "L1.5[76,90] 300s 13mb |---L1.5---| " - - "L1.12[91,100] 360s 8mb |L1.12-| " - - "L1.13[101,105] 360s 5mb |L1.13| " + - "L1.12[91,100] 360s 9mb |L1.12-| " + - "L1.13[101,105] 360s 4mb |L1.13| " - "L1.7[106,120] 420s 13mb |---L1.7---| " - "L1.8[121,135] 480s 13mb |---L1.8---| " - "L1.9[136,150] 540s 13mb |---L1.9---| " @@ -222,9 +222,9 @@ async fn many_l1_overlaps_with_many_l2s() { - "L2.2[101,150] 60s 100mb |-------------------L2.2-------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 278mb total:" - "L2 " - - "L2.?[51,87] 540s 101mb |-------------L2.?-------------| " - - "L2.?[88,123] 540s 98mb |------------L2.?-------------| " - - "L2.?[124,150] 540s 79mb |--------L2.?---------| " + - "L2.?[51,87] 540s 103mb |-------------L2.?-------------| " + - "L2.?[88,123] 540s 100mb |------------L2.?-------------| " + - "L2.?[124,150] 540s 75mb |--------L2.?---------| " - "Committing partition 1:" - " Soft Deleting 9 files: L2.1, L2.2, L1.4, L1.5, L1.7, L1.8, L1.9, L1.12, L1.13" - " Creating 3 files" @@ -234,9 +234,9 @@ async fn many_l1_overlaps_with_many_l2s() { - "L1.11[201,215] 660s 13mb |L1.11| " - "L2 " - "L2.3[151,200] 120s 70mb |----------L2.3----------| " - - "L2.14[51,87] 540s 101mb |------L2.14------| " - - "L2.15[88,123] 540s 98mb 
|------L2.15------| " - - "L2.16[124,150] 540s 79mb |---L2.16----| " + - "L2.14[51,87] 540s 103mb |------L2.14------| " + - "L2.15[88,123] 540s 100mb |------L2.15------| " + - "L2.16[124,150] 540s 75mb |---L2.16----| " "### ); } @@ -718,124 +718,125 @@ async fn many_good_size_l0_files() { - "L0.150[149,150] 150ns |L0.150|" - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:" - "L1 " - - "L1.?[0,50] 150ns 100mb |------------L1.?------------| " - - "L1.?[51,100] 150ns 98mb |-----------L1.?------------| " - - "L1.?[101,150] 150ns 102mb |-----------L1.?------------| " + - "L1.?[0,50] 150ns 101mb |------------L1.?------------| " + - "L1.?[51,100] 150ns 99mb |-----------L1.?------------| " + - "L1.?[101,150] 150ns 99mb |-----------L1.?------------| " - "Committing partition 1:" - " Soft Deleting 150 files: L0.1, L0.2, L0.3, L0.4, L0.5, L0.6, L0.7, L0.8, L0.9, L0.10, L0.11, L0.12, L0.13, L0.14, L0.15, L0.16, L0.17, L0.18, L0.19, L0.20, L0.21, L0.22, L0.23, L0.24, L0.25, L0.26, L0.27, L0.28, L0.29, L0.30, L0.31, L0.32, L0.33, L0.34, L0.35, L0.36, L0.37, L0.38, L0.39, L0.40, L0.41, L0.42, L0.43, L0.44, L0.45, L0.46, L0.47, L0.48, L0.49, L0.50, L0.51, L0.52, L0.53, L0.54, L0.55, L0.56, L0.57, L0.58, L0.59, L0.60, L0.61, L0.62, L0.63, L0.64, L0.65, L0.66, L0.67, L0.68, L0.69, L0.70, L0.71, L0.72, L0.73, L0.74, L0.75, L0.76, L0.77, L0.78, L0.79, L0.80, L0.81, L0.82, L0.83, L0.84, L0.85, L0.86, L0.87, L0.88, L0.89, L0.90, L0.91, L0.92, L0.93, L0.94, L0.95, L0.96, L0.97, L0.98, L0.99, L0.100, L0.101, L0.102, L0.103, L0.104, L0.105, L0.106, L0.107, L0.108, L0.109, L0.110, L0.111, L0.112, L0.113, L0.114, L0.115, L0.116, L0.117, L0.118, L0.119, L0.120, L0.121, L0.122, L0.123, L0.124, L0.125, L0.126, L0.127, L0.128, L0.129, L0.130, L0.131, L0.132, L0.133, L0.134, L0.135, L0.136, L0.137, L0.138, L0.139, L0.140, L0.141, L0.142, L0.143, L0.144, L0.145, L0.146, L0.147, L0.148, L0.149, L0.150" - " Creating 3 files" - - "**** Simulation run 1, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[151, 201]). 100 Input Files, 300mb total:" + - "**** Simulation run 1, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[151, 201]). 
101 Input Files, 299mb total:" - "L0 " - "L0.151[150,151] 151ns 2mb |L0.151| " - "L0.152[151,152] 152ns 2mb |L0.152| " - - "L0.153[152,153] 153ns 2mb |L0.153| " + - "L0.153[152,153] 153ns 2mb |L0.153| " - "L0.154[153,154] 154ns 2mb |L0.154| " - "L0.155[154,155] 155ns 2mb |L0.155| " - "L0.156[155,156] 156ns 2mb |L0.156| " - "L0.157[156,157] 157ns 2mb |L0.157| " - - "L0.158[157,158] 158ns 2mb |L0.158| " + - "L0.158[157,158] 158ns 2mb |L0.158| " - "L0.159[158,159] 159ns 2mb |L0.159| " - "L0.160[159,160] 160ns 2mb |L0.160| " - "L0.161[160,161] 161ns 2mb |L0.161| " - "L0.162[161,162] 162ns 2mb |L0.162| " - - "L0.163[162,163] 163ns 2mb |L0.163| " + - "L0.163[162,163] 163ns 2mb |L0.163| " - "L0.164[163,164] 164ns 2mb |L0.164| " - "L0.165[164,165] 165ns 2mb |L0.165| " - "L0.166[165,166] 166ns 2mb |L0.166| " - "L0.167[166,167] 167ns 2mb |L0.167| " - - "L0.168[167,168] 168ns 2mb |L0.168| " + - "L0.168[167,168] 168ns 2mb |L0.168| " - "L0.169[168,169] 169ns 2mb |L0.169| " - "L0.170[169,170] 170ns 2mb |L0.170| " - "L0.171[170,171] 171ns 2mb |L0.171| " - "L0.172[171,172] 172ns 2mb |L0.172| " - - "L0.173[172,173] 173ns 2mb |L0.173| " + - "L0.173[172,173] 173ns 2mb |L0.173| " - "L0.174[173,174] 174ns 2mb |L0.174| " - "L0.175[174,175] 175ns 2mb |L0.175| " - - "L0.176[175,176] 176ns 2mb |L0.176| " + - "L0.176[175,176] 176ns 2mb |L0.176| " - "L0.177[176,177] 177ns 2mb |L0.177| " - - "L0.178[177,178] 178ns 2mb |L0.178| " + - "L0.178[177,178] 178ns 2mb |L0.178| " - "L0.179[178,179] 179ns 2mb |L0.179| " - "L0.180[179,180] 180ns 2mb |L0.180| " - - "L0.181[180,181] 181ns 2mb |L0.181| " + - "L0.181[180,181] 181ns 2mb |L0.181| " - "L0.182[181,182] 182ns 2mb |L0.182| " - - "L0.183[182,183] 183ns 2mb |L0.183| " + - "L0.183[182,183] 183ns 2mb |L0.183| " - "L0.184[183,184] 184ns 2mb |L0.184| " - "L0.185[184,185] 185ns 2mb |L0.185| " - - "L0.186[185,186] 186ns 2mb |L0.186| " + - "L0.186[185,186] 186ns 2mb |L0.186| " - "L0.187[186,187] 187ns 2mb |L0.187| " - - "L0.188[187,188] 188ns 2mb |L0.188| " + - "L0.188[187,188] 188ns 2mb |L0.188| " - "L0.189[188,189] 189ns 2mb |L0.189| " - "L0.190[189,190] 190ns 2mb |L0.190| " - - "L0.191[190,191] 191ns 2mb |L0.191| " + - "L0.191[190,191] 191ns 2mb |L0.191| " - "L0.192[191,192] 192ns 2mb |L0.192| " - - "L0.193[192,193] 193ns 2mb |L0.193| " + - "L0.193[192,193] 193ns 2mb |L0.193| " - "L0.194[193,194] 194ns 2mb |L0.194| " - "L0.195[194,195] 195ns 2mb |L0.195| " - - "L0.196[195,196] 196ns 2mb |L0.196| " + - "L0.196[195,196] 196ns 2mb |L0.196| " - "L0.197[196,197] 197ns 2mb |L0.197| " - - "L0.198[197,198] 198ns 2mb |L0.198| " + - "L0.198[197,198] 198ns 2mb |L0.198| " - "L0.199[198,199] 199ns 2mb |L0.199| " - "L0.200[199,200] 200ns 2mb |L0.200| " - - "L0.201[200,201] 201ns 2mb |L0.201| " + - "L0.201[200,201] 201ns 2mb |L0.201| " - "L0.202[201,202] 202ns 2mb |L0.202| " - "L0.203[202,203] 203ns 2mb |L0.203| " - - "L0.204[203,204] 204ns 2mb |L0.204| " + - "L0.204[203,204] 204ns 2mb |L0.204| " - "L0.205[204,205] 205ns 2mb |L0.205| " - - "L0.206[205,206] 206ns 2mb |L0.206| " + - "L0.206[205,206] 206ns 2mb |L0.206| " - "L0.207[206,207] 207ns 2mb |L0.207| " - "L0.208[207,208] 208ns 2mb |L0.208| " - - "L0.209[208,209] 209ns 2mb |L0.209| " + - "L0.209[208,209] 209ns 2mb |L0.209| " - "L0.210[209,210] 210ns 2mb |L0.210| " - - "L0.211[210,211] 211ns 2mb |L0.211| " + - "L0.211[210,211] 211ns 2mb |L0.211| " - "L0.212[211,212] 212ns 2mb |L0.212| " - "L0.213[212,213] 213ns 2mb |L0.213| " - - "L0.214[213,214] 214ns 2mb |L0.214| " + - "L0.214[213,214] 214ns 2mb |L0.214| " - "L0.215[214,215] 215ns 2mb 
|L0.215| " - - "L0.216[215,216] 216ns 2mb |L0.216| " + - "L0.216[215,216] 216ns 2mb |L0.216| " - "L0.217[216,217] 217ns 2mb |L0.217| " - "L0.218[217,218] 218ns 2mb |L0.218| " - - "L0.219[218,219] 219ns 2mb |L0.219| " + - "L0.219[218,219] 219ns 2mb |L0.219| " - "L0.220[219,220] 220ns 2mb |L0.220| " - - "L0.221[220,221] 221ns 2mb |L0.221| " + - "L0.221[220,221] 221ns 2mb |L0.221| " - "L0.222[221,222] 222ns 2mb |L0.222| " - "L0.223[222,223] 223ns 2mb |L0.223| " - - "L0.224[223,224] 224ns 2mb |L0.224| " + - "L0.224[223,224] 224ns 2mb |L0.224| " - "L0.225[224,225] 225ns 2mb |L0.225| " - - "L0.226[225,226] 226ns 2mb |L0.226| " - - "L0.227[226,227] 227ns 2mb |L0.227| " + - "L0.226[225,226] 226ns 2mb |L0.226| " + - "L0.227[226,227] 227ns 2mb |L0.227| " - "L0.228[227,228] 228ns 2mb |L0.228| " - - "L0.229[228,229] 229ns 2mb |L0.229| " + - "L0.229[228,229] 229ns 2mb |L0.229| " - "L0.230[229,230] 230ns 2mb |L0.230| " - - "L0.231[230,231] 231ns 2mb |L0.231| " - - "L0.232[231,232] 232ns 2mb |L0.232| " + - "L0.231[230,231] 231ns 2mb |L0.231| " + - "L0.232[231,232] 232ns 2mb |L0.232| " - "L0.233[232,233] 233ns 2mb |L0.233| " - - "L0.234[233,234] 234ns 2mb |L0.234| " + - "L0.234[233,234] 234ns 2mb |L0.234| " - "L0.235[234,235] 235ns 2mb |L0.235| " - - "L0.236[235,236] 236ns 2mb |L0.236| " - - "L0.237[236,237] 237ns 2mb |L0.237|" + - "L0.236[235,236] 236ns 2mb |L0.236| " + - "L0.237[236,237] 237ns 2mb |L0.237| " - "L0.238[237,238] 238ns 2mb |L0.238|" - - "L0.239[238,239] 239ns 2mb |L0.239|" + - "L0.239[238,239] 239ns 2mb |L0.239|" - "L0.240[239,240] 240ns 2mb |L0.240|" - - "L0.241[240,241] 241ns 2mb |L0.241|" - - "L0.242[241,242] 242ns 2mb |L0.242|" + - "L0.241[240,241] 241ns 2mb |L0.241|" + - "L0.242[241,242] 242ns 2mb |L0.242|" - "L0.243[242,243] 243ns 2mb |L0.243|" - - "L0.244[243,244] 244ns 2mb |L0.244|" + - "L0.244[243,244] 244ns 2mb |L0.244|" - "L0.245[244,245] 245ns 2mb |L0.245|" - - "L0.246[245,246] 246ns 2mb |L0.246|" - - "L0.247[246,247] 247ns 2mb |L0.247|" + - "L0.246[245,246] 246ns 2mb |L0.246|" + - "L0.247[246,247] 247ns 2mb |L0.247|" - "L0.248[247,248] 248ns 2mb |L0.248|" - - "L0.249[248,249] 249ns 2mb |L0.249|" + - "L0.249[248,249] 249ns 2mb |L0.249|" + - "L0.250[249,250] 250ns 2mb |L0.250|" - "L1 " - - "L1.291[101,150] 150ns 102mb|----------L1.291-----------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 300mb total:" + - "L1.291[101,150] 150ns 99mb|----------L1.291-----------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 299mb total:" - "L1 " - - "L1.?[101,151] 249ns 101mb|------------L1.?------------| " - - "L1.?[152,201] 249ns 99mb |-----------L1.?------------| " - - "L1.?[202,249] 249ns 99mb |-----------L1.?-----------| " + - "L1.?[101,151] 250ns 102mb|------------L1.?------------| " + - "L1.?[152,201] 250ns 100mb |-----------L1.?------------| " + - "L1.?[202,250] 250ns 98mb |-----------L1.?-----------| " - "Committing partition 1:" - - " Soft Deleting 100 files: L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L0.191, L0.192, L0.193, L0.194, L0.195, L0.196, L0.197, L0.198, L0.199, L0.200, L0.201, L0.202, L0.203, L0.204, L0.205, L0.206, L0.207, L0.208, L0.209, L0.210, L0.211, L0.212, L0.213, L0.214, L0.215, L0.216, L0.217, L0.218, L0.219, L0.220, L0.221, L0.222, L0.223, L0.224, L0.225, 
L0.226, L0.227, L0.228, L0.229, L0.230, L0.231, L0.232, L0.233, L0.234, L0.235, L0.236, L0.237, L0.238, L0.239, L0.240, L0.241, L0.242, L0.243, L0.244, L0.245, L0.246, L0.247, L0.248, L0.249, L1.291" + - " Soft Deleting 101 files: L0.151, L0.152, L0.153, L0.154, L0.155, L0.156, L0.157, L0.158, L0.159, L0.160, L0.161, L0.162, L0.163, L0.164, L0.165, L0.166, L0.167, L0.168, L0.169, L0.170, L0.171, L0.172, L0.173, L0.174, L0.175, L0.176, L0.177, L0.178, L0.179, L0.180, L0.181, L0.182, L0.183, L0.184, L0.185, L0.186, L0.187, L0.188, L0.189, L0.190, L0.191, L0.192, L0.193, L0.194, L0.195, L0.196, L0.197, L0.198, L0.199, L0.200, L0.201, L0.202, L0.203, L0.204, L0.205, L0.206, L0.207, L0.208, L0.209, L0.210, L0.211, L0.212, L0.213, L0.214, L0.215, L0.216, L0.217, L0.218, L0.219, L0.220, L0.221, L0.222, L0.223, L0.224, L0.225, L0.226, L0.227, L0.228, L0.229, L0.230, L0.231, L0.232, L0.233, L0.234, L0.235, L0.236, L0.237, L0.238, L0.239, L0.240, L0.241, L0.242, L0.243, L0.244, L0.245, L0.246, L0.247, L0.248, L0.249, L0.250, L1.291" - " Creating 3 files" - - "**** Simulation run 2, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[251]). 40 Input Files, 177mb total:" + - "**** Simulation run 2, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[252]). 39 Input Files, 174mb total:" - "L0 " - "L0.288[287,288] 288ns 2mb |L0.288|" - "L0.287[286,287] 287ns 2mb |L0.287|" @@ -875,50 +876,48 @@ async fn many_good_size_l0_files() { - "L0.253[252,253] 253ns 2mb |L0.253| " - "L0.252[251,252] 252ns 2mb |L0.252| " - "L0.251[250,251] 251ns 2mb |L0.251| " - - "L0.250[249,250] 250ns 2mb |L0.250| " - "L1 " - - "L1.294[202,249] 249ns 99mb|--------------------L1.294---------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 177mb total:" + - "L1.294[202,250] 250ns 98mb|---------------------L1.294---------------------| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 174mb total:" - "L1 " - - "L1.?[202,251] 288ns 101mb|----------------------L1.?-----------------------| " - - "L1.?[252,288] 288ns 76mb |---------------L1.?----------------| " + - "L1.?[202,252] 288ns 102mb|-----------------------L1.?-----------------------| " + - "L1.?[253,288] 288ns 72mb |---------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 40 files: L0.250, L0.251, L0.252, L0.253, L0.254, L0.255, L0.256, L0.257, L0.258, L0.259, L0.260, L0.261, L0.262, L0.263, L0.264, L0.265, L0.266, L0.267, L0.268, L0.269, L0.270, L0.271, L0.272, L0.273, L0.274, L0.275, L0.276, L0.277, L0.278, L0.279, L0.280, L0.281, L0.282, L0.283, L0.284, L0.285, L0.286, L0.287, L0.288, L1.294" + - " Soft Deleting 39 files: L0.251, L0.252, L0.253, L0.254, L0.255, L0.256, L0.257, L0.258, L0.259, L0.260, L0.261, L0.262, L0.263, L0.264, L0.265, L0.266, L0.267, L0.268, L0.269, L0.270, L0.271, L0.272, L0.273, L0.274, L0.275, L0.276, L0.277, L0.278, L0.279, L0.280, L0.281, L0.282, L0.283, L0.284, L0.285, L0.286, L0.287, L0.288, L1.294" - " Creating 2 files" - - "**** Simulation run 3, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[102, 153]). 3 Input Files, 299mb total:" + - "**** Simulation run 3, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[101]). 
2 Input Files, 201mb total:" - "L1 " - - "L1.290[51,100] 150ns 98mb|----------L1.290-----------| " - - "L1.292[101,151] 249ns 101mb |-----------L1.292-----------| " - - "L1.293[152,201] 249ns 99mb |----------L1.293-----------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 299mb total:" + - "L1.290[51,100] 150ns 99mb|------------------L1.290------------------| " + - "L1.292[101,151] 250ns 102mb |------------------L1.292-------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 201mb total:" - "L2 " - - "L2.?[51,102] 249ns 102mb |------------L2.?------------| " - - "L2.?[103,153] 249ns 100mb |------------L2.?------------| " - - "L2.?[154,201] 249ns 98mb |-----------L2.?-----------| " + - "L2.?[51,101] 250ns 102mb |-------------------L2.?--------------------| " + - "L2.?[102,151] 250ns 100mb |-------------------L2.?-------------------| " - "Committing partition 1:" - - " Soft Deleting 3 files: L1.290, L1.292, L1.293" + - " Soft Deleting 2 files: L1.290, L1.292" - " Upgrading 1 files level to CompactionLevel::L2: L1.289" - - " Creating 3 files" - - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[280]). 1 Input Files, 76mb total:" - - "L1, all files 76mb " - - "L1.296[252,288] 288ns |-----------------------------------------L1.296-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 76mb total:" + - " Creating 2 files" + - "**** Simulation run 4, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[202, 252]). 3 Input Files, 274mb total:" + - "L1 " + - "L1.296[253,288] 288ns 72mb |-------L1.296--------| " + - "L1.293[152,201] 250ns 100mb|------------L1.293------------| " + - "L1.295[202,252] 288ns 102mb |------------L1.295-------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 274mb total:" - "L2 " - - "L2.?[252,280] 288ns 59mb |--------------------------------L2.?--------------------------------| " - - "L2.?[281,288] 288ns 17mb |-----L2.?------| " + - "L2.?[152,202] 288ns 102mb|-------------L2.?--------------| " + - "L2.?[203,252] 288ns 100mb |-------------L2.?-------------| " + - "L2.?[253,288] 288ns 72mb |--------L2.?---------| " - "Committing partition 1:" - - " Soft Deleting 1 files: L1.296" - - " Upgrading 1 files level to CompactionLevel::L2: L1.295" - - " Creating 2 files" - - "**** Final Output Files (1.13gb written)" + - " Soft Deleting 3 files: L1.293, L1.295, L1.296" + - " Creating 3 files" + - "**** Final Output Files (1.22gb written)" - "L2 " - - "L2.289[0,50] 150ns 100mb |---L2.289----| " - - "L2.295[202,251] 288ns 101mb |---L2.295----| " - - "L2.297[51,102] 249ns 102mb |---L2.297----| " - - "L2.298[103,153] 249ns 100mb |---L2.298----| " - - "L2.299[154,201] 249ns 98mb |---L2.299---| " - - "L2.300[252,280] 288ns 59mb |L2.300| " - - "L2.301[281,288] 288ns 17mb |L2.301|" + - "L2.289[0,50] 150ns 101mb |---L2.289----| " + - "L2.297[51,101] 250ns 102mb |---L2.297----| " + - "L2.298[102,151] 250ns 100mb |---L2.298----| " + - "L2.299[152,202] 288ns 102mb |---L2.299----| " + - "L2.300[203,252] 288ns 100mb |---L2.300----| " + - "L2.301[253,288] 288ns 72mb |-L2.301-| " "### ); } diff --git a/compactor/tests/layouts/many_files.rs b/compactor/tests/layouts/many_files.rs index 47f724fcd5..e7bc77fbaa 100644 --- a/compactor/tests/layouts/many_files.rs +++ b/compactor/tests/layouts/many_files.rs @@ -420,15 +420,15 @@ async fn many_l1_files() { - "L1.24[24,25] 23ns 13mb |L1.24| " - "**** 2 Output Files 
(parquet_file_id not yet assigned), 203mb total:" - "L2 " - - "L2.?[0,20] 23ns 104mb |--------------------L2.?--------------------| " - - "L2.?[21,39] 23ns 99mb |-----------------L2.?------------------| " + - "L2.?[0,20] 23ns 107mb |--------------------L2.?--------------------| " + - "L2.?[21,39] 23ns 96mb |-----------------L2.?------------------| " - "Committing partition 1:" - " Soft Deleting 20 files: L1.1, L1.2, L1.3, L1.4, L1.5, L1.6, L1.7, L1.8, L1.9, L1.10, L1.11, L1.12, L1.14, L1.15, L1.16, L1.17, L1.18, L1.19, L1.20, L1.24" - " Creating 2 files" - "**** Final Output Files (216mb written)" - "L2 " - - "L2.25[0,20] 23ns 104mb |-------------------L2.25--------------------| " - - "L2.26[21,39] 23ns 99mb |-----------------L2.26-----------------| " + - "L2.25[0,20] 23ns 107mb |-------------------L2.25--------------------| " + - "L2.26[21,39] 23ns 96mb |-----------------L2.26-----------------| " "### ); } @@ -4670,17 +4670,17 @@ async fn l0s_almost_needing_vertical_split() { - "L0.998[24,100] 1.02us |-----------------------------------------L0.998-----------------------------------------|" - "L0.999[24,100] 1.02us |-----------------------------------------L0.999-----------------------------------------|" - "L0.1000[24,100] 1.02us |----------------------------------------L0.1000-----------------------------------------|" - - "**** Final Output Files (5.23gb written)" + - "**** Final Output Files (5.13gb written)" + - "L1 " + - "L1.3091[92,100] 1.02us 88mb |L1.3091| " - "L2 " - - "L2.3086[24,35] 1.02us 102mb|--L2.3086--| " - - "L2.3095[36,47] 1.02us 105mb |--L2.3095--| " - - "L2.3096[48,58] 1.02us 95mb |-L2.3096-| " - - "L2.3097[59,65] 1.02us 76mb |L2.3097| " - - "L2.3098[66,76] 1.02us 106mb |-L2.3098-| " - - "L2.3099[77,86] 1.02us 96mb |L2.3099-| " - - "L2.3100[87,90] 1.02us 53mb |L2.3100| " - - "L2.3101[91,98] 1.02us 90mb |L2.3101| " - - "L2.3102[99,100] 1.02us 26mb |L2.3102|" + - "L2.3084[24,34] 1.02us 107mb|-L2.3084-| " + - "L2.3092[35,45] 1.02us 107mb |-L2.3092-| " + - "L2.3093[46,55] 1.02us 97mb |L2.3093-| " + - "L2.3094[56,63] 1.02us 78mb |L2.3094| " + - "L2.3095[64,74] 1.02us 107mb |-L2.3095-| " + - "L2.3096[75,84] 1.02us 97mb |L2.3096-| " + - "L2.3097[85,91] 1.02us 68mb |L2.3097| " "### ); } diff --git a/compactor/tests/layouts/stuck.rs b/compactor/tests/layouts/stuck.rs index 0ad6f71a8d..0ad19a1e7e 100644 --- a/compactor/tests/layouts/stuck.rs +++ b/compactor/tests/layouts/stuck.rs @@ -1114,73 +1114,73 @@ async fn stuck_l0() { - "**** Final Output Files (41.44gb written)" - "L2 " - "L2.1506[1686841379000000000,1686841830146391412] 1686936871.55s 100mb|L2.1506| " - - "L2.1615[1686841830146391413,1686842272734755186] 1686936871.55s 100mb |L2.1615| " - - "L2.1616[1686842272734755187,1686842715323118959] 1686936871.55s 100mb |L2.1616| " - - "L2.1617[1686842715323118960,1686843089433843333] 1686936871.55s 85mb |L2.1617| " - - "L2.1631[1686843089433843334,1686843544819541615] 1686936871.55s 100mb |L2.1631| " - - "L2.1632[1686843544819541616,1686844000205239896] 1686936871.55s 100mb |L2.1632| " - - "L2.1633[1686844000205239897,1686844006223321786] 1686936871.55s 1mb |L2.1633| " + - "L2.1615[1686841830146391413,1686842272734754598] 1686936871.55s 100mb |L2.1615| " + - "L2.1616[1686842272734754599,1686842715323117783] 1686936871.55s 100mb |L2.1616| " + - "L2.1617[1686842715323117784,1686843089433841661] 1686936871.55s 85mb |L2.1617| " + - "L2.1631[1686843089433841662,1686843544819538616] 1686936871.55s 100mb |L2.1631| " + - "L2.1632[1686843544819538617,1686844000205235570] 1686936871.55s 
100mb |L2.1632| " + - "L2.1633[1686844000205235571,1686844006223321786] 1686936871.55s 1mb |L2.1633| " - "L2.1634[1686844006223321787,1686844558565925583] 1686936871.55s 100mb |L2.1634| " - "L2.1635[1686844558565925584,1686845110908529379] 1686936871.55s 100mb |L2.1635| " - "L2.1636[1686845110908529380,1686845399754314022] 1686936871.55s 52mb |L2.1636| " - - "L2.1637[1686845399754314023,1686845828734678291] 1686936871.55s 100mb |L2.1637| " - - "L2.1638[1686845828734678292,1686846257715042559] 1686936871.55s 100mb |L2.1638| " - - "L2.1639[1686846257715042560,1686846333021888151] 1686936871.55s 18mb |L2.1639| " - - "L2.1640[1686846333021888152,1686846691134015225] 1686936871.55s 100mb |L2.1640| " - - "L2.1641[1686846691134015226,1686847049246142298] 1686936871.55s 100mb |L2.1641| " - - "L2.1642[1686847049246142299,1686847215718005883] 1686936871.55s 46mb |L2.1642| " - - "L2.1643[1686847215718005884,1686847702600237277] 1686936871.55s 100mb |L2.1643| " - - "L2.1644[1686847702600237278,1686848189482468670] 1686936871.55s 100mb |L2.1644| " - - "L2.1645[1686848189482468671,1686848513620960849] 1686936871.55s 67mb |L2.1645| " - - "L2.1656[1686848513620960850,1686849040083878200] 1686936871.55s 100mb |L2.1656| " - - "L2.1657[1686849040083878201,1686849561540101956] 1686936871.55s 99mb |L2.1657| " - - "L2.1658[1686849561540101957,1686849962539875179] 1686936871.55s 100mb |L2.1658| " - - "L2.1659[1686849962539875180,1686850363539648401] 1686936871.55s 100mb |L2.1659| " - - "L2.1660[1686850363539648402,1686850403000000000] 1686936871.55s 10mb |L2.1660| " - - "L2.1683[1686850403000000001,1686850839595749708] 1686936871.55s 100mb |L2.1683| " - - "L2.1684[1686850839595749709,1686851276191499415] 1686936871.55s 100mb |L2.1684| " - - "L2.1685[1686851276191499416,1686851529568101662] 1686936871.55s 58mb |L2.1685| " + - "L2.1637[1686845399754314023,1686845828734675041] 1686936871.55s 100mb |L2.1637| " + - "L2.1638[1686845828734675042,1686846257715036059] 1686936871.55s 100mb |L2.1638| " + - "L2.1639[1686846257715036060,1686846333021885171] 1686936871.55s 18mb |L2.1639| " + - "L2.1640[1686846333021885172,1686846691134013454] 1686936871.55s 100mb |L2.1640| " + - "L2.1641[1686846691134013455,1686847049246141736] 1686936871.55s 100mb |L2.1641| " + - "L2.1642[1686847049246141737,1686847215718005883] 1686936871.55s 46mb |L2.1642| " + - "L2.1643[1686847215718005884,1686847702600239065] 1686936871.55s 100mb |L2.1643| " + - "L2.1644[1686847702600239066,1686848189482472246] 1686936871.55s 100mb |L2.1644| " + - "L2.1645[1686848189482472247,1686848513620965616] 1686936871.55s 67mb |L2.1645| " + - "L2.1656[1686848513620965617,1686849040083884614] 1686936871.55s 100mb |L2.1656| " + - "L2.1657[1686849040083884615,1686849561540115022] 1686936871.55s 99mb |L2.1657| " + - "L2.1658[1686849561540115023,1686849962539887485] 1686936871.55s 100mb |L2.1658| " + - "L2.1659[1686849962539887486,1686850363539659947] 1686936871.55s 100mb |L2.1659| " + - "L2.1660[1686850363539659948,1686850403000000000] 1686936871.55s 10mb |L2.1660| " + - "L2.1683[1686850403000000001,1686850839595751321] 1686936871.55s 100mb |L2.1683| " + - "L2.1684[1686850839595751322,1686851276191502641] 1686936871.55s 100mb |L2.1684| " + - "L2.1685[1686851276191502642,1686851529568101662] 1686936871.55s 58mb |L2.1685| " - "L2.1692[1686851529568101663,1686852034790162698] 1686936871.55s 100mb |L2.1692| " - "L2.1693[1686852034790162699,1686852540012223733] 1686936871.55s 100mb |L2.1693| " - "L2.1694[1686852540012223734,1686852699540540531] 1686936871.55s 32mb |L2.1694| 
" - - "L2.1707[1686852699540540532,1686853178683863953] 1686936871.55s 100mb |L2.1707| " - - "L2.1708[1686853178683863954,1686853657827187374] 1686936871.55s 100mb |L2.1708| " - - "L2.1709[1686853657827187375,1686853937177044706] 1686936871.55s 58mb |L2.1709| " - - "L2.1710[1686853937177044707,1686854376975632961] 1686936871.55s 100mb |L2.1710| " - - "L2.1711[1686854376975632962,1686854816774221215] 1686936871.55s 100mb |L2.1711| " - - "L2.1712[1686854816774221216,1686854951811508537] 1686936871.55s 31mb |L2.1712| " - - "L2.1713[1686854951811508538,1686855334994521682] 1686936871.55s 100mb |L2.1713| " - - "L2.1714[1686855334994521683,1686855718177534826] 1686936871.55s 100mb |L2.1714| " - - "L2.1715[1686855718177534827,1686855814952948639] 1686936871.55s 25mb |L2.1715| " - - "L2.1716[1686855814952948640,1686856242828115252] 1686936871.55s 100mb |L2.1716| " - - "L2.1717[1686856242828115253,1686856670703281864] 1686936871.55s 100mb |L2.1717| " - - "L2.1718[1686856670703281865,1686857061484417402] 1686936871.55s 91mb |L2.1718| " - - "L2.1721[1686857061484417403,1686857533170366744] 1686936871.55s 100mb |L2.1721| " - - "L2.1722[1686857533170366745,1686858004856316085] 1686936871.55s 100mb |L2.1722| " - - "L2.1723[1686858004856316086,1686858110845670602] 1686936871.55s 22mb |L2.1723| " - - "L2.1724[1686858110845670603,1686858549633350154] 1686936871.55s 100mb |L2.1724| " - - "L2.1725[1686858549633350155,1686858988421029705] 1686936871.55s 100mb |L2.1725| " - - "L2.1726[1686858988421029706,1686859260137995384] 1686936871.55s 62mb |L2.1726| " - - "L2.1727[1686859260137995385,1686859794383304379] 1686936871.55s 100mb |L2.1727| " - - "L2.1728[1686859794383304380,1686860328628613373] 1686936871.55s 100mb |L2.1728| " - - "L2.1729[1686860328628613374,1686860846662278383] 1686936871.55s 97mb |L2.1729| " - - "L2.1734[1686860846662278384,1686861228052663290] 1686936871.55s 100mb |L2.1734| " - - "L2.1735[1686861228052663291,1686861609443048196] 1686936871.55s 100mb |L2.1735| " - - "L2.1736[1686861609443048197,1686861812108731550] 1686936871.55s 53mb |L2.1736| " - - "L2.1737[1686861812108731551,1686862208555553207] 1686936871.55s 100mb |L2.1737| " - - "L2.1738[1686862208555553208,1686862605002374863] 1686936871.55s 100mb |L2.1738| " - - "L2.1739[1686862605002374864,1686862618002078989] 1686936871.55s 3mb |L2.1739| " - - "L2.1740[1686862618002078990,1686863086619859827] 1686936871.55s 100mb |L2.1740| " - - "L2.1741[1686863086619859828,1686863555237640664] 1686936871.55s 100mb |L2.1741| " - - "L2.1742[1686863555237640665,1686863699000000000] 1686936871.55s 31mb |L2.1742| " - - "L2.1743[1686863699000000001,1686865135092425370] 1686936871.55s 100mb |L2.1743| " - - "L2.1744[1686865135092425371,1686866571184850739] 1686936871.55s 100mb |L2.1744| " - - "L2.1745[1686866571184850740,1686867839000000000] 1686936871.55s 88mb |L2.1745| " + - "L2.1707[1686852699540540532,1686853178683862218] 1686936871.55s 100mb |L2.1707| " + - "L2.1708[1686853178683862219,1686853657827183904] 1686936871.55s 100mb |L2.1708| " + - "L2.1709[1686853657827183905,1686853937177035656] 1686936871.55s 58mb |L2.1709| " + - "L2.1710[1686853937177035657,1686854376975623947] 1686936871.55s 100mb |L2.1710| " + - "L2.1711[1686854376975623948,1686854816774212237] 1686936871.55s 100mb |L2.1711| " + - "L2.1712[1686854816774212238,1686854951811503765] 1686936871.55s 31mb |L2.1712| " + - "L2.1713[1686854951811503766,1686855334994518642] 1686936871.55s 100mb |L2.1713| " + - "L2.1714[1686855334994518643,1686855718177533518] 1686936871.55s 100mb |L2.1714| " 
+ - "L2.1715[1686855718177533519,1686855814952951422] 1686936871.55s 25mb |L2.1715| " + - "L2.1716[1686855814952951423,1686856242828120943] 1686936871.55s 100mb |L2.1716| " + - "L2.1717[1686856242828120944,1686856670703290463] 1686936871.55s 100mb |L2.1717| " + - "L2.1718[1686856670703290464,1686857061484420497] 1686936871.55s 91mb |L2.1718| " + - "L2.1721[1686857061484420498,1686857533170368523] 1686936871.55s 100mb |L2.1721| " + - "L2.1722[1686857533170368524,1686858004856316548] 1686936871.55s 100mb |L2.1722| " + - "L2.1723[1686858004856316549,1686858110845666271] 1686936871.55s 22mb |L2.1723| " + - "L2.1724[1686858110845666272,1686858549633346816] 1686936871.55s 100mb |L2.1724| " + - "L2.1725[1686858549633346817,1686858988421027360] 1686936871.55s 100mb |L2.1725| " + - "L2.1726[1686858988421027361,1686859260138002023] 1686936871.55s 62mb |L2.1726| " + - "L2.1727[1686859260138002024,1686859794383312214] 1686936871.55s 100mb |L2.1727| " + - "L2.1728[1686859794383312215,1686860328628622404] 1686936871.55s 100mb |L2.1728| " + - "L2.1729[1686860328628622405,1686860846662278383] 1686936871.55s 97mb |L2.1729| " + - "L2.1734[1686860846662278384,1686861228052662060] 1686936871.55s 100mb |L2.1734| " + - "L2.1735[1686861228052662061,1686861609443045736] 1686936871.55s 100mb |L2.1735| " + - "L2.1736[1686861609443045737,1686861812108728437] 1686936871.55s 53mb |L2.1736| " + - "L2.1737[1686861812108728438,1686862208555549766] 1686936871.55s 100mb |L2.1737| " + - "L2.1738[1686862208555549767,1686862605002371094] 1686936871.55s 100mb |L2.1738| " + - "L2.1739[1686862605002371095,1686862618002078989] 1686936871.55s 3mb |L2.1739| " + - "L2.1740[1686862618002078990,1686863086619857890] 1686936871.55s 100mb |L2.1740| " + - "L2.1741[1686863086619857891,1686863555237636790] 1686936871.55s 100mb |L2.1741| " + - "L2.1742[1686863555237636791,1686863699000000000] 1686936871.55s 31mb |L2.1742| " + - "L2.1743[1686863699000000001,1686865135092420619] 1686936871.55s 100mb |L2.1743| " + - "L2.1744[1686865135092420620,1686866571184841237] 1686936871.55s 100mb |L2.1744| " + - "L2.1745[1686866571184841238,1686867839000000000] 1686936871.55s 88mb |L2.1745| " - "L2.1746[1686867839000000001,1686868223000000000] 1686936871.55s 30mb |L2.1746| " - "L2.1747[1686868223000000001,1686868319000000000] 1686936871.55s 8mb |L2.1747| " - - "L2.1782[1686868319000000001,1686869127171208393] 1686936871.55s 100mb |L2.1782| " - - "L2.1783[1686869127171208394,1686869935342416785] 1686936871.55s 100mb |L2.1783| " - - "L2.1784[1686869935342416786,1686870115756756731] 1686936871.55s 22mb |L2.1784| " + - "L2.1782[1686868319000000001,1686869127171211860] 1686936871.55s 100mb |L2.1782| " + - "L2.1783[1686869127171211861,1686869935342423719] 1686936871.55s 100mb |L2.1783| " + - "L2.1784[1686869935342423720,1686870115756756731] 1686936871.55s 22mb |L2.1784| " - "L2.1785[1686870115756756732,1686870608246428569] 1686936871.55s 100mb |L2.1785| " - "L2.1786[1686870608246428570,1686871100736100406] 1686936871.55s 100mb |L2.1786|" - "L2.1787[1686871100736100407,1686871497752641904] 1686936871.55s 81mb |L2.1787|" @@ -7389,7 +7389,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 0ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 0ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 0ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 0ns 9mb |-L0.?-| " - "**** Simulation run 292, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 
1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.2[1,2000] 1ns |------------------------------------------L0.2------------------------------------------|" @@ -7397,7 +7397,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 1ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 1ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 1ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 1ns 9mb |-L0.?-| " - "**** Simulation run 293, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.3[1,2000] 2ns |------------------------------------------L0.3------------------------------------------|" @@ -7405,7 +7405,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 2ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 2ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 2ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 2ns 9mb |-L0.?-| " - "**** Simulation run 294, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.4[1,2000] 3ns |------------------------------------------L0.4------------------------------------------|" @@ -7413,7 +7413,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 3ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 3ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 3ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 3ns 9mb |-L0.?-| " - "**** Simulation run 295, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.5[1,2000] 4ns |------------------------------------------L0.5------------------------------------------|" @@ -7421,7 +7421,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 4ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 4ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 4ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 4ns 9mb |-L0.?-| " - "**** Simulation run 296, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.6[1,2000] 5ns |------------------------------------------L0.6------------------------------------------|" @@ -7429,7 +7429,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 5ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 5ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 5ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 5ns 9mb |-L0.?-| " - "**** Simulation run 297, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.7[1,2000] 6ns |------------------------------------------L0.7------------------------------------------|" @@ -7437,7 +7437,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 6ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 6ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 6ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 6ns 9mb |-L0.?-| " - "**** Simulation run 298, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 
1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.8[1,2000] 7ns |------------------------------------------L0.8------------------------------------------|" @@ -7445,7 +7445,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 7ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 7ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 7ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 7ns 9mb |-L0.?-| " - "**** Simulation run 299, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.9[1,2000] 8ns |------------------------------------------L0.9------------------------------------------|" @@ -7453,7 +7453,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 8ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 8ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 8ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 8ns 9mb |-L0.?-| " - "**** Simulation run 300, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.10[1,2000] 9ns |-----------------------------------------L0.10------------------------------------------|" @@ -7461,7 +7461,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 9ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 9ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 9ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 9ns 9mb |-L0.?-| " - "**** Simulation run 301, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.11[1,2000] 10ns |-----------------------------------------L0.11------------------------------------------|" @@ -7469,7 +7469,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 10ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 10ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 10ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 10ns 9mb |-L0.?-| " - "**** Simulation run 302, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.12[1,2000] 11ns |-----------------------------------------L0.12------------------------------------------|" @@ -7477,7 +7477,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 11ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 11ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 11ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 11ns 9mb |-L0.?-| " - "**** Simulation run 303, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.13[1,2000] 12ns |-----------------------------------------L0.13------------------------------------------|" @@ -7485,7 +7485,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 12ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 12ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 12ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 12ns 9mb |-L0.?-| " - "**** Simulation run 304, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 
1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.14[1,2000] 13ns |-----------------------------------------L0.14------------------------------------------|" @@ -7493,7 +7493,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 13ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 13ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 13ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 13ns 9mb |-L0.?-| " - "**** Simulation run 305, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.15[1,2000] 14ns |-----------------------------------------L0.15------------------------------------------|" @@ -7501,7 +7501,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 14ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 14ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 14ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 14ns 9mb |-L0.?-| " - "**** Simulation run 306, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.16[1,2000] 15ns |-----------------------------------------L0.16------------------------------------------|" @@ -7509,7 +7509,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 15ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 15ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 15ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 15ns 9mb |-L0.?-| " - "**** Simulation run 307, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.17[1,2000] 16ns |-----------------------------------------L0.17------------------------------------------|" @@ -7517,7 +7517,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 16ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 16ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 16ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 16ns 9mb |-L0.?-| " - "**** Simulation run 308, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.18[1,2000] 17ns |-----------------------------------------L0.18------------------------------------------|" @@ -7525,7 +7525,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 17ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 17ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 17ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 17ns 9mb |-L0.?-| " - "**** Simulation run 309, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.19[1,2000] 18ns |-----------------------------------------L0.19------------------------------------------|" @@ -7533,7 +7533,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 18ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 18ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 18ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 18ns 9mb |-L0.?-| " - "**** Simulation run 310, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811]). 
1 Input Files, 100mb total:" - "L0, all files 100mb " - "L0.20[1,2000] 19ns |-----------------------------------------L0.20------------------------------------------|" @@ -7541,7 +7541,7 @@ async fn stuck_l0_large_l0s() { - "L0 " - "L0.?[1,906] 19ns 45mb |-----------------L0.?-----------------| " - "L0.?[907,1811] 19ns 45mb |-----------------L0.?-----------------| " - - "L0.?[1812,2000] 19ns 10mb |-L0.?-| " + - "L0.?[1812,2000] 19ns 9mb |-L0.?-| " - "**** Simulation run 311, type=split(HighL0OverlapTotalBacklog)(split_times=[906, 1811, 2716, 3621, 4526, 5431, 6336, 7241, 8146, 9051, 9956, 10861]). 1 Input Files, 12b total:" - "L0, all files 12b " - "L0.2183[20,11776] 153ns |----------------------------------------L0.2183----------------------------------------| " @@ -8450,8 +8450,8 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 20 files: L0.2293, L0.2306, L0.2319, L0.2332, L0.2345, L0.2358, L0.2371, L0.2384, L0.2397, L0.2410, L0.2423, L0.2436, L0.2449, L0.2462, L0.2475, L0.2488, L0.2501, L0.2514, L0.2527, L0.2540" - " Creating 1 files" - - "**** Simulation run 360, type=compact(ManySmallFiles). 20 Input Files, 190mb total:" - - "L0, all files 10mb " + - "**** Simulation run 360, type=compact(ManySmallFiles). 20 Input Files, 189mb total:" + - "L0, all files 9mb " - "L0.2234[1812,2000] 0ns |----------------------------------------L0.2234-----------------------------------------|" - "L0.2237[1812,2000] 1ns |----------------------------------------L0.2237-----------------------------------------|" - "L0.2240[1812,2000] 2ns |----------------------------------------L0.2240-----------------------------------------|" @@ -8472,8 +8472,8 @@ async fn stuck_l0_large_l0s() { - "L0.2285[1812,2000] 17ns |----------------------------------------L0.2285-----------------------------------------|" - "L0.2288[1812,2000] 18ns |----------------------------------------L0.2288-----------------------------------------|" - "L0.2291[1812,2000] 19ns |----------------------------------------L0.2291-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 190mb total:" - - "L0, all files 190mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 189mb total:" + - "L0, all files 189mb " - "L0.?[1812,2000] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2234, L0.2237, L0.2240, L0.2243, L0.2246, L0.2249, L0.2252, L0.2255, L0.2258, L0.2261, L0.2264, L0.2267, L0.2270, L0.2273, L0.2276, L0.2279, L0.2282, L0.2285, L0.2288, L0.2291" @@ -9459,14 +9459,14 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 20 files: L0.2553, L0.2566, L0.2579, L0.2592, L0.2605, L0.2618, L0.2631, L0.2644, L0.2657, L0.2670, L0.2683, L0.2696, L0.2709, L0.2722, L0.2735, L0.2748, L0.2761, L0.2774, L0.2787, L0.2904" - " Creating 1 files" - - "**** Simulation run 408, type=compact(ManySmallFiles). 4 Input Files, 190mb total:" + - "**** Simulation run 408, type=compact(ManySmallFiles). 
4 Input Files, 189mb total:" - "L0 " - - "L0.2905[1812,2000] 19ns 190mb|----L0.2905-----| " + - "L0.2905[1812,2000] 19ns 189mb|----L0.2905-----| " - "L0.2906[1812,2716] 172ns 0b|----------------------------------------L0.2906-----------------------------------------|" - "L0.2907[1812,2716] 192ns 0b|----------------------------------------L0.2907-----------------------------------------|" - "L0.2908[1812,2716] 199ns 0b|----------------------------------------L0.2908-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 190mb total:" - - "L0, all files 190mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 189mb total:" + - "L0, all files 189mb " - "L0.?[1812,2716] 199ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 4 files: L0.2905, L0.2906, L0.2907, L0.2908" @@ -9926,7 +9926,7 @@ async fn stuck_l0_large_l0s() { - "L0.2233[907,1811] 0ns |----------------------------------------L0.2233-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 0ns 6mb |---L0.?---| " + - "L0.?[907,1036] 0ns 7mb |---L0.?---| " - "L0.?[1037,1165] 0ns 6mb |---L0.?---| " - "L0.?[1166,1294] 0ns 6mb |---L0.?---| " - "L0.?[1295,1423] 0ns 6mb |---L0.?---| " @@ -9938,7 +9938,7 @@ async fn stuck_l0_large_l0s() { - "L0.2236[907,1811] 1ns |----------------------------------------L0.2236-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 1ns 6mb |---L0.?---| " + - "L0.?[907,1036] 1ns 7mb |---L0.?---| " - "L0.?[1037,1165] 1ns 6mb |---L0.?---| " - "L0.?[1166,1294] 1ns 6mb |---L0.?---| " - "L0.?[1295,1423] 1ns 6mb |---L0.?---| " @@ -9950,7 +9950,7 @@ async fn stuck_l0_large_l0s() { - "L0.2239[907,1811] 2ns |----------------------------------------L0.2239-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 2ns 6mb |---L0.?---| " + - "L0.?[907,1036] 2ns 7mb |---L0.?---| " - "L0.?[1037,1165] 2ns 6mb |---L0.?---| " - "L0.?[1166,1294] 2ns 6mb |---L0.?---| " - "L0.?[1295,1423] 2ns 6mb |---L0.?---| " @@ -9962,7 +9962,7 @@ async fn stuck_l0_large_l0s() { - "L0.2242[907,1811] 3ns |----------------------------------------L0.2242-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 3ns 6mb |---L0.?---| " + - "L0.?[907,1036] 3ns 7mb |---L0.?---| " - "L0.?[1037,1165] 3ns 6mb |---L0.?---| " - "L0.?[1166,1294] 3ns 6mb |---L0.?---| " - "L0.?[1295,1423] 3ns 6mb |---L0.?---| " @@ -9974,7 +9974,7 @@ async fn stuck_l0_large_l0s() { - "L0.2245[907,1811] 4ns |----------------------------------------L0.2245-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 4ns 6mb |---L0.?---| " + - "L0.?[907,1036] 4ns 7mb |---L0.?---| " - "L0.?[1037,1165] 4ns 6mb |---L0.?---| " - "L0.?[1166,1294] 4ns 6mb |---L0.?---| " - "L0.?[1295,1423] 4ns 6mb |---L0.?---| " @@ -9986,7 +9986,7 @@ async fn stuck_l0_large_l0s() { - "L0.2248[907,1811] 5ns |----------------------------------------L0.2248-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 5ns 6mb |---L0.?---| " + - "L0.?[907,1036] 
5ns 7mb |---L0.?---| " - "L0.?[1037,1165] 5ns 6mb |---L0.?---| " - "L0.?[1166,1294] 5ns 6mb |---L0.?---| " - "L0.?[1295,1423] 5ns 6mb |---L0.?---| " @@ -9998,7 +9998,7 @@ async fn stuck_l0_large_l0s() { - "L0.2251[907,1811] 6ns |----------------------------------------L0.2251-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 6ns 6mb |---L0.?---| " + - "L0.?[907,1036] 6ns 7mb |---L0.?---| " - "L0.?[1037,1165] 6ns 6mb |---L0.?---| " - "L0.?[1166,1294] 6ns 6mb |---L0.?---| " - "L0.?[1295,1423] 6ns 6mb |---L0.?---| " @@ -10010,7 +10010,7 @@ async fn stuck_l0_large_l0s() { - "L0.2254[907,1811] 7ns |----------------------------------------L0.2254-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 7ns 6mb |---L0.?---| " + - "L0.?[907,1036] 7ns 7mb |---L0.?---| " - "L0.?[1037,1165] 7ns 6mb |---L0.?---| " - "L0.?[1166,1294] 7ns 6mb |---L0.?---| " - "L0.?[1295,1423] 7ns 6mb |---L0.?---| " @@ -10022,7 +10022,7 @@ async fn stuck_l0_large_l0s() { - "L0.2257[907,1811] 8ns |----------------------------------------L0.2257-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 8ns 6mb |---L0.?---| " + - "L0.?[907,1036] 8ns 7mb |---L0.?---| " - "L0.?[1037,1165] 8ns 6mb |---L0.?---| " - "L0.?[1166,1294] 8ns 6mb |---L0.?---| " - "L0.?[1295,1423] 8ns 6mb |---L0.?---| " @@ -10034,7 +10034,7 @@ async fn stuck_l0_large_l0s() { - "L0.2260[907,1811] 9ns |----------------------------------------L0.2260-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 9ns 6mb |---L0.?---| " + - "L0.?[907,1036] 9ns 7mb |---L0.?---| " - "L0.?[1037,1165] 9ns 6mb |---L0.?---| " - "L0.?[1166,1294] 9ns 6mb |---L0.?---| " - "L0.?[1295,1423] 9ns 6mb |---L0.?---| " @@ -10046,7 +10046,7 @@ async fn stuck_l0_large_l0s() { - "L0.2263[907,1811] 10ns |----------------------------------------L0.2263-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 10ns 6mb |---L0.?---| " + - "L0.?[907,1036] 10ns 7mb |---L0.?---| " - "L0.?[1037,1165] 10ns 6mb |---L0.?---| " - "L0.?[1166,1294] 10ns 6mb |---L0.?---| " - "L0.?[1295,1423] 10ns 6mb |---L0.?---| " @@ -10061,7 +10061,7 @@ async fn stuck_l0_large_l0s() { - "L0.2266[907,1811] 11ns |----------------------------------------L0.2266-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 11ns 6mb |---L0.?---| " + - "L0.?[907,1036] 11ns 7mb |---L0.?---| " - "L0.?[1037,1165] 11ns 6mb |---L0.?---| " - "L0.?[1166,1294] 11ns 6mb |---L0.?---| " - "L0.?[1295,1423] 11ns 6mb |---L0.?---| " @@ -10073,7 +10073,7 @@ async fn stuck_l0_large_l0s() { - "L0.2269[907,1811] 12ns |----------------------------------------L0.2269-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 12ns 6mb |---L0.?---| " + - "L0.?[907,1036] 12ns 7mb |---L0.?---| " - "L0.?[1037,1165] 12ns 6mb |---L0.?---| " - "L0.?[1166,1294] 12ns 6mb |---L0.?---| " - "L0.?[1295,1423] 12ns 6mb |---L0.?---| " @@ -10085,7 +10085,7 @@ async fn stuck_l0_large_l0s() { - "L0.2272[907,1811] 13ns 
|----------------------------------------L0.2272-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 13ns 6mb |---L0.?---| " + - "L0.?[907,1036] 13ns 7mb |---L0.?---| " - "L0.?[1037,1165] 13ns 6mb |---L0.?---| " - "L0.?[1166,1294] 13ns 6mb |---L0.?---| " - "L0.?[1295,1423] 13ns 6mb |---L0.?---| " @@ -10097,7 +10097,7 @@ async fn stuck_l0_large_l0s() { - "L0.2275[907,1811] 14ns |----------------------------------------L0.2275-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 14ns 6mb |---L0.?---| " + - "L0.?[907,1036] 14ns 7mb |---L0.?---| " - "L0.?[1037,1165] 14ns 6mb |---L0.?---| " - "L0.?[1166,1294] 14ns 6mb |---L0.?---| " - "L0.?[1295,1423] 14ns 6mb |---L0.?---| " @@ -10109,7 +10109,7 @@ async fn stuck_l0_large_l0s() { - "L0.2278[907,1811] 15ns |----------------------------------------L0.2278-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 15ns 6mb |---L0.?---| " + - "L0.?[907,1036] 15ns 7mb |---L0.?---| " - "L0.?[1037,1165] 15ns 6mb |---L0.?---| " - "L0.?[1166,1294] 15ns 6mb |---L0.?---| " - "L0.?[1295,1423] 15ns 6mb |---L0.?---| " @@ -10121,7 +10121,7 @@ async fn stuck_l0_large_l0s() { - "L0.2281[907,1811] 16ns |----------------------------------------L0.2281-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 16ns 6mb |---L0.?---| " + - "L0.?[907,1036] 16ns 7mb |---L0.?---| " - "L0.?[1037,1165] 16ns 6mb |---L0.?---| " - "L0.?[1166,1294] 16ns 6mb |---L0.?---| " - "L0.?[1295,1423] 16ns 6mb |---L0.?---| " @@ -10133,7 +10133,7 @@ async fn stuck_l0_large_l0s() { - "L0.2284[907,1811] 17ns |----------------------------------------L0.2284-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 17ns 6mb |---L0.?---| " + - "L0.?[907,1036] 17ns 7mb |---L0.?---| " - "L0.?[1037,1165] 17ns 6mb |---L0.?---| " - "L0.?[1166,1294] 17ns 6mb |---L0.?---| " - "L0.?[1295,1423] 17ns 6mb |---L0.?---| " @@ -10145,7 +10145,7 @@ async fn stuck_l0_large_l0s() { - "L0.2287[907,1811] 18ns |----------------------------------------L0.2287-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 18ns 6mb |---L0.?---| " + - "L0.?[907,1036] 18ns 7mb |---L0.?---| " - "L0.?[1037,1165] 18ns 6mb |---L0.?---| " - "L0.?[1166,1294] 18ns 6mb |---L0.?---| " - "L0.?[1295,1423] 18ns 6mb |---L0.?---| " @@ -10157,7 +10157,7 @@ async fn stuck_l0_large_l0s() { - "L0.2290[907,1811] 19ns |----------------------------------------L0.2290-----------------------------------------|" - "**** 7 Output Files (parquet_file_id not yet assigned), 45mb total:" - "L0 " - - "L0.?[907,1036] 19ns 6mb |---L0.?---| " + - "L0.?[907,1036] 19ns 7mb |---L0.?---| " - "L0.?[1037,1165] 19ns 6mb |---L0.?---| " - "L0.?[1166,1294] 19ns 6mb |---L0.?---| " - "L0.?[1295,1423] 19ns 6mb |---L0.?---| " @@ -10275,7 +10275,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 18 files: L0.2266, L0.2269, L0.2272, L0.2275, L0.2278, L0.2281, L0.2284, L0.2287, L0.2290, L0.2800, L0.2813, L0.2826, L0.2839, L0.2852, L0.2865, L0.2878, L0.2891, L0.2952" - " Creating 126 files" - - "**** Simulation run 477, 
type=compact(ManySmallFiles). 20 Input Files, 129mb total:" + - "**** Simulation run 477, type=compact(ManySmallFiles). 20 Input Files, 130mb total:" - "L0, all files 6mb " - "L0.2964[1,130] 0ns |----------------------------------------L0.2964-----------------------------------------|" - "L0.2971[1,130] 1ns |----------------------------------------L0.2971-----------------------------------------|" @@ -10297,13 +10297,13 @@ async fn stuck_l0_large_l0s() { - "L0.3083[1,130] 17ns |----------------------------------------L0.3083-----------------------------------------|" - "L0.3090[1,130] 18ns |----------------------------------------L0.3090-----------------------------------------|" - "L0.3097[1,130] 19ns |----------------------------------------L0.3097-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" - - "L0, all files 129mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 130mb total:" + - "L0, all files 130mb " - "L0.?[1,130] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2964, L0.2971, L0.2978, L0.2985, L0.2992, L0.2999, L0.3006, L0.3013, L0.3020, L0.3027, L0.3034, L0.3041, L0.3048, L0.3055, L0.3062, L0.3069, L0.3076, L0.3083, L0.3090, L0.3097" - " Creating 1 files" - - "**** Simulation run 478, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 478, type=compact(ManySmallFiles). 20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.2965[131,259] 0ns |----------------------------------------L0.2965-----------------------------------------|" - "L0.2972[131,259] 1ns |----------------------------------------L0.2972-----------------------------------------|" @@ -10325,8 +10325,8 @@ async fn stuck_l0_large_l0s() { - "L0.3084[131,259] 17ns |----------------------------------------L0.3084-----------------------------------------|" - "L0.3091[131,259] 18ns |----------------------------------------L0.3091-----------------------------------------|" - "L0.3098[131,259] 19ns |----------------------------------------L0.3098-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[131,259] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2965, L0.2972, L0.2979, L0.2986, L0.2993, L0.3000, L0.3007, L0.3014, L0.3021, L0.3028, L0.3035, L0.3042, L0.3049, L0.3056, L0.3063, L0.3070, L0.3077, L0.3084, L0.3091, L0.3098" @@ -10348,7 +10348,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3105, L0.3111, L0.3117, L0.3123, L0.3129, L0.3135, L0.3141, L0.3147, L0.3153" - " Creating 1 files" - - "**** Simulation run 480, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 480, type=compact(ManySmallFiles). 
20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.2966[260,388] 0ns |----------------------------------------L0.2966-----------------------------------------|" - "L0.2973[260,388] 1ns |----------------------------------------L0.2973-----------------------------------------|" @@ -10370,8 +10370,8 @@ async fn stuck_l0_large_l0s() { - "L0.3085[260,388] 17ns |----------------------------------------L0.3085-----------------------------------------|" - "L0.3092[260,388] 18ns |----------------------------------------L0.3092-----------------------------------------|" - "L0.3099[260,388] 19ns |----------------------------------------L0.3099-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[260,388] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2966, L0.2973, L0.2980, L0.2987, L0.2994, L0.3001, L0.3008, L0.3015, L0.3022, L0.3029, L0.3036, L0.3043, L0.3050, L0.3057, L0.3064, L0.3071, L0.3078, L0.3085, L0.3092, L0.3099" @@ -10393,7 +10393,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3106, L0.3112, L0.3118, L0.3124, L0.3130, L0.3136, L0.3142, L0.3148, L0.3154" - " Creating 1 files" - - "**** Simulation run 482, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 482, type=compact(ManySmallFiles). 20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.2967[389,517] 0ns |----------------------------------------L0.2967-----------------------------------------|" - "L0.2974[389,517] 1ns |----------------------------------------L0.2974-----------------------------------------|" @@ -10415,8 +10415,8 @@ async fn stuck_l0_large_l0s() { - "L0.3086[389,517] 17ns |----------------------------------------L0.3086-----------------------------------------|" - "L0.3093[389,517] 18ns |----------------------------------------L0.3093-----------------------------------------|" - "L0.3100[389,517] 19ns |----------------------------------------L0.3100-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[389,517] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2967, L0.2974, L0.2981, L0.2988, L0.2995, L0.3002, L0.3009, L0.3016, L0.3023, L0.3030, L0.3037, L0.3044, L0.3051, L0.3058, L0.3065, L0.3072, L0.3079, L0.3086, L0.3093, L0.3100" @@ -10438,7 +10438,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3107, L0.3113, L0.3119, L0.3125, L0.3131, L0.3137, L0.3143, L0.3149, L0.3155" - " Creating 1 files" - - "**** Simulation run 484, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 484, type=compact(ManySmallFiles). 
20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.2968[518,646] 0ns |----------------------------------------L0.2968-----------------------------------------|" - "L0.2975[518,646] 1ns |----------------------------------------L0.2975-----------------------------------------|" @@ -10460,8 +10460,8 @@ async fn stuck_l0_large_l0s() { - "L0.3087[518,646] 17ns |----------------------------------------L0.3087-----------------------------------------|" - "L0.3094[518,646] 18ns |----------------------------------------L0.3094-----------------------------------------|" - "L0.3101[518,646] 19ns |----------------------------------------L0.3101-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[518,646] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2968, L0.2975, L0.2982, L0.2989, L0.2996, L0.3003, L0.3010, L0.3017, L0.3024, L0.3031, L0.3038, L0.3045, L0.3052, L0.3059, L0.3066, L0.3073, L0.3080, L0.3087, L0.3094, L0.3101" @@ -10483,7 +10483,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3108, L0.3114, L0.3120, L0.3126, L0.3132, L0.3138, L0.3144, L0.3150, L0.3156" - " Creating 1 files" - - "**** Simulation run 486, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 486, type=compact(ManySmallFiles). 20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.2969[647,775] 0ns |----------------------------------------L0.2969-----------------------------------------|" - "L0.2976[647,775] 1ns |----------------------------------------L0.2976-----------------------------------------|" @@ -10505,8 +10505,8 @@ async fn stuck_l0_large_l0s() { - "L0.3088[647,775] 17ns |----------------------------------------L0.3088-----------------------------------------|" - "L0.3095[647,775] 18ns |----------------------------------------L0.3095-----------------------------------------|" - "L0.3102[647,775] 19ns |----------------------------------------L0.3102-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[647,775] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2969, L0.2976, L0.2983, L0.2990, L0.2997, L0.3004, L0.3011, L0.3018, L0.3025, L0.3032, L0.3039, L0.3046, L0.3053, L0.3060, L0.3067, L0.3074, L0.3081, L0.3088, L0.3095, L0.3102" @@ -10528,7 +10528,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3109, L0.3115, L0.3121, L0.3127, L0.3133, L0.3139, L0.3145, L0.3151, L0.3157" - " Creating 1 files" - - "**** Simulation run 488, type=compact(ManySmallFiles). 20 Input Files, 136mb total:" + - "**** Simulation run 488, type=compact(ManySmallFiles). 
20 Input Files, 131mb total:" - "L0, all files 7mb " - "L0.2970[776,906] 0ns |----------------------------------------L0.2970-----------------------------------------|" - "L0.2977[776,906] 1ns |----------------------------------------L0.2977-----------------------------------------|" @@ -10550,14 +10550,14 @@ async fn stuck_l0_large_l0s() { - "L0.3089[776,906] 17ns |----------------------------------------L0.3089-----------------------------------------|" - "L0.3096[776,906] 18ns |----------------------------------------L0.3096-----------------------------------------|" - "L0.3103[776,906] 19ns |----------------------------------------L0.3103-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 136mb total:" - - "L0, all files 136mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 131mb total:" + - "L0, all files 131mb " - "L0.?[776,906] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.2970, L0.2977, L0.2984, L0.2991, L0.2998, L0.3005, L0.3012, L0.3019, L0.3026, L0.3033, L0.3040, L0.3047, L0.3054, L0.3061, L0.3068, L0.3075, L0.3082, L0.3089, L0.3096, L0.3103" - " Creating 1 files" - - "**** Simulation run 489, type=compact(ManySmallFiles). 20 Input Files, 129mb total:" - - "L0, all files 6mb " + - "**** Simulation run 489, type=compact(ManySmallFiles). 20 Input Files, 130mb total:" + - "L0, all files 7mb " - "L0.3159[907,1036] 0ns |----------------------------------------L0.3159-----------------------------------------|" - "L0.3166[907,1036] 1ns |----------------------------------------L0.3166-----------------------------------------|" - "L0.3173[907,1036] 2ns |----------------------------------------L0.3173-----------------------------------------|" @@ -10578,8 +10578,8 @@ async fn stuck_l0_large_l0s() { - "L0.3278[907,1036] 17ns |----------------------------------------L0.3278-----------------------------------------|" - "L0.3285[907,1036] 18ns |----------------------------------------L0.3285-----------------------------------------|" - "L0.3292[907,1036] 19ns |----------------------------------------L0.3292-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" - - "L0, all files 129mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 130mb total:" + - "L0, all files 130mb " - "L0.?[907,1036] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3159, L0.3166, L0.3173, L0.3180, L0.3187, L0.3194, L0.3201, L0.3208, L0.3215, L0.3222, L0.3229, L0.3236, L0.3243, L0.3250, L0.3257, L0.3264, L0.3271, L0.3278, L0.3285, L0.3292" @@ -10601,7 +10601,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3299, L0.3306, L0.3313, L0.3320, L0.3327, L0.3334, L0.3341, L0.3348, L0.3355" - " Creating 1 files" - - "**** Simulation run 491, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 491, type=compact(ManySmallFiles). 
20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.3160[1037,1165] 0ns |----------------------------------------L0.3160-----------------------------------------|" - "L0.3167[1037,1165] 1ns |----------------------------------------L0.3167-----------------------------------------|" @@ -10623,8 +10623,8 @@ async fn stuck_l0_large_l0s() { - "L0.3279[1037,1165] 17ns |----------------------------------------L0.3279-----------------------------------------|" - "L0.3286[1037,1165] 18ns |----------------------------------------L0.3286-----------------------------------------|" - "L0.3293[1037,1165] 19ns |----------------------------------------L0.3293-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[1037,1165] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3160, L0.3167, L0.3174, L0.3181, L0.3188, L0.3195, L0.3202, L0.3209, L0.3216, L0.3223, L0.3230, L0.3237, L0.3244, L0.3251, L0.3258, L0.3265, L0.3272, L0.3279, L0.3286, L0.3293" @@ -10646,7 +10646,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3300, L0.3307, L0.3314, L0.3321, L0.3328, L0.3335, L0.3342, L0.3349, L0.3356" - " Creating 1 files" - - "**** Simulation run 493, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 493, type=compact(ManySmallFiles). 20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.3161[1166,1294] 0ns |----------------------------------------L0.3161-----------------------------------------|" - "L0.3168[1166,1294] 1ns |----------------------------------------L0.3168-----------------------------------------|" @@ -10668,8 +10668,8 @@ async fn stuck_l0_large_l0s() { - "L0.3280[1166,1294] 17ns |----------------------------------------L0.3280-----------------------------------------|" - "L0.3287[1166,1294] 18ns |----------------------------------------L0.3287-----------------------------------------|" - "L0.3294[1166,1294] 19ns |----------------------------------------L0.3294-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[1166,1294] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3161, L0.3168, L0.3175, L0.3182, L0.3189, L0.3196, L0.3203, L0.3210, L0.3217, L0.3224, L0.3231, L0.3238, L0.3245, L0.3252, L0.3259, L0.3266, L0.3273, L0.3280, L0.3287, L0.3294" @@ -10691,7 +10691,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3301, L0.3308, L0.3315, L0.3322, L0.3329, L0.3336, L0.3343, L0.3350, L0.3357" - " Creating 1 files" - - "**** Simulation run 495, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 495, type=compact(ManySmallFiles). 
20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.3162[1295,1423] 0ns |----------------------------------------L0.3162-----------------------------------------|" - "L0.3169[1295,1423] 1ns |----------------------------------------L0.3169-----------------------------------------|" @@ -10713,8 +10713,8 @@ async fn stuck_l0_large_l0s() { - "L0.3281[1295,1423] 17ns |----------------------------------------L0.3281-----------------------------------------|" - "L0.3288[1295,1423] 18ns |----------------------------------------L0.3288-----------------------------------------|" - "L0.3295[1295,1423] 19ns |----------------------------------------L0.3295-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[1295,1423] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3162, L0.3169, L0.3176, L0.3183, L0.3190, L0.3197, L0.3204, L0.3211, L0.3218, L0.3225, L0.3232, L0.3239, L0.3246, L0.3253, L0.3260, L0.3267, L0.3274, L0.3281, L0.3288, L0.3295" @@ -10753,7 +10753,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3110, L0.3116, L0.3122, L0.3128, L0.3134, L0.3140, L0.3146, L0.3152, L0.3158" - " Creating 1 files" - - "**** Simulation run 498, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 498, type=compact(ManySmallFiles). 20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.3163[1424,1552] 0ns |----------------------------------------L0.3163-----------------------------------------|" - "L0.3170[1424,1552] 1ns |----------------------------------------L0.3170-----------------------------------------|" @@ -10775,8 +10775,8 @@ async fn stuck_l0_large_l0s() { - "L0.3282[1424,1552] 17ns |----------------------------------------L0.3282-----------------------------------------|" - "L0.3289[1424,1552] 18ns |----------------------------------------L0.3289-----------------------------------------|" - "L0.3296[1424,1552] 19ns |----------------------------------------L0.3296-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[1424,1552] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3163, L0.3170, L0.3177, L0.3184, L0.3191, L0.3198, L0.3205, L0.3212, L0.3219, L0.3226, L0.3233, L0.3240, L0.3247, L0.3254, L0.3261, L0.3268, L0.3275, L0.3282, L0.3289, L0.3296" @@ -10798,7 +10798,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3303, L0.3310, L0.3317, L0.3324, L0.3331, L0.3338, L0.3345, L0.3352, L0.3359" - " Creating 1 files" - - "**** Simulation run 500, type=compact(ManySmallFiles). 20 Input Files, 128mb total:" + - "**** Simulation run 500, type=compact(ManySmallFiles). 
20 Input Files, 129mb total:" - "L0, all files 6mb " - "L0.3164[1553,1681] 0ns |----------------------------------------L0.3164-----------------------------------------|" - "L0.3171[1553,1681] 1ns |----------------------------------------L0.3171-----------------------------------------|" @@ -10820,8 +10820,8 @@ async fn stuck_l0_large_l0s() { - "L0.3283[1553,1681] 17ns |----------------------------------------L0.3283-----------------------------------------|" - "L0.3290[1553,1681] 18ns |----------------------------------------L0.3290-----------------------------------------|" - "L0.3297[1553,1681] 19ns |----------------------------------------L0.3297-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 128mb total:" - - "L0, all files 128mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 129mb total:" + - "L0, all files 129mb " - "L0.?[1553,1681] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3164, L0.3171, L0.3178, L0.3185, L0.3192, L0.3199, L0.3206, L0.3213, L0.3220, L0.3227, L0.3234, L0.3241, L0.3248, L0.3255, L0.3262, L0.3269, L0.3276, L0.3283, L0.3290, L0.3297" @@ -10843,7 +10843,7 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3304, L0.3311, L0.3318, L0.3325, L0.3332, L0.3339, L0.3346, L0.3353, L0.3360" - " Creating 1 files" - - "**** Simulation run 502, type=compact(ManySmallFiles). 20 Input Files, 135mb total:" + - "**** Simulation run 502, type=compact(ManySmallFiles). 20 Input Files, 130mb total:" - "L0, all files 7mb " - "L0.3165[1682,1811] 0ns |----------------------------------------L0.3165-----------------------------------------|" - "L0.3172[1682,1811] 1ns |----------------------------------------L0.3172-----------------------------------------|" @@ -10865,8 +10865,8 @@ async fn stuck_l0_large_l0s() { - "L0.3284[1682,1811] 17ns |----------------------------------------L0.3284-----------------------------------------|" - "L0.3291[1682,1811] 18ns |----------------------------------------L0.3291-----------------------------------------|" - "L0.3298[1682,1811] 19ns |----------------------------------------L0.3298-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 135mb total:" - - "L0, all files 135mb " + - "**** 1 Output Files (parquet_file_id not yet assigned), 130mb total:" + - "L0, all files 130mb " - "L0.?[1682,1811] 19ns |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - " Soft Deleting 20 files: L0.3165, L0.3172, L0.3179, L0.3186, L0.3193, L0.3200, L0.3207, L0.3214, L0.3221, L0.3228, L0.3235, L0.3242, L0.3249, L0.3256, L0.3263, L0.3270, L0.3277, L0.3284, L0.3291, L0.3298" @@ -10888,468 +10888,483 @@ async fn stuck_l0_large_l0s() { - "Committing partition 1:" - " Soft Deleting 9 files: L0.3305, L0.3312, L0.3319, L0.3326, L0.3333, L0.3340, L0.3347, L0.3354, L0.3361" - " Creating 1 files" - - "**** Simulation run 504, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[102, 203]). 2 Input Files, 257mb total:" + - "**** Simulation run 504, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[101, 201]). 
2 Input Files, 259mb total:" - "L0 " - - "L0.3362[1,130] 19ns 129mb|------------------L0.3362------------------| " - - "L0.3363[131,259] 19ns 128mb |-----------------L0.3363------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 257mb total:" + - "L0.3362[1,130] 19ns 130mb|------------------L0.3362------------------| " + - "L0.3363[131,259] 19ns 129mb |-----------------L0.3363------------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 259mb total:" - "L1 " - - "L1.?[1,102] 19ns 101mb |--------------L1.?---------------| " - - "L1.?[103,203] 19ns 100mb |--------------L1.?--------------| " - - "L1.?[204,259] 19ns 57mb |------L1.?-------| " + - "L1.?[1,101] 19ns 101mb |--------------L1.?--------------| " + - "L1.?[102,201] 19ns 100mb |--------------L1.?--------------| " + - "L1.?[202,259] 19ns 58mb |------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3362, L0.3363" - " Creating 3 files" - - "**** Simulation run 505, type=split(ReduceOverlap)(split_times=[102]). 1 Input Files, 0b total:" + - "**** Simulation run 505, type=split(ReduceOverlap)(split_times=[101]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3104[20,130] 191ns |----------------------------------------L0.3104----------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[20,102] 191ns |------------------------------L0.?-------------------------------| " - - "L0.?[103,130] 191ns |--------L0.?--------| " - - "**** Simulation run 506, type=split(ReduceOverlap)(split_times=[203]). 1 Input Files, 0b total:" + - "L0.?[20,101] 191ns |------------------------------L0.?------------------------------| " + - "L0.?[102,130] 191ns |--------L0.?--------| " + - "**** Simulation run 506, type=split(ReduceOverlap)(split_times=[201]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3364[131,259] 199ns |----------------------------------------L0.3364-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[131,203] 199ns |----------------------L0.?----------------------| " - - "L0.?[204,259] 199ns |----------------L0.?----------------| " + - "L0.?[131,201] 199ns |---------------------L0.?----------------------| " + - "L0.?[202,259] 199ns |-----------------L0.?-----------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3104, L0.3364" - " Creating 4 files" - - "**** Simulation run 507, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[361, 462]). 2 Input Files, 256mb total:" - - "L0, all files 128mb " + - "**** Simulation run 507, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[360, 460]). 
2 Input Files, 258mb total:" + - "L0, all files 129mb " - "L0.3365[260,388] 19ns |-----------------L0.3365------------------| " - "L0.3367[389,517] 19ns |-----------------L0.3367------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 256mb total:" + - "**** 3 Output Files (parquet_file_id not yet assigned), 258mb total:" - "L1 " - - "L1.?[260,361] 19ns 101mb |--------------L1.?---------------| " - - "L1.?[362,462] 19ns 100mb |--------------L1.?---------------| " - - "L1.?[463,517] 19ns 56mb |------L1.?------| " + - "L1.?[260,360] 19ns 101mb |--------------L1.?---------------| " + - "L1.?[361,460] 19ns 100mb |--------------L1.?--------------| " + - "L1.?[461,517] 19ns 57mb |------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3365, L0.3367" - " Creating 3 files" - - "**** Simulation run 508, type=split(ReduceOverlap)(split_times=[361]). 1 Input Files, 0b total:" + - "**** Simulation run 508, type=split(ReduceOverlap)(split_times=[360]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3366[260,388] 199ns |----------------------------------------L0.3366-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[260,361] 199ns |--------------------------------L0.?---------------------------------| " - - "L0.?[362,388] 199ns |------L0.?------| " - - "**** Simulation run 509, type=split(ReduceOverlap)(split_times=[462]). 1 Input Files, 0b total:" + - "L0.?[260,360] 199ns |--------------------------------L0.?--------------------------------| " + - "L0.?[361,388] 199ns |------L0.?------| " + - "**** Simulation run 509, type=split(ReduceOverlap)(split_times=[460]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3368[389,517] 199ns |----------------------------------------L0.3368-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[389,462] 199ns |----------------------L0.?-----------------------| " - - "L0.?[463,517] 199ns |---------------L0.?----------------| " + - "L0.?[389,460] 199ns |---------------------L0.?----------------------| " + - "L0.?[461,517] 199ns |----------------L0.?-----------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3366, L0.3368" - " Creating 4 files" - - "**** Simulation run 510, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[619, 720]). 2 Input Files, 256mb total:" - - "L0, all files 128mb " + - "**** Simulation run 510, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[618, 718]). 
2 Input Files, 258mb total:" + - "L0, all files 129mb " - "L0.3369[518,646] 19ns |-----------------L0.3369------------------| " - "L0.3371[647,775] 19ns |-----------------L0.3371------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 256mb total:" + - "**** 3 Output Files (parquet_file_id not yet assigned), 258mb total:" - "L1 " - - "L1.?[518,619] 19ns 101mb |--------------L1.?---------------| " - - "L1.?[620,720] 19ns 100mb |--------------L1.?---------------| " - - "L1.?[721,775] 19ns 56mb |------L1.?------| " + - "L1.?[518,618] 19ns 101mb |--------------L1.?---------------| " + - "L1.?[619,718] 19ns 100mb |--------------L1.?--------------| " + - "L1.?[719,775] 19ns 57mb |------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3369, L0.3371" - " Creating 3 files" - - "**** Simulation run 511, type=split(ReduceOverlap)(split_times=[619]). 1 Input Files, 0b total:" + - "**** Simulation run 511, type=split(ReduceOverlap)(split_times=[618]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3370[518,646] 199ns |----------------------------------------L0.3370-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[518,619] 199ns |--------------------------------L0.?---------------------------------| " - - "L0.?[620,646] 199ns |------L0.?------| " - - "**** Simulation run 512, type=split(ReduceOverlap)(split_times=[720]). 1 Input Files, 0b total:" + - "L0.?[518,618] 199ns |--------------------------------L0.?--------------------------------| " + - "L0.?[619,646] 199ns |------L0.?------| " + - "**** Simulation run 512, type=split(ReduceOverlap)(split_times=[718]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3372[647,775] 199ns |----------------------------------------L0.3372-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[647,720] 199ns |----------------------L0.?-----------------------| " - - "L0.?[721,775] 199ns |---------------L0.?----------------| " + - "L0.?[647,718] 199ns |---------------------L0.?----------------------| " + - "L0.?[719,775] 199ns |----------------L0.?-----------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3370, L0.3372" - " Creating 4 files" - - "**** Simulation run 513, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[875, 974]). 2 Input Files, 265mb total:" + - "**** Simulation run 513, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[876, 976]). 
2 Input Files, 261mb total:" - "L0 " - - "L0.3381[776,906] 19ns 136mb|------------------L0.3381------------------| " - - "L0.3373[907,1036] 19ns 129mb |-----------------L0.3373------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 265mb total:" + - "L0.3381[776,906] 19ns 131mb|------------------L0.3381------------------| " + - "L0.3373[907,1036] 19ns 130mb |-----------------L0.3373------------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:" - "L1 " - - "L1.?[776,875] 19ns 101mb |--------------L1.?--------------| " - - "L1.?[876,974] 19ns 100mb |-------------L1.?--------------| " - - "L1.?[975,1036] 19ns 64mb |-------L1.?--------| " + - "L1.?[776,876] 19ns 101mb |--------------L1.?--------------| " + - "L1.?[877,976] 19ns 100mb |--------------L1.?--------------| " + - "L1.?[977,1036] 19ns 60mb |-------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3373, L0.3381" - " Creating 3 files" - - "**** Simulation run 514, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[204, 305]). 3 Input Files, 257mb total:" + - "**** Simulation run 514, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[202, 302]). 3 Input Files, 259mb total:" - "L1 " - - "L1.3390[103,203] 19ns 100mb|------------L1.3390-------------| " - - "L1.3391[204,259] 19ns 57mb |-----L1.3391-----| " - - "L1.3396[260,361] 19ns 101mb |-------------L1.3396-------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 257mb total:" + - "L1.3390[102,201] 19ns 100mb|------------L1.3390-------------| " + - "L1.3391[202,259] 19ns 58mb |-----L1.3391-----| " + - "L1.3396[260,360] 19ns 101mb |------------L1.3396-------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 259mb total:" - "L2 " - - "L2.?[103,204] 19ns 101mb |--------------L2.?---------------| " - - "L2.?[205,305] 19ns 100mb |--------------L2.?--------------| " - - "L2.?[306,361] 19ns 57mb |------L2.?-------| " + - "L2.?[102,202] 19ns 101mb |--------------L2.?--------------| " + - "L2.?[203,302] 19ns 100mb |--------------L2.?--------------| " + - "L2.?[303,360] 19ns 58mb |------L2.?-------| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.3390, L1.3391, L1.3396" - " Upgrading 1 files level to CompactionLevel::L2: L1.3389" - " Creating 3 files" - - "**** Simulation run 515, type=split(ReduceOverlap)(split_times=[875]). 1 Input Files, 0b total:" + - "**** Simulation run 515, type=split(ReduceOverlap)(split_times=[876]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3382[776,906] 199ns |----------------------------------------L0.3382-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[776,875] 199ns |-------------------------------L0.?-------------------------------| " - - "L0.?[876,906] 199ns |-------L0.?-------| " - - "**** Simulation run 516, type=split(ReduceOverlap)(split_times=[974]). 1 Input Files, 0b total:" + - "L0.?[776,876] 199ns |-------------------------------L0.?--------------------------------| " + - "L0.?[877,906] 199ns |-------L0.?-------| " + - "**** Simulation run 516, type=split(ReduceOverlap)(split_times=[976]). 
1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3374[907,1036] 199ns |----------------------------------------L0.3374-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[907,974] 199ns |--------------------L0.?--------------------| " - - "L0.?[975,1036] 199ns |------------------L0.?------------------| " + - "L0.?[907,976] 199ns |---------------------L0.?---------------------| " + - "L0.?[977,1036] 199ns |-----------------L0.?------------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3374, L0.3382" - " Creating 4 files" - - "**** Simulation run 517, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1138, 1239]). 2 Input Files, 256mb total:" - - "L0, all files 128mb " + - "**** Simulation run 517, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1137, 1237]). 2 Input Files, 258mb total:" + - "L0, all files 129mb " - "L0.3375[1037,1165] 19ns |-----------------L0.3375------------------| " - "L0.3377[1166,1294] 19ns |-----------------L0.3377------------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 256mb total:" + - "**** 3 Output Files (parquet_file_id not yet assigned), 258mb total:" - "L1 " - - "L1.?[1037,1138] 19ns 101mb|--------------L1.?---------------| " - - "L1.?[1139,1239] 19ns 100mb |--------------L1.?---------------| " - - "L1.?[1240,1294] 19ns 56mb |------L1.?------| " + - "L1.?[1037,1137] 19ns 101mb|--------------L1.?---------------| " + - "L1.?[1138,1237] 19ns 100mb |--------------L1.?--------------| " + - "L1.?[1238,1294] 19ns 57mb |------L1.?-------| " - "Committing partition 1:" - " Soft Deleting 2 files: L0.3375, L0.3377" - " Creating 3 files" - - "**** Simulation run 518, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[463, 564]). 3 Input Files, 256mb total:" + - "**** Simulation run 518, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[461, 561]). 3 Input Files, 258mb total:" - "L1 " - - "L1.3397[362,462] 19ns 100mb|-------------L1.3397-------------| " - - "L1.3398[463,517] 19ns 56mb |----L1.3398-----| " - - "L1.3403[518,619] 19ns 101mb |-------------L1.3403-------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 256mb total:" + - "L1.3397[361,460] 19ns 100mb|------------L1.3397-------------| " + - "L1.3398[461,517] 19ns 57mb |-----L1.3398-----| " + - "L1.3403[518,618] 19ns 101mb |-------------L1.3403-------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 258mb total:" - "L2 " - - "L2.?[362,463] 19ns 101mb |--------------L2.?---------------| " - - "L2.?[464,564] 19ns 100mb |--------------L2.?---------------| " - - "L2.?[565,619] 19ns 56mb |------L2.?------| " + - "L2.?[361,461] 19ns 101mb |--------------L2.?---------------| " + - "L2.?[462,561] 19ns 100mb |--------------L2.?--------------| " + - "L2.?[562,618] 19ns 57mb |------L2.?-------| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.3397, L1.3398, L1.3403" - " Creating 3 files" - - "**** Simulation run 519, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[170, 320, 470, 620, 770, 920, 1070, 1220, 1370, 1520, 1670]). 
20 Input Files, 1.17gb total:" - - "L0 " - - "L0.3379[1295,1423] 19ns 128mb |L0.3379| " - - "L0.3383[1424,1552] 19ns 128mb |L0.3383| " - - "L0.3385[1553,1681] 19ns 128mb |L0.3385| " - - "L0.3387[1682,1811] 19ns 135mb |L0.3387|" - - "L0.3392[20,102] 191ns 0b |L0.3392| " - - "L0.3393[103,130] 191ns 0b |L0.3393| " - - "L0.3394[131,203] 199ns 0b |L0.3394| " - - "L0.3395[204,259] 199ns 0b |L0.3395| " - - "L0.3399[260,361] 199ns 0b |L0.3399| " - - "L0.3400[362,388] 199ns 0b |L0.3400| " - - "L0.3401[389,462] 199ns 0b |L0.3401| " - - "L0.3402[463,517] 199ns 0b |L0.3402| " + - "**** Simulation run 519, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[171, 322, 473, 624, 775, 926, 1077, 1228, 1379, 1530, 1681]). 20 Input Files, 1.17gb total:" + - "L0 " + - "L0.3379[1295,1423] 19ns 129mb |L0.3379| " + - "L0.3383[1424,1552] 19ns 129mb |L0.3383| " + - "L0.3385[1553,1681] 19ns 129mb |L0.3385| " + - "L0.3387[1682,1811] 19ns 130mb |L0.3387|" + - "L0.3392[20,101] 191ns 0b |L0.3392| " + - "L0.3393[102,130] 191ns 0b |L0.3393| " + - "L0.3394[131,201] 199ns 0b |L0.3394| " + - "L0.3395[202,259] 199ns 0b |L0.3395| " + - "L0.3399[260,360] 199ns 0b |L0.3399| " + - "L0.3400[361,388] 199ns 0b |L0.3400| " + - "L0.3401[389,460] 199ns 0b |L0.3401| " + - "L0.3402[461,517] 199ns 0b |L0.3402| " - "L1 " - - "L1.3404[620,720] 19ns 100mb |L1.3404| " - - "L1.3405[721,775] 19ns 56mb |L1.3405| " - - "L1.3410[776,875] 19ns 101mb |L1.3410| " - - "L1.3411[876,974] 19ns 100mb |L1.3411| " - - "L1.3412[975,1036] 19ns 64mb |L1.3412| " - - "L1.3420[1037,1138] 19ns 101mb |L1.3420| " - - "L1.3421[1139,1239] 19ns 100mb |L1.3421| " - - "L1.3422[1240,1294] 19ns 56mb |L1.3422| " + - "L1.3404[619,718] 19ns 100mb |L1.3404| " + - "L1.3405[719,775] 19ns 57mb |L1.3405| " + - "L1.3410[776,876] 19ns 101mb |L1.3410| " + - "L1.3411[877,976] 19ns 100mb |L1.3411| " + - "L1.3412[977,1036] 19ns 60mb |L1.3412| " + - "L1.3420[1037,1137] 19ns 101mb |L1.3420| " + - "L1.3421[1138,1237] 19ns 100mb |L1.3421| " + - "L1.3422[1238,1294] 19ns 57mb |L1.3422| " - "**** 12 Output Files (parquet_file_id not yet assigned), 1.17gb total:" - "L1 " - - "L1.?[20,170] 199ns 100mb |L1.?-| " - - "L1.?[171,320] 199ns 99mb |L1.?-| " - - "L1.?[321,470] 199ns 99mb |L1.?-| " - - "L1.?[471,620] 199ns 99mb |L1.?-| " - - "L1.?[621,770] 199ns 99mb |L1.?-| " - - "L1.?[771,920] 199ns 99mb |L1.?-| " - - "L1.?[921,1070] 199ns 99mb |L1.?-| " - - "L1.?[1071,1220] 199ns 99mb |L1.?-| " - - "L1.?[1221,1370] 199ns 99mb |L1.?-| " - - "L1.?[1371,1520] 199ns 99mb |L1.?-| " - - "L1.?[1521,1670] 199ns 99mb |L1.?-| " - - "L1.?[1671,1811] 199ns 101mb |L1.?-| " + - "L1.?[20,171] 199ns 101mb |L1.?-| " + - "L1.?[172,322] 199ns 101mb |L1.?-| " + - "L1.?[323,473] 199ns 101mb |L1.?-| " + - "L1.?[474,624] 199ns 101mb |L1.?-| " + - "L1.?[625,775] 199ns 101mb |L1.?-| " + - "L1.?[776,926] 199ns 101mb |L1.?-| " + - "L1.?[927,1077] 199ns 101mb |L1.?-| " + - "L1.?[1078,1228] 199ns 101mb |L1.?-| " + - "L1.?[1229,1379] 199ns 101mb |L1.?-| " + - "L1.?[1380,1530] 199ns 101mb |L1.?-| " + - "L1.?[1531,1681] 199ns 101mb |L1.?-| " + - "L1.?[1682,1811] 199ns 87mb |L1.?| " - "Committing partition 1:" - " Soft Deleting 20 files: L0.3379, L0.3383, L0.3385, L0.3387, L0.3392, L0.3393, L0.3394, L0.3395, L0.3399, L0.3400, L0.3401, L0.3402, L1.3404, L1.3405, L1.3410, L1.3411, L1.3412, L1.3420, L1.3421, L1.3422" - " Creating 12 files" - - "**** Simulation run 520, type=split(ReduceOverlap)(split_times=[564, 619]). 
1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3429[471,620] 199ns |----------------------------------------L1.3429-----------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 99mb total:" + - "**** Simulation run 520, type=split(ReduceOverlap)(split_times=[561, 618]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3429[474,624] 199ns |----------------------------------------L1.3429-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[471,564] 199ns 62mb |-------------------------L1.?-------------------------| " - - "L1.?[565,619] 199ns 36mb |-------------L1.?-------------| " - - "L1.?[620,620] 199ns 1mb |L1.?|" - - "**** Simulation run 521, type=split(ReduceOverlap)(split_times=[361, 463]). 1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3428[321,470] 199ns |----------------------------------------L1.3428-----------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 99mb total:" + - "L1.?[474,561] 199ns 59mb |-----------------------L1.?-----------------------| " + - "L1.?[562,618] 199ns 38mb |-------------L1.?--------------| " + - "L1.?[619,624] 199ns 4mb |L1.?|" + - "**** Simulation run 521, type=split(ReduceOverlap)(split_times=[360, 461]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3428[323,473] 199ns |----------------------------------------L1.3428-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[321,361] 199ns 27mb |---------L1.?---------| " - - "L1.?[362,463] 199ns 67mb |---------------------------L1.?----------------------------| " - - "L1.?[464,470] 199ns 5mb |L1.?|" - - "**** Simulation run 522, type=split(ReduceOverlap)(split_times=[204, 305]). 1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3427[171,320] 199ns |----------------------------------------L1.3427-----------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 99mb total:" + - "L1.?[323,360] 199ns 25mb |--------L1.?--------| " + - "L1.?[361,461] 199ns 67mb |---------------------------L1.?---------------------------| " + - "L1.?[462,473] 199ns 8mb |L1.?| " + - "**** Simulation run 522, type=split(ReduceOverlap)(split_times=[202, 302]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3427[172,322] 199ns |----------------------------------------L1.3427-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[171,204] 199ns 22mb |------L1.?-------| " - - "L1.?[205,305] 199ns 67mb |---------------------------L1.?---------------------------| " - - "L1.?[306,320] 199ns 11mb |-L1.?-| " - - "**** Simulation run 523, type=split(ReduceOverlap)(split_times=[102]). 1 Input Files, 100mb total:" - - "L1, all files 100mb " - - "L1.3426[20,170] 199ns |----------------------------------------L1.3426-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "L1.?[172,202] 199ns 21mb |------L1.?------| " + - "L1.?[203,302] 199ns 67mb |--------------------------L1.?---------------------------| " + - "L1.?[303,322] 199ns 13mb |--L1.?---| " + - "**** Simulation run 523, type=split(ReduceOverlap)(split_times=[101]). 
1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3426[20,171] 199ns |----------------------------------------L1.3426-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[20,102] 199ns 55mb |---------------------L1.?----------------------| " - - "L1.?[103,170] 199ns 45mb |-----------------L1.?-----------------| " + - "L1.?[20,101] 199ns 55mb |---------------------L1.?---------------------| " + - "L1.?[102,171] 199ns 47mb |-----------------L1.?------------------| " - "Committing partition 1:" - " Soft Deleting 4 files: L1.3426, L1.3427, L1.3428, L1.3429" - - " Upgrading 1 files level to CompactionLevel::L2: L1.3437" - " Creating 11 files" - - "**** Simulation run 524, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[66]). 2 Input Files, 155mb total:" + - "**** Simulation run 524, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[66]). 2 Input Files, 156mb total:" - "L1 " - - "L1.3447[20,102] 199ns 55mb |--------------------------------L1.3447--------------------------------| " + - "L1.3447[20,101] 199ns 55mb |-------------------------------L1.3447--------------------------------| " - "L2 " - - "L2.3389[1,102] 19ns 101mb|----------------------------------------L2.3389-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 155mb total:" + - "L2.3389[1,101] 19ns 101mb|----------------------------------------L2.3389-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 156mb total:" - "L2 " - - "L2.?[1,66] 199ns 100mb |-------------------------L2.?--------------------------| " - - "L2.?[67,102] 199ns 55mb |------------L2.?-------------| " + - "L2.?[1,66] 199ns 102mb |--------------------------L2.?--------------------------| " + - "L2.?[67,101] 199ns 54mb |------------L2.?------------| " - "Committing partition 1:" - " Soft Deleting 2 files: L2.3389, L1.3447" - " Creating 2 files" - - "**** Simulation run 525, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[164]). 3 Input Files, 168mb total:" + - "**** Simulation run 525, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[162]). 
3 Input Files, 168mb total:" - "L1 " - - "L1.3448[103,170] 199ns 45mb|-------------------------L1.3448-------------------------| " - - "L1.3444[171,204] 199ns 22mb |----------L1.3444----------| " + - "L1.3448[102,171] 199ns 47mb|--------------------------L1.3448---------------------------| " + - "L1.3444[172,202] 199ns 21mb |---------L1.3444---------|" - "L2 " - - "L2.3413[103,204] 19ns 101mb|----------------------------------------L2.3413-----------------------------------------|" + - "L2.3413[102,202] 19ns 101mb|----------------------------------------L2.3413-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 168mb total:" - "L2 " - - "L2.?[103,164] 199ns 102mb|------------------------L2.?------------------------| " - - "L2.?[165,204] 199ns 67mb |--------------L2.?--------------| " + - "L2.?[102,162] 199ns 102mb|------------------------L2.?------------------------| " + - "L2.?[163,202] 199ns 67mb |--------------L2.?---------------| " - "Committing partition 1:" - " Soft Deleting 3 files: L2.3413, L1.3444, L1.3448" - " Creating 2 files" - - "**** Simulation run 526, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[265, 325]). 5 Input Files, 261mb total:" + - "**** Simulation run 526, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[263, 323]). 5 Input Files, 263mb total:" - "L1 " - - "L1.3445[205,305] 199ns 67mb|------------------------L1.3445------------------------| " - - "L1.3446[306,320] 199ns 11mb |L1.3446| " - - "L1.3441[321,361] 199ns 27mb |-------L1.3441-------| " + - "L1.3445[203,302] 199ns 67mb|-----------------------L1.3445------------------------| " + - "L1.3446[303,322] 199ns 13mb |L1.3446-| " + - "L1.3441[323,360] 199ns 25mb |------L1.3441------| " - "L2 " - - "L2.3414[205,305] 19ns 100mb|------------------------L2.3414------------------------| " - - "L2.3415[306,361] 19ns 57mb |-----------L2.3415-----------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:" + - "L2.3414[203,302] 19ns 100mb|-----------------------L2.3414------------------------| " + - "L2.3415[303,360] 19ns 58mb |-----------L2.3415------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 263mb total:" - "L2 " - - "L2.?[205,265] 199ns 100mb|--------------L2.?--------------| " - - "L2.?[266,325] 199ns 99mb |--------------L2.?--------------| " - - "L2.?[326,361] 199ns 62mb |-------L2.?-------| " + - "L2.?[203,263] 199ns 102mb|--------------L2.?--------------| " + - "L2.?[264,323] 199ns 100mb |-------------L2.?--------------| " + - "L2.?[324,360] 199ns 62mb |-------L2.?-------| " - "Committing partition 1:" - " Soft Deleting 5 files: L2.3414, L2.3415, L1.3441, L1.3445, L1.3446" - " Creating 3 files" - - "**** Simulation run 527, type=split(HighL0OverlapTotalBacklog)(split_times=[832]). 1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3431[771,920] 199ns |----------------------------------------L1.3431-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:" + - "**** Simulation run 527, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[421]). 2 Input Files, 168mb total:" - "L1 " - - "L1.?[771,832] 199ns 41mb |---------------L1.?---------------| " - - "L1.?[833,920] 199ns 59mb |-----------------------L1.?-----------------------| " - - "**** Simulation run 528, type=split(HighL0OverlapTotalBacklog)(split_times=[832]). 
1 Input Files, 0b total:" + - "L1.3442[361,461] 199ns 67mb|----------------------------------------L1.3442-----------------------------------------|" + - "L2 " + - "L2.3423[361,461] 19ns 101mb|----------------------------------------L2.3423-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 168mb total:" + - "L2 " + - "L2.?[361,421] 199ns 102mb|------------------------L2.?------------------------| " + - "L2.?[422,461] 199ns 67mb |--------------L2.?---------------| " + - "Committing partition 1:" + - " Soft Deleting 2 files: L2.3423, L1.3442" + - " Creating 2 files" + - "**** Simulation run 528, type=split(HighL0OverlapTotalBacklog)(split_times=[957]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3416[776,875] 199ns |----------------------------------------L0.3416----------------------------------------| " + - "L0.3418[907,976] 199ns |----------------------------------------L0.3418-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[776,832] 199ns |----------------------L0.?----------------------| " - - "L0.?[833,875] 199ns |----------------L0.?----------------| " - - "**** Simulation run 529, type=split(HighL0OverlapTotalBacklog)(split_times=[1044]). 1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3432[921,1070] 199ns |----------------------------------------L1.3432-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:" + - "L0.?[907,957] 199ns |-----------------------------L0.?------------------------------| " + - "L0.?[958,976] 199ns |--------L0.?---------| " + - "**** Simulation run 529, type=split(HighL0OverlapTotalBacklog)(split_times=[957]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3432[927,1077] 199ns |----------------------------------------L1.3432-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[921,1044] 199ns 82mb|----------------------------------L1.?----------------------------------| " - - "L1.?[1045,1070] 199ns 17mb |----L1.?-----| " - - "**** Simulation run 530, type=split(HighL0OverlapTotalBacklog)(split_times=[1044]). 1 Input Files, 0b total:" + - "L1.?[927,957] 199ns 21mb |------L1.?------| " + - "L1.?[958,1077] 199ns 80mb |--------------------------------L1.?---------------------------------| " + - "**** Simulation run 530, type=split(HighL0OverlapTotalBacklog)(split_times=[1138]). 1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3376[1037,1165] 199ns |----------------------------------------L0.3376-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1037,1044] 199ns |L0.?| " - - "L0.?[1045,1165] 199ns |---------------------------------------L0.?---------------------------------------| " - - "**** Simulation run 531, type=split(HighL0OverlapTotalBacklog)(split_times=[1256]). 1 Input Files, 0b total:" + - "L0.?[1037,1138] 199ns |--------------------------------L0.?---------------------------------| " + - "L0.?[1139,1165] 199ns |------L0.?------| " + - "**** Simulation run 531, type=split(HighL0OverlapTotalBacklog)(split_times=[1138]). 
1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3433[1078,1228] 199ns |----------------------------------------L1.3433-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:" + - "L1 " + - "L1.?[1078,1138] 199ns 41mb|---------------L1.?---------------| " + - "L1.?[1139,1228] 199ns 60mb |-----------------------L1.?------------------------| " + - "**** Simulation run 532, type=split(HighL0OverlapTotalBacklog)(split_times=[1319]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3434[1229,1379] 199ns |----------------------------------------L1.3434-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:" + - "L1 " + - "L1.?[1229,1319] 199ns 61mb|------------------------L1.?------------------------| " + - "L1.?[1320,1379] 199ns 40mb |--------------L1.?---------------| " + - "**** Simulation run 533, type=split(HighL0OverlapTotalBacklog)(split_times=[1319]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3378[1166,1294] 199ns |----------------------------------------L0.3378-----------------------------------------|" + - "L0.3380[1295,1423] 199ns |----------------------------------------L0.3380-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1166,1256] 199ns |----------------------------L0.?-----------------------------| " - - "L0.?[1257,1294] 199ns |----------L0.?----------| " - - "**** Simulation run 532, type=split(HighL0OverlapTotalBacklog)(split_times=[1256]). 1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3434[1221,1370] 199ns |----------------------------------------L1.3434-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:" - - "L1 " - - "L1.?[1221,1256] 199ns 23mb|-------L1.?--------| " - - "L1.?[1257,1370] 199ns 76mb |-------------------------------L1.?-------------------------------| " - - "**** Simulation run 533, type=split(HighL0OverlapTotalBacklog)(split_times=[1468]). 1 Input Files, 99mb total:" - - "L1, all files 99mb " - - "L1.3435[1371,1520] 199ns |----------------------------------------L1.3435-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 99mb total:" + - "L0.?[1295,1319] 199ns |-----L0.?-----| " + - "L0.?[1320,1423] 199ns |---------------------------------L0.?---------------------------------| " + - "**** Simulation run 534, type=split(HighL0OverlapTotalBacklog)(split_times=[1500]). 1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3435[1380,1530] 199ns |----------------------------------------L1.3435-----------------------------------------|" + - "**** 2 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[1371,1468] 199ns 65mb|--------------------------L1.?--------------------------| " - - "L1.?[1469,1520] 199ns 35mb |------------L1.?------------| " - - "**** Simulation run 534, type=split(HighL0OverlapTotalBacklog)(split_times=[1468]). 1 Input Files, 0b total:" + - "L1.?[1380,1500] 199ns 81mb|---------------------------------L1.?---------------------------------| " + - "L1.?[1501,1530] 199ns 20mb |-----L1.?------| " + - "**** Simulation run 535, type=split(HighL0OverlapTotalBacklog)(split_times=[1500]). 
1 Input Files, 0b total:" - "L0, all files 0b " - "L0.3384[1424,1552] 199ns |----------------------------------------L0.3384-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1424,1468] 199ns |------------L0.?------------| " - - "L0.?[1469,1552] 199ns |--------------------------L0.?--------------------------| " + - "L0.?[1424,1500] 199ns |-----------------------L0.?------------------------| " + - "L0.?[1501,1552] 199ns |--------------L0.?---------------| " - "Committing partition 1:" - - " Soft Deleting 8 files: L0.3376, L0.3378, L0.3384, L0.3416, L1.3431, L1.3432, L1.3434, L1.3435" + - " Soft Deleting 8 files: L0.3376, L0.3380, L0.3384, L0.3418, L1.3432, L1.3433, L1.3434, L1.3435" - " Creating 16 files" - - "**** Simulation run 535, type=split(ReduceOverlap)(split_times=[564]). 1 Input Files, 0b total:" - - "L0, all files 0b " - - "L0.3406[518,619] 199ns |----------------------------------------L0.3406-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - - "L0, all files 0b " - - "L0.?[518,564] 199ns |-----------------L0.?-----------------| " - - "L0.?[565,619] 199ns |---------------------L0.?---------------------| " - - "**** Simulation run 536, type=split(ReduceOverlap)(split_times=[620]). 1 Input Files, 0b total:" + - "**** Simulation run 536, type=split(ReduceOverlap)(split_times=[561]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3407[620,646] 199ns |----------------------------------------L0.3407-----------------------------------------|" + - "L0.3406[518,618] 199ns |----------------------------------------L0.3406-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[620,620] 199ns |L0.?| " - - "L0.?[621,646] 199ns |----------------------------------------L0.?----------------------------------------| " - - "**** Simulation run 537, type=split(ReduceOverlap)(split_times=[770]). 1 Input Files, 0b total:" + - "L0.?[518,561] 199ns |----------------L0.?----------------| " + - "L0.?[562,618] 199ns |----------------------L0.?----------------------| " + - "**** Simulation run 537, type=split(ReduceOverlap)(split_times=[624]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3409[721,775] 199ns |----------------------------------------L0.3409-----------------------------------------|" + - "L0.3407[619,646] 199ns |----------------------------------------L0.3407-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[721,770] 199ns |-------------------------------------L0.?--------------------------------------| " - - "L0.?[771,775] 199ns |L0.?| " - - "**** Simulation run 538, type=split(ReduceOverlap)(split_times=[920]). 1 Input Files, 0b total:" + - "L0.?[619,624] 199ns |-----L0.?-----| " + - "L0.?[625,646] 199ns |--------------------------------L0.?--------------------------------|" + - "**** Simulation run 538, type=split(ReduceOverlap)(split_times=[926]). 
1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3418[907,974] 199ns |----------------------------------------L0.3418-----------------------------------------|" + - "L0.3458[907,957] 199ns |----------------------------------------L0.3458-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[907,920] 199ns |-----L0.?------| " - - "L0.?[921,974] 199ns |--------------------------------L0.?---------------------------------| " - - "**** Simulation run 539, type=split(ReduceOverlap)(split_times=[1070]). 1 Input Files, 0b total:" + - "L0.?[907,926] 199ns |--------------L0.?--------------| " + - "L0.?[927,957] 199ns |------------------------L0.?------------------------|" + - "**** Simulation run 539, type=split(ReduceOverlap)(split_times=[1077]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3463[1045,1165] 199ns |----------------------------------------L0.3463-----------------------------------------|" + - "L0.3462[1037,1138] 199ns |----------------------------------------L0.3462-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1045,1070] 199ns |------L0.?------| " - - "L0.?[1071,1165] 199ns |--------------------------------L0.?--------------------------------| " - - "**** Simulation run 540, type=split(ReduceOverlap)(split_times=[1220]). 1 Input Files, 0b total:" + - "L0.?[1037,1077] 199ns |--------------L0.?---------------| " + - "L0.?[1078,1138] 199ns |-----------------------L0.?------------------------| " + - "**** Simulation run 540, type=split(ReduceOverlap)(split_times=[1228]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3464[1166,1256] 199ns |----------------------------------------L0.3464-----------------------------------------|" + - "L0.3378[1166,1294] 199ns |----------------------------------------L0.3378-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1166,1220] 199ns |------------------------L0.?------------------------| " - - "L0.?[1221,1256] 199ns |--------------L0.?---------------|" - - "**** Simulation run 541, type=split(ReduceOverlap)(split_times=[1370]). 1 Input Files, 0b total:" + - "L0.?[1166,1228] 199ns |------------------L0.?-------------------| " + - "L0.?[1229,1294] 199ns |-------------------L0.?--------------------| " + - "**** Simulation run 541, type=split(ReduceOverlap)(split_times=[1379]). 1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3380[1295,1423] 199ns |----------------------------------------L0.3380-----------------------------------------|" + - "L0.3469[1320,1423] 199ns |----------------------------------------L0.3469-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1295,1370] 199ns |-----------------------L0.?-----------------------| " - - "L0.?[1371,1423] 199ns |---------------L0.?---------------| " - - "**** Simulation run 542, type=split(ReduceOverlap)(split_times=[1520]). 1 Input Files, 0b total:" + - "L0.?[1320,1379] 199ns |----------------------L0.?-----------------------| " + - "L0.?[1380,1423] 199ns |---------------L0.?----------------| " + - "**** Simulation run 542, type=split(ReduceOverlap)(split_times=[1530]). 
1 Input Files, 0b total:" - "L0, all files 0b " - - "L0.3471[1469,1552] 199ns |----------------------------------------L0.3471-----------------------------------------|" + - "L0.3473[1501,1552] 199ns |----------------------------------------L0.3473-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 0b total:" - "L0, all files 0b " - - "L0.?[1469,1520] 199ns |------------------------L0.?-------------------------| " - - "L0.?[1521,1552] 199ns |-------------L0.?--------------| " + - "L0.?[1501,1530] 199ns |----------------------L0.?-----------------------| " + - "L0.?[1531,1552] 199ns |---------------L0.?----------------| " - "Committing partition 1:" - - " Soft Deleting 8 files: L0.3380, L0.3406, L0.3407, L0.3409, L0.3418, L0.3463, L0.3464, L0.3471" - - " Creating 16 files" - - "**** Simulation run 543, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[622, 773]). 17 Input Files, 298mb total:" - - "L0 " - - "L0.3472[518,564] 199ns 0b |L0.3472| " - - "L0.3473[565,619] 199ns 0b |L0.3473-| " - - "L0.3474[620,620] 199ns 0b |L0.3474| " - - "L0.3475[621,646] 199ns 0b |L0.3475| " - - "L0.3408[647,720] 199ns 0b |--L0.3408---| " - - "L0.3476[721,770] 199ns 0b |L0.3476| " - - "L0.3477[771,775] 199ns 0b |L0.3477| " - - "L0.3458[776,832] 199ns 0b |-L0.3458-| " - - "L0.3459[833,875] 199ns 0b |L0.3459| " - - "L0.3417[876,906] 199ns 0b |L0.3417|" - - "L0.3478[907,920] 199ns 0b |L0.3478|" + - " Soft Deleting 7 files: L0.3378, L0.3406, L0.3407, L0.3458, L0.3462, L0.3469, L0.3473" + - " Creating 14 files" + - "**** Simulation run 543, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[624, 774]). 10 Input Files, 201mb total:" + - "L0 " + - "L0.3474[518,561] 199ns 0b |-L0.3474--| " + - "L0.3475[562,618] 199ns 0b |---L0.3475----| " + - "L0.3476[619,624] 199ns 0b |L0.3476| " + - "L0.3477[625,646] 199ns 0b |L0.3477| " + - "L0.3408[647,718] 199ns 0b |------L0.3408------| " + - "L0.3409[719,775] 199ns 0b |---L0.3409----| " + - "L1 " + - "L1.3438[474,561] 199ns 59mb|--------L1.3438---------| " + - "L1.3439[562,618] 199ns 38mb |---L1.3439----| " + - "L1.3440[619,624] 199ns 4mb |L1.3440| " + - "L1.3430[625,775] 199ns 101mb |-----------------L1.3430------------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 201mb total:" - "L1 " - - "L1.3438[471,564] 199ns 62mb|----L1.3438-----| " - - "L1.3439[565,619] 199ns 36mb |L1.3439-| " - - "L1.3440[620,620] 199ns 1mb |L1.3440| " - - "L1.3430[621,770] 199ns 99mb |----------L1.3430----------| " - - "L1.3456[771,832] 199ns 41mb |-L1.3456--| " - - "L1.3457[833,920] 199ns 59mb |----L1.3457----| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 298mb total:" + - "L1.?[474,624] 199ns 101mb|-------------------L1.?-------------------| " + - "L1.?[625,774] 199ns 100mb |-------------------L1.?-------------------| " + - "L1.?[775,775] 199ns 682kb |L1.?|" + - "Committing partition 1:" + - " Soft Deleting 10 files: L0.3408, L0.3409, L1.3430, L1.3438, L1.3439, L1.3440, L0.3474, L0.3475, L0.3476, L0.3477" + - " Creating 3 files" + - "**** Simulation run 544, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[926, 1076]). 
12 Input Files, 242mb total:" + - "L0 " + - "L0.3416[776,876] 199ns 0b|-------L0.3416--------| " + - "L0.3417[877,906] 199ns 0b |L0.3417| " + - "L0.3478[907,926] 199ns 0b |L0.3478| " + - "L0.3479[927,957] 199ns 0b |L0.3479| " + - "L0.3459[958,976] 199ns 0b |L0.3459| " + - "L0.3419[977,1036] 199ns 0b |--L0.3419---| " + - "L0.3480[1037,1077] 199ns 0b |L0.3480| " + - "L0.3481[1078,1138] 199ns 0b |--L0.3481---| " + - "L1 " + - "L1.3431[776,926] 199ns 101mb|--------------L1.3431--------------| " + - "L1.3460[927,957] 199ns 21mb |L1.3460| " + - "L1.3461[958,1077] 199ns 80mb |----------L1.3461----------| " + - "L1.3464[1078,1138] 199ns 41mb |--L1.3464---| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 242mb total:" - "L1 " - - "L1.?[471,622] 199ns 100mb|------------L1.?------------| " - - "L1.?[623,773] 199ns 100mb |------------L1.?------------| " - - "L1.?[774,920] 199ns 98mb |-----------L1.?------------| " + - "L1.?[776,926] 199ns 101mb|---------------L1.?----------------| " + - "L1.?[927,1076] 199ns 100mb |---------------L1.?----------------| " + - "L1.?[1077,1138] 199ns 41mb |----L1.?-----| " - "Committing partition 1:" - - " Soft Deleting 17 files: L0.3408, L0.3417, L1.3430, L1.3438, L1.3439, L1.3440, L1.3456, L1.3457, L0.3458, L0.3459, L0.3472, L0.3473, L0.3474, L0.3475, L0.3476, L0.3477, L0.3478" + - " Soft Deleting 12 files: L0.3416, L0.3417, L0.3419, L1.3431, L0.3459, L1.3460, L1.3461, L1.3464, L0.3478, L0.3479, L0.3480, L0.3481" - " Creating 3 files" - - "**** Simulation run 544, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1072, 1223]). 14 Input Files, 298mb total:" - - "L0 " - - "L0.3479[921,974] 199ns 0b|L0.3479-| " - - "L0.3419[975,1036] 199ns 0b |-L0.3419--| " - - "L0.3462[1037,1044] 199ns 0b |L0.3462| " - - "L0.3480[1045,1070] 199ns 0b |L0.3480| " - - "L0.3481[1071,1165] 199ns 0b |----L0.3481-----| " - - "L0.3482[1166,1220] 199ns 0b |L0.3482-| " - - "L0.3483[1221,1256] 199ns 0b |L0.3483| " - - "L0.3465[1257,1294] 199ns 0b |L0.3465| " - - "L0.3484[1295,1370] 199ns 0b |---L0.3484---| " + - "**** Simulation run 545, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1289, 1439]). 
13 Input Files, 261mb total:" + - "L0 " + - "L0.3463[1139,1165] 199ns 0b|L0.3463| " + - "L0.3482[1166,1228] 199ns 0b |--L0.3482---| " + - "L0.3483[1229,1294] 199ns 0b |--L0.3483---| " + - "L0.3468[1295,1319] 199ns 0b |L0.3468| " + - "L0.3484[1320,1379] 199ns 0b |--L0.3484--| " + - "L0.3485[1380,1423] 199ns 0b |L0.3485| " + - "L0.3472[1424,1500] 199ns 0b |----L0.3472----| " + - "L0.3486[1501,1530] 199ns 0b |L0.3486|" - "L1 " - - "L1.3460[921,1044] 199ns 82mb|-------L1.3460--------| " - - "L1.3461[1045,1070] 199ns 17mb |L1.3461| " - - "L1.3433[1071,1220] 199ns 99mb |----------L1.3433----------| " - - "L1.3466[1221,1256] 199ns 23mb |L1.3466| " - - "L1.3467[1257,1370] 199ns 76mb |------L1.3467-------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 298mb total:" + - "L1.3465[1139,1228] 199ns 60mb|-----L1.3465------| " + - "L1.3466[1229,1319] 199ns 61mb |-----L1.3466------| " + - "L1.3467[1320,1379] 199ns 40mb |--L1.3467--| " + - "L1.3470[1380,1500] 199ns 81mb |---------L1.3470---------| " + - "L1.3471[1501,1530] 199ns 20mb |L1.3471|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:" - "L1 " - - "L1.?[921,1072] 199ns 100mb|------------L1.?------------| " - - "L1.?[1073,1223] 199ns 100mb |------------L1.?------------| " - - "L1.?[1224,1370] 199ns 98mb |-----------L1.?------------| " + - "L1.?[1139,1289] 199ns 101mb|--------------L1.?--------------| " + - "L1.?[1290,1439] 199ns 100mb |--------------L1.?--------------| " + - "L1.?[1440,1530] 199ns 61mb |-------L1.?-------| " - "Committing partition 1:" - - " Soft Deleting 14 files: L0.3419, L1.3433, L1.3460, L1.3461, L0.3462, L0.3465, L1.3466, L1.3467, L0.3479, L0.3480, L0.3481, L0.3482, L0.3483, L0.3484" + - " Soft Deleting 13 files: L0.3463, L1.3465, L1.3466, L1.3467, L0.3468, L1.3470, L1.3471, L0.3472, L0.3482, L0.3483, L0.3484, L0.3485, L0.3486" - " Creating 3 files" - - "**** Simulation run 545, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1593]). 9 Input Files, 199mb total:" - - "L0 " - - "L0.3485[1371,1423] 199ns 0b|L0.3485-| " - - "L0.3470[1424,1468] 199ns 0b |L0.3470| " - - "L0.3486[1469,1520] 199ns 0b |L0.3486-| " - - "L0.3487[1521,1552] 199ns 0b |L0.3487| " - - "L0.3386[1553,1681] 199ns 0b |--------L0.3386---------| " - - "L0.3388[1682,1811] 199ns 0b |--------L0.3388---------| " + - "**** Simulation run 546, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1681]). 
5 Input Files, 187mb total:" + - "L0 " + - "L0.3487[1531,1552] 199ns 0b|L0.3487| " + - "L0.3386[1553,1681] 199ns 0b |----------------L0.3386----------------| " + - "L0.3388[1682,1811] 199ns 0b |----------------L0.3388----------------| " - "L1 " - - "L1.3468[1371,1468] 199ns 65mb|-----L1.3468-----| " - - "L1.3469[1469,1520] 199ns 35mb |L1.3469-| " - - "L1.3436[1521,1670] 199ns 99mb |----------L1.3436-----------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 199mb total:" + - "L1.3436[1531,1681] 199ns 101mb|-------------------L1.3436--------------------| " + - "L1.3437[1682,1811] 199ns 87mb |----------------L1.3437----------------| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 187mb total:" - "L1 " - - "L1.?[1371,1593] 199ns 100mb|-------------------L1.?--------------------| " - - "L1.?[1594,1811] 199ns 99mb |-------------------L1.?-------------------| " + - "L1.?[1531,1681] 199ns 101mb|---------------------L1.?---------------------| " + - "L1.?[1682,1811] 199ns 87mb |-----------------L1.?------------------| " - "Committing partition 1:" - - " Soft Deleting 9 files: L0.3386, L0.3388, L1.3436, L1.3468, L1.3469, L0.3470, L0.3485, L0.3486, L0.3487" + - " Soft Deleting 5 files: L0.3386, L0.3388, L1.3436, L1.3437, L0.3487" - " Creating 2 files" - - "**** Simulation run 546, type=compact(FoundSubsetLessThanMaxCompactSize). 20 Input Files, 132b total:" + - "**** Simulation run 547, type=compact(FoundSubsetLessThanMaxCompactSize). 20 Input Files, 132b total:" - "L0 " - "L0.2954[2717,3621] 199ns 0b|L0.2954| " - "L0.2955[3622,4526] 199ns 0b|L0.2955| " @@ -11378,7 +11393,7 @@ async fn stuck_l0_large_l0s() { - " Soft Deleting 20 files: L0.2937, L0.2938, L0.2939, L0.2940, L0.2941, L0.2942, L0.2943, L0.2944, L0.2947, L0.2948, L0.2954, L0.2955, L0.2956, L0.2957, L0.2958, L0.2959, L0.2960, L0.2961, L0.2962, L0.2963" - " Upgrading 1 files level to CompactionLevel::L1: L0.2953" - " Creating 1 files" - - "**** Simulation run 547, type=compact(TotalSizeLessThanMaxCompactSize). 15 Input Files, 2kb total:" + - "**** Simulation run 548, type=compact(TotalSizeLessThanMaxCompactSize). 15 Input Files, 2kb total:" - "L0 " - "L0.1310[1836914,1990000] 199ns 160b |L0.1310|" - "L0.1324[1683838,1836913] 199ns 150b |L0.1324| " @@ -11395,126 +11410,119 @@ async fn stuck_l0_large_l0s() { - "L0.2950[141302,153077] 199ns 34b |L0.2950| " - "L0.2949[129527,141301] 199ns 12b |L0.2949| " - "L1 " - - "L1.3496[2717,129526] 199ns 132b|L1.3496| " + - "L1.3499[2717,129526] 199ns 132b|L1.3499| " - "**** 1 Output Files (parquet_file_id not yet assigned), 2kb total:" - "L1, all files 2kb " - "L1.?[2717,1990000] 199ns |------------------------------------------L1.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 15 files: L0.1310, L0.1314, L0.1315, L0.1316, L0.1317, L0.1318, L0.1319, L0.1320, L0.1321, L0.1322, L0.1323, L0.1324, L0.2949, L0.2950, L1.3496" + - " Soft Deleting 15 files: L0.1310, L0.1314, L0.1315, L0.1316, L0.1317, L0.1318, L0.1319, L0.1320, L0.1321, L0.1322, L0.1323, L0.1324, L0.2949, L0.2950, L1.3499" - " Creating 1 files" - - "**** Simulation run 548, type=split(ReduceOverlap)(split_times=[564, 619]). 1 Input Files, 100mb total:" - - "L1, all files 100mb " - - "L1.3488[471,622] 199ns |----------------------------------------L1.3488-----------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" + - "**** Simulation run 549, type=split(ReduceOverlap)(split_times=[561, 618]). 
1 Input Files, 101mb total:" + - "L1, all files 101mb " + - "L1.3488[474,624] 199ns |----------------------------------------L1.3488-----------------------------------------|" + - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - - "L1.?[471,564] 199ns 62mb |------------------------L1.?-------------------------| " - - "L1.?[565,619] 199ns 36mb |-------------L1.?-------------| " - - "L1.?[620,622] 199ns 3mb |L1.?|" + - "L1.?[474,561] 199ns 59mb |-----------------------L1.?-----------------------| " + - "L1.?[562,618] 199ns 38mb |-------------L1.?--------------| " + - "L1.?[619,624] 199ns 4mb |L1.?|" - "Committing partition 1:" - " Soft Deleting 1 files: L1.3488" - " Creating 3 files" - - "**** Simulation run 549, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[423]). 2 Input Files, 168mb total:" - - "L1 " - - "L1.3442[362,463] 199ns 67mb|----------------------------------------L1.3442-----------------------------------------|" - - "L2 " - - "L2.3423[362,463] 19ns 101mb|----------------------------------------L2.3423-----------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 168mb total:" - - "L2 " - - "L2.?[362,423] 199ns 102mb|------------------------L2.?------------------------| " - - "L2.?[424,463] 199ns 67mb |--------------L2.?--------------| " - - "Committing partition 1:" - - " Soft Deleting 2 files: L2.3423, L1.3442" - - " Creating 2 files" - - "**** Simulation run 550, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[525, 586]). 6 Input Files, 261mb total:" + - "**** Simulation run 550, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[524, 586]). 6 Input Files, 266mb total:" - "L1 " - - "L1.3443[464,470] 199ns 5mb|L1.3443| " - - "L1.3498[471,564] 199ns 62mb |---------------------L1.3498----------------------| " - - "L1.3499[565,619] 199ns 36mb |----------L1.3499-----------| " - - "L1.3500[620,622] 199ns 3mb |L1.3500|" + - "L1.3443[462,473] 199ns 8mb|L1.3443| " + - "L1.3501[474,561] 199ns 59mb |-------------------L1.3501--------------------| " + - "L1.3502[562,618] 199ns 38mb |-----------L1.3502-----------| " + - "L1.3503[619,624] 199ns 4mb |L1.3503|" - "L2 " - - "L2.3424[464,564] 19ns 100mb|-----------------------L2.3424------------------------| " - - "L2.3425[565,619] 19ns 56mb |----------L2.3425-----------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:" + - "L2.3424[462,561] 19ns 100mb|-----------------------L2.3424-----------------------| " + - "L2.3425[562,618] 19ns 57mb |-----------L2.3425-----------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 266mb total:" - "L2 " - - "L2.?[464,525] 199ns 101mb|--------------L2.?--------------| " - - "L2.?[526,586] 199ns 99mb |--------------L2.?--------------| " - - "L2.?[587,622] 199ns 61mb |------L2.?-------| " + - "L2.?[462,524] 199ns 103mb|--------------L2.?--------------| " + - "L2.?[525,586] 199ns 101mb |-------------L2.?--------------| " + - "L2.?[587,624] 199ns 62mb |-------L2.?-------| " - "Committing partition 1:" - - " Soft Deleting 6 files: L2.3424, L2.3425, L1.3443, L1.3498, L1.3499, L1.3500" + - " Soft Deleting 6 files: L2.3424, L2.3425, L1.3443, L1.3501, L1.3502, L1.3503" - " Creating 3 files" - - "**** Simulation run 551, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[774, 925]). 
3 Input Files, 298mb total:" + - "**** Simulation run 551, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[775, 925]). 3 Input Files, 201mb total:" - "L1 " - - "L1.3489[623,773] 199ns 100mb|----------L1.3489-----------| " - - "L1.3490[774,920] 199ns 98mb |----------L1.3490----------| " - - "L1.3491[921,1072] 199ns 100mb |----------L1.3491-----------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 298mb total:" + - "L1.3489[625,774] 199ns 100mb|-----------------L1.3489------------------| " + - "L1.3490[775,775] 199ns 682kb |L1.3490| " + - "L1.3491[776,926] 199ns 101mb |-----------------L1.3491------------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 201mb total:" - "L2 " - - "L2.?[623,774] 199ns 100mb|------------L2.?------------| " - - "L2.?[775,925] 199ns 100mb |------------L2.?------------| " - - "L2.?[926,1072] 199ns 98mb |-----------L2.?------------| " + - "L2.?[625,775] 199ns 101mb|-------------------L2.?-------------------| " + - "L2.?[776,925] 199ns 100mb |-------------------L2.?-------------------| " + - "L2.?[926,926] 199ns 682kb |L2.?|" - "Committing partition 1:" - " Soft Deleting 3 files: L1.3489, L1.3490, L1.3491" - " Creating 3 files" - - "**** Simulation run 552, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1248, 1423]). 3 Input Files, 299mb total:" + - "**** Simulation run 552, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1077, 1227]). 3 Input Files, 242mb total:" - "L1 " - - "L1.3492[1073,1223] 199ns 100mb|--------L1.3492--------| " - - "L1.3493[1224,1370] 199ns 98mb |--------L1.3493--------| " - - "L1.3494[1371,1593] 199ns 100mb |--------------L1.3494---------------| " - - "**** 3 Output Files (parquet_file_id not yet assigned), 299mb total:" + - "L1.3492[927,1076] 199ns 100mb|--------------L1.3492--------------| " + - "L1.3493[1077,1138] 199ns 41mb |---L1.3493---| " + - "L1.3494[1139,1289] 199ns 101mb |--------------L1.3494--------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 242mb total:" - "L2 " - - "L2.?[1073,1248] 199ns 100mb|------------L2.?------------| " - - "L2.?[1249,1423] 199ns 100mb |------------L2.?------------| " - - "L2.?[1424,1593] 199ns 98mb |-----------L2.?------------| " + - "L2.?[927,1077] 199ns 101mb|---------------L2.?----------------| " + - "L2.?[1078,1227] 199ns 100mb |---------------L2.?----------------| " + - "L2.?[1228,1289] 199ns 41mb |----L2.?-----| " - "Committing partition 1:" - " Soft Deleting 3 files: L1.3492, L1.3493, L1.3494" - " Creating 3 files" - - "**** Simulation run 553, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1703]). 2 Input Files, 199mb total:" + - "**** Simulation run 553, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[1440, 1590]). 
3 Input Files, 261mb total:" - "L1 " - - "L1.3495[1594,1811] 199ns 99mb|----------------------------------------L1.3495-----------------------------------------|" - - "L2 " - - "L2.3437[1671,1811] 199ns 101mb |------------------------L2.3437-------------------------| " - - "**** 2 Output Files (parquet_file_id not yet assigned), 199mb total:" + - "L1.3495[1290,1439] 199ns 100mb|------------L1.3495-------------| " + - "L1.3496[1440,1530] 199ns 61mb |-----L1.3496------| " + - "L1.3497[1531,1681] 199ns 101mb |------------L1.3497-------------| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 261mb total:" - "L2 " - - "L2.?[1594,1703] 199ns 100mb|-------------------L2.?--------------------| " - - "L2.?[1704,1811] 199ns 99mb |-------------------L2.?-------------------| " + - "L2.?[1290,1440] 199ns 101mb|--------------L2.?--------------| " + - "L2.?[1441,1590] 199ns 100mb |--------------L2.?--------------| " + - "L2.?[1591,1681] 199ns 61mb |-------L2.?-------| " - "Committing partition 1:" - - " Soft Deleting 2 files: L2.3437, L1.3495" - - " Creating 2 files" - - "**** Simulation run 554, type=compact(TotalSizeLessThanMaxCompactSize). 1 Input Files, 2kb total:" - - "L1, all files 2kb " - - "L1.3497[2717,1990000] 199ns|----------------------------------------L1.3497-----------------------------------------|" - - "**** 1 Output Files (parquet_file_id not yet assigned), 2kb total:" - - "L2, all files 2kb " - - "L2.?[2717,1990000] 199ns |------------------------------------------L2.?------------------------------------------|" + - " Soft Deleting 3 files: L1.3495, L1.3496, L1.3497" + - " Creating 3 files" + - "**** Simulation run 554, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[723271, 1444860]). 3 Input Files, 276mb total:" + - "L1 " + - "L1.3500[2717,1990000] 199ns 2kb|----------------------------------------L1.3500----------------------------------------| " + - "L1.3498[1682,1811] 199ns 87mb|L1.3498| " + - "L1.2953[1812,2716] 199ns 189mb|L1.2953| " + - "**** 3 Output Files (parquet_file_id not yet assigned), 276mb total:" + - "L2 " + - "L2.?[1682,723271] 199ns 100mb|-------------L2.?-------------| " + - "L2.?[723272,1444860] 199ns 100mb |-------------L2.?-------------| " + - "L2.?[1444861,1990000] 199ns 76mb |---------L2.?---------| " - "Committing partition 1:" - - " Soft Deleting 1 files: L1.3497" - - " Upgrading 1 files level to CompactionLevel::L2: L1.2953" - - " Creating 1 files" - - "**** Final Output Files (12.21gb written)" + - " Soft Deleting 3 files: L1.2953, L1.3498, L1.3500" + - " Creating 3 files" + - "**** Final Output Files (12.5gb written)" - "L2 " - - "L2.2953[1812,2716] 199ns 190mb|L2.2953| " - - "L2.3449[1,66] 199ns 100mb|L2.3449| " - - "L2.3450[67,102] 199ns 55mb|L2.3450| " - - "L2.3451[103,164] 199ns 102mb|L2.3451| " - - "L2.3452[165,204] 199ns 67mb|L2.3452| " - - "L2.3453[205,265] 199ns 100mb|L2.3453| " - - "L2.3454[266,325] 199ns 99mb|L2.3454| " - - "L2.3455[326,361] 199ns 62mb|L2.3455| " - - "L2.3501[362,423] 199ns 102mb|L2.3501| " - - "L2.3502[424,463] 199ns 67mb|L2.3502| " - - "L2.3503[464,525] 199ns 101mb|L2.3503| " - - "L2.3504[526,586] 199ns 99mb|L2.3504| " - - "L2.3505[587,622] 199ns 61mb|L2.3505| " - - "L2.3506[623,774] 199ns 100mb|L2.3506| " - - "L2.3507[775,925] 199ns 100mb|L2.3507| " - - "L2.3508[926,1072] 199ns 98mb|L2.3508| " - - "L2.3509[1073,1248] 199ns 100mb|L2.3509| " - - "L2.3510[1249,1423] 199ns 100mb|L2.3510| " - - "L2.3511[1424,1593] 199ns 98mb|L2.3511| " - - "L2.3512[1594,1703] 199ns 100mb|L2.3512| " - - 
"L2.3513[1704,1811] 199ns 99mb|L2.3513| " - - "L2.3514[2717,1990000] 199ns 2kb|----------------------------------------L2.3514----------------------------------------| " - - "WARNING: file L2.2953[1812,2716] 199ns 190mb exceeds soft limit 100mb by more than 50%" + - "L2.3449[1,66] 199ns 102mb|L2.3449| " + - "L2.3450[67,101] 199ns 54mb|L2.3450| " + - "L2.3451[102,162] 199ns 102mb|L2.3451| " + - "L2.3452[163,202] 199ns 67mb|L2.3452| " + - "L2.3453[203,263] 199ns 102mb|L2.3453| " + - "L2.3454[264,323] 199ns 100mb|L2.3454| " + - "L2.3455[324,360] 199ns 62mb|L2.3455| " + - "L2.3456[361,421] 199ns 102mb|L2.3456| " + - "L2.3457[422,461] 199ns 67mb|L2.3457| " + - "L2.3504[462,524] 199ns 103mb|L2.3504| " + - "L2.3505[525,586] 199ns 101mb|L2.3505| " + - "L2.3506[587,624] 199ns 62mb|L2.3506| " + - "L2.3507[625,775] 199ns 101mb|L2.3507| " + - "L2.3508[776,925] 199ns 100mb|L2.3508| " + - "L2.3509[926,926] 199ns 682kb|L2.3509| " + - "L2.3510[927,1077] 199ns 101mb|L2.3510| " + - "L2.3511[1078,1227] 199ns 100mb|L2.3511| " + - "L2.3512[1228,1289] 199ns 41mb|L2.3512| " + - "L2.3513[1290,1440] 199ns 101mb|L2.3513| " + - "L2.3514[1441,1590] 199ns 100mb|L2.3514| " + - "L2.3515[1591,1681] 199ns 61mb|L2.3515| " + - "L2.3516[1682,723271] 199ns 100mb|-----------L2.3516------------| " + - "L2.3517[723272,1444860] 199ns 100mb |-----------L2.3517------------| " + - "L2.3518[1444861,1990000] 199ns 76mb |-------L2.3518--------| " "### ); } @@ -12191,9 +12199,9 @@ async fn split_then_undo_it() { - "**** Final Output Files (1.51gb written)" - "L2 " - "L2.46[1679961600071000000,1680022452125054234] 1681420678.89s 100mb|----------------------------L2.46----------------------------| " - - "L2.56[1680022452125054235,1680032319822461332] 1681420678.89s 100mb |-L2.56--| " - - "L2.57[1680032319822461333,1680042187519868429] 1681420678.89s 100mb |-L2.57--| " - - "L2.58[1680042187519868430,1680045769912063525] 1681420678.89s 36mb |L2.58|" + - "L2.56[1680022452125054235,1680032319822421508] 1681420678.89s 100mb |-L2.56--| " + - "L2.57[1680032319822421509,1680042187519788781] 1681420678.89s 100mb |-L2.57--| " + - "L2.58[1680042187519788782,1680045769912063525] 1681420678.89s 36mb |L2.58|" - "L2.59[1680045769912063526,1680046349505534795] 1681420678.89s 100mb |L2.59|" - "L2.60[1680046349505534796,1680046929099006064] 1681420678.89s 100mb |L2.60|" - "L2.61[1680046929099006065,1680047338160274709] 1681420678.89s 71mb |L2.61|" @@ -12815,18 +12823,18 @@ async fn split_precent_loop() { - "L1.3[1676005158277000000,1676010156669000000] 1676010160.05s 58mb |L1.3| " - "WARNING: file L0.40[1676020762355000000,1676036230752000000] 1676036233.84s 159mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.43[1676039845773000000,1676063836202000000] 1676063839.07s 242mb exceeds soft limit 100mb by more than 50%" - - "**** Final Output Files (3.4gb written)" + - "**** Final Output Files (3.1gb written)" - "L1 " - "L1.260[1676045833054395546,1676050409609000000] 1676066475.26s 41mb |L1.260| " - "L2 " - "L2.228[1676050409609000001,1676066212011000000] 1676066475.26s 145mb |----L2.228-----| " - "L2.251[1675987200001000000,1675995209209749739] 1676066475.26s 100mb|L2.251-| " - - "L2.261[1675995209209749740,1676003044683020379] 1676066475.26s 100mb |L2.261| " - - "L2.262[1676003044683020380,1676010880156291018] 1676066475.26s 100mb |L2.262| " - - "L2.263[1676010880156291019,1676018715629412205] 1676066475.26s 100mb |L2.263| " - - "L2.264[1676018715629412206,1676027900853050774] 1676066475.26s 100mb |-L2.264-| " - - 
"L2.265[1676027900853050775,1676037086076689342] 1676066475.26s 100mb |-L2.265-| " - - "L2.266[1676037086076689343,1676045833054395545] 1676066475.26s 95mb |L2.266-| " + - "L2.252[1675995209209749740,1676003218418499478] 1676066475.26s 100mb |L2.252-| " + - "L2.254[1676003218418499479,1676010967023992579] 1676066475.26s 100mb |L2.254| " + - "L2.255[1676010967023992580,1676018715629485679] 1676066475.26s 100mb |L2.255| " + - "L2.261[1676018715629485680,1676027900853158703] 1676066475.26s 100mb |-L2.261-| " + - "L2.262[1676027900853158704,1676037086076831726] 1676066475.26s 100mb |-L2.262-| " + - "L2.263[1676037086076831727,1676045833054395545] 1676066475.26s 95mb |L2.263-| " "### ); } @@ -13590,29 +13598,29 @@ async fn very_big_overlapped_backlog() { - "L2.1126[181951,181999] 299ns 5mb |L2.1126|" - "L2.1127[182000,183599] 299ns 84mb |L2.1127|" - "L2.1128[183600,183999] 299ns 21mb |L2.1128|" - - "L2.1129[184000,184981] 299ns 100mb |L2.1129|" - - "L2.1130[184982,185962] 299ns 100mb |L2.1130|" - - "L2.1131[185963,185999] 299ns 4mb |L2.1131|" + - "L2.1129[184000,184980] 299ns 100mb |L2.1129|" + - "L2.1130[184981,185960] 299ns 100mb |L2.1130|" + - "L2.1131[185961,185999] 299ns 4mb |L2.1131|" - "L2.1132[186000,186980] 299ns 100mb |L2.1132|" - "L2.1133[186981,187960] 299ns 100mb |L2.1133|" - "L2.1134[187961,187999] 299ns 4mb |L2.1134|" - - "L2.1135[188000,188981] 299ns 100mb |L2.1135|" - - "L2.1136[188982,189962] 299ns 100mb |L2.1136|" - - "L2.1137[189963,189999] 299ns 4mb |L2.1137|" - - "L2.1138[190000,190981] 299ns 100mb |L2.1138|" - - "L2.1139[190982,191962] 299ns 100mb |L2.1139|" - - "L2.1140[191963,191999] 299ns 4mb |L2.1140|" + - "L2.1135[188000,188980] 299ns 100mb |L2.1135|" + - "L2.1136[188981,189960] 299ns 100mb |L2.1136|" + - "L2.1137[189961,189999] 299ns 4mb |L2.1137|" + - "L2.1138[190000,190980] 299ns 100mb |L2.1138|" + - "L2.1139[190981,191960] 299ns 100mb |L2.1139|" + - "L2.1140[191961,191999] 299ns 4mb |L2.1140|" - "L2.1141[192000,192980] 299ns 100mb |L2.1141|" - "L2.1142[192981,193960] 299ns 100mb |L2.1142|" - "L2.1143[193961,193999] 299ns 4mb |L2.1143|" - - "L2.1144[194000,194981] 299ns 100mb |L2.1144|" - - "L2.1145[194982,195962] 299ns 100mb |L2.1145|" - - "L2.1146[195963,195999] 299ns 4mb |L2.1146|" + - "L2.1144[194000,194980] 299ns 100mb |L2.1144|" + - "L2.1145[194981,195960] 299ns 100mb |L2.1145|" + - "L2.1146[195961,195999] 299ns 4mb |L2.1146|" - "L2.1147[196000,196980] 299ns 100mb |L2.1147|" - "L2.1148[196981,197960] 299ns 100mb |L2.1148|" - - "L2.1150[197961,198941] 299ns 100mb |L2.1150|" - - "L2.1151[198942,199921] 299ns 100mb |L2.1151|" - - "L2.1152[199922,200000] 299ns 8mb |L2.1152|" + - "L2.1150[197961,198942] 299ns 100mb |L2.1150|" + - "L2.1151[198943,199923] 299ns 100mb |L2.1151|" + - "L2.1152[199924,200000] 299ns 8mb |L2.1152|" "### ); } diff --git a/compactor_test_utils/src/simulator.rs b/compactor_test_utils/src/simulator.rs index a9c960a2ea..5fc9536b7f 100644 --- a/compactor_test_utils/src/simulator.rs +++ b/compactor_test_utils/src/simulator.rs @@ -278,7 +278,7 @@ fn even_time_split( ) -> Vec<SimulatedFile> { let overall_min_time = files.iter().map(|f| f.min_time).min().unwrap(); let overall_max_time = files.iter().map(|f| f.max_time).max().unwrap(); - let overall_time_range = overall_max_time - overall_min_time; + let overall_time_range = overall_max_time - overall_min_time + 1; let total_input_rows: i64 = files.iter().map(|f| f.row_count).sum(); let total_input_size: i64 = files.iter().map(|f| f.file_size_bytes).sum(); @@ -291,6 +291,9 @@ fn even_time_split( 
"split times {last_split} {split} must be in ascending order", ); assert!( + // split time is the last ns in the resulting 'left' file. If split time + // matches the last ns of the input file, the input file does not need + // split at this time. Timestamp::new(*split) < overall_max_time, "split time {} must be less than time range max {}", split, @@ -327,7 +330,8 @@ fn even_time_split( let mut simulated_files: Vec<_> = time_ranges .into_iter() .map(|(min_time, max_time)| { - let p = ((max_time - min_time).get() as f64) / ((overall_time_range).get() as f64); + let p = + ((max_time - min_time).get() as f64 + 1.0) / ((overall_time_range).get() as f64); let file_size_bytes = (total_input_size as f64 * p) as i64; let row_count = (total_input_rows as f64 * p) as i64;
6a68b6edf0de04946222d7a46723c092e33df009
Dom Dwyer
2023-08-23 14:46:09
less proptest discards
Generate non-empty strings as inputs to proptest tests instead of generating random strings and filtering.
null
test(partition): less proptest discards Generate non-empty strings as inputs to proptest tests instead of generating random strings and filtering.
diff --git a/data_types/src/partition.rs b/data_types/src/partition.rs index e924624cd5..a4f791bbd4 100644 --- a/data_types/src/partition.rs +++ b/data_types/src/partition.rs @@ -500,10 +500,8 @@ mod tests { #[test] fn partition_hash_id_representations( table_id in 0..i64::MAX, - partition_key in any::<String>(), + partition_key in ".+", ) { - prop_assume!(!partition_key.is_empty()); - let table_id = TableId::new(table_id); let partition_key = PartitionKey::from(partition_key);
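The change swaps a filter-after-generation strategy for a constructive one. Below is a small sketch of the two styles, assuming proptest 1.x; the property bodies here are illustrative stand-ins (the real test exercises PartitionHashId representations), only the input strategies matter.

use proptest::prelude::*;

proptest! {
    // Filtering approach: generate any String, then discard empty ones.
    // Every discarded case counts towards proptest's rejection limit and
    // wastes a generated input.
    #[test]
    fn filtered(key in any::<String>()) {
        prop_assume!(!key.is_empty());
        prop_assert!(!key.is_empty());
    }

    // Constructive approach: the regex strategy ".+" only ever yields
    // non-empty strings, so no generated case is discarded.
    #[test]
    fn constructive(key in ".+") {
        prop_assert!(!key.is_empty());
    }
}

Generating only valid inputs also tends to shrink better, since the shrinker never wanders into the filtered-out region of the input space.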
39768fa9896464648c7cf89ef1f73ed89d29d01a
Dom Dwyer
2023-09-20 13:45:06
init anti-entropy merkle search tree
Adds initialisation code to the routers to instantiate an AntiEntropyActor, pre-populate the Merkle Search Tree during schema warmup, and maintain it at runtime.
null
feat(router): init anti-entropy merkle search tree Adds initialisation code to the routers to instantiate an AntiEntropyActor, pre-populate the Merkle Search Tree during schema warmup, and maintain it at runtime.
diff --git a/Cargo.lock b/Cargo.lock index c44f8c593a..757b3af3d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3242,6 +3242,7 @@ dependencies = [ "metric", "mutable_batch", "object_store", + "observability_deps", "router", "thiserror", "tokio", diff --git a/ioxd_router/Cargo.toml b/ioxd_router/Cargo.toml index 51dbaaac55..2a31381f92 100644 --- a/ioxd_router/Cargo.toml +++ b/ioxd_router/Cargo.toml @@ -19,13 +19,9 @@ ioxd_common = { path = "../ioxd_common" } metric = { path = "../metric" } mutable_batch = { path = "../mutable_batch" } object_store = { workspace = true } +observability_deps = { version = "0.1.0", path = "../observability_deps" } router = { path = "../router" } thiserror = "1.0.48" -tokio-util = { version = "0.7.8" } -trace = { path = "../trace" } -workspace-hack = { version = "0.1", path = "../workspace-hack" } - -[dev-dependencies] tokio = { version = "1.32", features = [ "macros", "net", @@ -35,3 +31,6 @@ tokio = { version = "1.32", features = [ "sync", "time", ] } +tokio-util = { version = "0.7.8" } +trace = { path = "../trace" } +workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs index 22e78264c0..fa0def3d28 100644 --- a/ioxd_router/src/lib.rs +++ b/ioxd_router/src/lib.rs @@ -14,6 +14,7 @@ use gossip::TopicInterests; use gossip_schema::{dispatcher::SchemaRx, handle::SchemaTx}; +use observability_deps::tracing::info; // Workaround for "unused crate" lint false positives. use workspace_hack as _; @@ -54,7 +55,11 @@ use router::{ InstrumentationDecorator, Partitioner, RetentionValidator, RpcWrite, SchemaValidator, }, gossip::{ - namespace_cache::NamespaceSchemaGossip, schema_change_observer::SchemaChangeObserver, + anti_entropy::mst::{ + actor::AntiEntropyActor, handle::AntiEntropyHandle, merkle::MerkleTree, + }, + namespace_cache::NamespaceSchemaGossip, + schema_change_observer::SchemaChangeObserver, }, namespace_cache::{ metrics::InstrumentedCache, MaybeLayer, MemoryNamespaceCache, NamespaceCache, @@ -260,17 +265,32 @@ pub async fn create_router_server_type( // Initialise an instrumented namespace cache to be shared with the schema // validator, and namespace auto-creator that reports cache hit/miss/update // metrics. - let ns_cache = InstrumentedCache::new( + let ns_cache = Arc::new(InstrumentedCache::new( ShardedCache::new(std::iter::repeat_with(MemoryNamespaceCache::default).take(10)), &metrics, - ); + )); + + // Initialise the anti-entropy subsystem, responsible for maintaining a + // Merkle Search Tree that encodes the content of the namespace schema + // cache. + let (actor, mst) = AntiEntropyActor::new(Arc::clone(&ns_cache)); + // Start the actor task, which exits when the `mst` handle to it drops. + tokio::spawn(actor.run()); // Pre-warm the cache before adding the gossip layer to avoid broadcasting // the full cache content at startup. - pre_warm_schema_cache(&ns_cache, &*catalog) + // + // This method blocks until all schemas have been stored in the cache, and + // all schemas have been enqueued for the MST actor to process. + pre_warm_schema_cache(&ns_cache, &*catalog, &mst) .await .expect("namespace cache pre-warming failed"); + // Now the cache and anti-entropy merkle tree have been pre-populated, wrap + // the namespace cache in an observer that tracks any future changes to the + // cache content, ensuring the MST remains in-sync. + let ns_cache = MerkleTree::new(ns_cache, mst); + // Optionally initialise the schema gossip subsystem. 
// // The schema gossip primitives sit in the stack of NamespaceCache layers: @@ -472,24 +492,39 @@ pub async fn create_router_server_type( async fn pre_warm_schema_cache<T>( cache: &T, catalog: &dyn Catalog, + mst: &AntiEntropyHandle, ) -> Result<(), iox_catalog::interface::Error> where T: NamespaceCache, { - iox_catalog::interface::list_schemas(catalog) - .await? - .for_each(|(ns, schema)| { - let name = NamespaceName::try_from(ns.name) - .expect("cannot convert existing namespace string to a `NamespaceName` instance"); + let mut n = 0; + for (ns, schema) in iox_catalog::interface::list_schemas(catalog).await? { + let name = NamespaceName::try_from(ns.name) + .expect("cannot convert existing namespace string to a `NamespaceName` instance"); + + cache.put_schema(name.clone(), schema); + mst.observe_update_blocking(name).await; + n += 1; + } + + info!(n, "pre-warmed schema cache"); - cache.put_schema(name, schema); - }); + // Calculate the root hash after pre-warming. + // + // This causes the merkle tree pages to be pre-hashed ahead of first use, + // and is guaranteed to happen after the backlog of MST updates has + // completed (schema updates are processed before other operations). + let root_hash = mst.content_hash().await; + + info!(%root_hash, "initialised anti-entropy merkle tree"); Ok(()) } #[cfg(test)] mod tests { + use std::sync::Arc; + use data_types::ColumnType; use iox_catalog::{ mem::MemCatalog, @@ -513,8 +548,16 @@ mod tests { drop(repos); // Or it'll deadlock. - let cache = MemoryNamespaceCache::default(); - pre_warm_schema_cache(&cache, &*catalog) + let cache = Arc::new(MemoryNamespaceCache::default()); + + let (actor, mst) = AntiEntropyActor::new(Arc::clone(&cache)); + tokio::spawn(actor.run()); + + // Remember the initial hash of the merkle tree covering the cache + // content. + let initial_hash = mst.content_hash().await; + + pre_warm_schema_cache(&cache, &*catalog, &mst) .await .expect("pre-warming failed"); @@ -525,5 +568,10 @@ mod tests { .expect("should contain a schema"); assert!(got.tables.get("name").is_some()); + + // Assert the MST has observed at least one cache update, causing the + // root hashes to diverge. + let now = mst.content_hash().await; + assert_ne!(initial_hash, now); } } diff --git a/router/src/gossip/anti_entropy/mst/handle.rs b/router/src/gossip/anti_entropy/mst/handle.rs index 010afccf13..bc0ca0cd14 100644 --- a/router/src/gossip/anti_entropy/mst/handle.rs +++ b/router/src/gossip/anti_entropy/mst/handle.rs @@ -97,11 +97,23 @@ impl AntiEntropyHandle { } } + /// Send `name` to the MST actor to observe a new schema state. + /// + /// This method is the blocking variant of the non-blocking + /// [`AntiEntropyHandle::observe_update()`] that waits for `name` to be + /// successfully enqueued (blocking if the queue is full). + pub async fn observe_update_blocking(&self, name: NamespaceName<'static>) { + self.schema_tx + .send(name) + .await + .expect("mst actor not running"); + } + /// Return the current content hash ([`RootHash`]) describing the set of /// [`NamespaceSchema`] observed so far. /// /// [`NamespaceSchema`]: data_types::NamespaceSchema - pub(crate) async fn content_hash(&self) -> RootHash { + pub async fn content_hash(&self) -> RootHash { let (tx, rx) = oneshot::channel(); self.op_tx
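The initialisation added here follows an actor/handle pattern: AntiEntropyActor::new() returns the actor plus a handle, the actor task is spawned and exclusively owns the Merkle Search Tree, and the handle enqueues updates and requests the root hash over channels. The sketch below is a hypothetical, heavily simplified stand-in (a BTreeSet and a std hasher in place of the MST, and a single FIFO channel instead of the prioritised schema/op channels shown in the diff). It only illustrates the property the pre-warm code relies on: a content_hash() call observes every update enqueued before it.

use std::collections::BTreeSet;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

use tokio::sync::{mpsc, oneshot};

/// Operations the handle can enqueue for the actor.
enum Op {
    /// A namespace schema changed; fold its name into the state.
    Observe(String),
    /// Reply with a hash summarising everything observed so far.
    ContentHash(oneshot::Sender<u64>),
}

/// Cheaply cloneable handle; the actor exits once all handles are dropped.
#[derive(Clone)]
struct Handle {
    tx: mpsc::Sender<Op>,
}

impl Handle {
    async fn observe_update(&self, name: String) {
        self.tx.send(Op::Observe(name)).await.expect("actor not running");
    }

    async fn content_hash(&self) -> u64 {
        let (tx, rx) = oneshot::channel();
        self.tx.send(Op::ContentHash(tx)).await.expect("actor not running");
        rx.await.expect("actor not running")
    }
}

/// The actor owns the state; nothing else touches it directly.
struct Actor {
    rx: mpsc::Receiver<Op>,
    state: BTreeSet<String>, // stand-in for the Merkle Search Tree
}

impl Actor {
    fn new() -> (Self, Handle) {
        let (tx, rx) = mpsc::channel(128);
        (Self { rx, state: BTreeSet::new() }, Handle { tx })
    }

    async fn run(mut self) {
        // Ends once every Handle has been dropped and the queue is drained.
        while let Some(op) = self.rx.recv().await {
            match op {
                Op::Observe(name) => {
                    self.state.insert(name);
                }
                Op::ContentHash(reply) => {
                    let mut hasher = DefaultHasher::new();
                    self.state.hash(&mut hasher);
                    let _ = reply.send(hasher.finish());
                }
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (actor, handle) = Actor::new();
    tokio::spawn(actor.run());

    // Pre-warm: enqueue updates, then request the hash. Because the queue is
    // FIFO, the hash reflects every update enqueued before the request.
    handle.observe_update("ns_a".to_string()).await;
    handle.observe_update("ns_b".to_string()).await;
    println!("root hash: {:x}", handle.content_hash().await);
}

The real handle also exposes a non-blocking observe_update(); the blocking variant added in this commit is what lets pre-warming guarantee every schema name is enqueued before the first content_hash() call computes the initial root hash.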
911ba92ab4133e75fe2a420e16ed9cb4cf32196f
praveen-influx
2025-02-02 16:51:53
clear query buffer incrementally when snapshotting (#25948)
* feat: clear query buffer incrementally when snapshotting This commit clears the query buffer incrementally as soon as a table's data in the buffer is written into a parquet file and cached. Previously, clearing the buffer happened at the end, in the background. * refactor: only clear buffer after adding to persisted files * refactor: rename function
null
feat: clear query buffer incrementally when snapshotting (#25948) * feat: clear query buffer incrementally when snapshotting This commit clears the query buffer incrementally as soon as a table's data in the buffer is written into a parquet file and cached. Previously, clearing the buffer happened at the end, in the background. * refactor: only clear buffer after adding to persisted files * refactor: rename function
diff --git a/influxdb3_write/src/write_buffer/persisted_files.rs b/influxdb3_write/src/write_buffer/persisted_files.rs index ece6b6efaa..c14a320276 100644 --- a/influxdb3_write/src/write_buffer/persisted_files.rs +++ b/influxdb3_write/src/write_buffer/persisted_files.rs @@ -36,6 +36,12 @@ impl PersistedFiles { inner.add_persisted_snapshot(persisted_snapshot); } + /// Add single file to a table + pub fn add_persisted_file(&self, db_id: &DbId, table_id: &TableId, parquet_file: &ParquetFile) { + let mut inner = self.inner.write(); + inner.add_persisted_file(db_id, table_id, parquet_file); + } + /// Get the list of files for a given database and table, always return in descending order of min_time pub fn get_files(&self, db_id: DbId, table_id: TableId) -> Vec<ParquetFile> { self.get_files_filtered(db_id, table_id, &ChunkFilter::default()) @@ -124,6 +130,26 @@ impl Inner { update_persisted_files_with_snapshot(false, persisted_snapshot, &mut self.files); self.parquet_files_count += file_count; } + + pub fn add_persisted_file( + &mut self, + db_id: &DbId, + table_id: &TableId, + parquet_file: &ParquetFile, + ) { + let existing_parquet_files = self + .files + .entry(*db_id) + .or_default() + .entry(*table_id) + .or_default(); + if !existing_parquet_files.contains(parquet_file) { + self.parquet_files_row_count += parquet_file.row_count; + self.parquet_files_size_mb += as_mb(parquet_file.size_bytes); + existing_parquet_files.push(parquet_file.clone()); + } + self.parquet_files_count += 1; + } } fn as_mb(bytes: u64) -> f64 { diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs index 9f643ba855..cacf6d10b9 100644 --- a/influxdb3_write/src/write_buffer/queryable_buffer.rs +++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs @@ -283,6 +283,8 @@ impl QueryableBuffer { let executor = Arc::clone(&executor); let persisted_snapshot = Arc::clone(&persisted_snapshot); let parquet_cache = parquet_cache.clone(); + let buffer = Arc::clone(&buffer); + let persisted_files = Arc::clone(&persisted_files); set.spawn(async move { let path = persist_job.path.to_string(); @@ -313,20 +315,33 @@ impl QueryableBuffer { // https://github.com/influxdata/influxdb/issues/25676 // https://github.com/influxdata/influxdb/issues/25677 .expect("sort, deduplicate, and persist buffer data as parquet"); + let parquet_file = ParquetFile { + id: ParquetFileId::new(), + path, + size_bytes: file_size_bytes, + row_count: file_meta_data.num_rows as u64, + chunk_time, + min_time, + max_time, + }; - persisted_snapshot.lock().add_parquet_file( - database_id, - table_id, - ParquetFile { - id: ParquetFileId::new(), - path, - size_bytes: file_size_bytes, - row_count: file_meta_data.num_rows as u64, - chunk_time, - min_time, - max_time, - }, - ) + { + // we can clear the buffer as we move on + let mut buffer = buffer.write(); + + // add file first + persisted_files.add_persisted_file(&database_id, &table_id, &parquet_file); + // then clear the buffer + if let Some(db) = buffer.db_to_table.get_mut(&database_id) { + if let Some(table) = db.get_mut(&table_id) { + table.clear_snapshots(); + } + } + } + + persisted_snapshot + .lock() + .add_parquet_file(database_id, table_id, parquet_file) }); } @@ -386,24 +401,6 @@ impl QueryableBuffer { } } - // clear out the write buffer and add all the persisted files to the persisted files - // on a background task to ensure that the cache has been populated before we clear - // the buffer - tokio::spawn(async move { - // same reason as 
explained above, if persist jobs are empty, no snapshotting - // has happened so no need to clear the snapshots - if !persist_jobs_empty { - let mut buffer = buffer.write(); - for (_, table_map) in buffer.db_to_table.iter_mut() { - for (_, table_buffer) in table_map.iter_mut() { - table_buffer.clear_snapshots(); - } - } - - persisted_files.add_persisted_snapshot_files(persisted_snapshot); - } - }); - let _ = sender.send(snapshot_details); });
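The ordering in the new persist path matters: the freshly written (and cached) parquet file is registered with the persisted-files index first, and only then is the corresponding buffered snapshot dropped, both while holding the buffer's write lock. A simplified sketch of that invariant with stand-in types (a std RwLock and Vec<String> in place of the real buffer and persisted-files structures):

use std::sync::RwLock;

#[derive(Default)]
struct TableState {
    buffered: Vec<String>,  // stand-in for in-memory snapshot batches
    persisted: Vec<String>, // stand-in for the persisted-files index
}

fn finish_persist_job(state: &RwLock<TableState>, parquet_file: String) {
    let mut guard = state.write().unwrap();
    // 1. Register the parquet file first ...
    guard.persisted.push(parquet_file);
    // 2. ... and only then drop the buffered copy of the same data. In the
    //    opposite order, a concurrent reader could observe the rows in
    //    neither the buffer nor the persisted files.
    guard.buffered.clear();
}

fn main() {
    let state = RwLock::new(TableState {
        buffered: vec!["row batch".to_string()],
        persisted: vec![],
    });
    finish_persist_job(&state, "table/chunk-0.parquet".to_string());

    let guard = state.read().unwrap();
    assert_eq!(guard.persisted.len(), 1);
    assert!(guard.buffered.is_empty());
}

Doing both steps together per table is also what removes the need for the old background task that cleared every table's snapshots after the whole snapshot had been persisted.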
8729977851f9973bc0fdaca2730a3988cd516ecf
Andrew Lamb
2022-12-13 15:16:09
Upgrade datafusion / arrow to 29.0.0 to get flightsql client (#6396)
* chore: Update datafusion pin * chore: Update for API change * chore: Run cargo hakari tasks
Co-authored-by: CircleCI[bot] <[email protected]>
chore: Upgrade datafusion / arrow to 29.0.0 to get flightsql client (#6396) * chore: Update datafusion pin * chore: Update for API change * chore: Run cargo hakari tasks Co-authored-by: CircleCI[bot] <[email protected]>
diff --git a/Cargo.lock b/Cargo.lock index b222651a33..74870060d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,9 +100,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "arrow" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aed9849f86164fad5cb66ce4732782b15f1bc97f8febab04e782c20cce9d4b6c" +checksum = "2fe17dc0113da7e2eaeaedbd304d347aa8ea64916d225b79a5c3f3b6b5d8da4c" dependencies = [ "ahash 0.8.2", "arrow-array", @@ -112,8 +112,10 @@ dependencies = [ "arrow-data", "arrow-ipc", "arrow-json", + "arrow-ord", "arrow-schema", "arrow-select", + "arrow-string", "chrono", "comfy-table", "half 2.1.0", @@ -121,14 +123,13 @@ dependencies = [ "multiversion", "num", "regex", - "regex-syntax", ] [[package]] name = "arrow-array" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b8504cf0a6797e908eecf221a865e7d339892720587f87c8b90262863015b08" +checksum = "b9452131e027aec3276e43449162af084db611c42ef875e54d231e6580bc6254" dependencies = [ "ahash 0.8.2", "arrow-buffer", @@ -142,9 +143,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6de64a27cea684b24784647d9608314bc80f7c4d55acb44a425e05fab39d916" +checksum = "4a301001e8ed7da638a12fa579ac5f3f154c44c0655f2ca6ed0f8586b418a779" dependencies = [ "half 2.1.0", "num", @@ -152,9 +153,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec4a54502eefe05923c385c90a005d69474fa06ca7aa2a2b123c9f9532f6178" +checksum = "048c91d067f2eb8cc327f086773e5b0f0d7714780807fc4db09366584e23bac8" dependencies = [ "arrow-array", "arrow-buffer", @@ -168,9 +169,9 @@ dependencies = [ [[package]] name = "arrow-csv" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7902bbf8127eac48554fe902775303377047ad49a9fd473c2b8cb399d092080" +checksum = "ed914cd0006a3bb9cac8136b3098ac7796ad26b82362f00d4f2e7c1a54684b86" dependencies = [ "arrow-array", "arrow-buffer", @@ -186,9 +187,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4882efe617002449d5c6b5de9ddb632339074b36df8a96ea7147072f1faa8a" +checksum = "e59619d9d102e4e6b22087b2bd60c07df76fcb68683620841718f6bc8e8f02cb" dependencies = [ "arrow-buffer", "arrow-schema", @@ -198,9 +199,9 @@ dependencies = [ [[package]] name = "arrow-flight" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4164981ac7c8e7f8a194b70490f1c27e6616fe44554f6e14b97487eab9cb4100" +checksum = "6bb6e49945f93a8fbd3ec0568167f42097b56134b88686602b9e639a7042ef38" dependencies = [ "arrow-array", "arrow-buffer", @@ -211,7 +212,7 @@ dependencies = [ "futures", "proc-macro2", "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "prost-derive 0.11.2", "tokio", "tonic", @@ -220,9 +221,9 @@ dependencies = [ [[package]] name = "arrow-ipc" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0703a6de2785828561b03a4d7793ecd333233e1b166316b4bfc7cfce55a4a7" +checksum = "fb7ad6d2fa06a1cebdaa213c59fc953b9230e560d8374aba133b572b864ec55e" 
dependencies = [ "arrow-array", "arrow-buffer", @@ -234,9 +235,9 @@ dependencies = [ [[package]] name = "arrow-json" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd23fc8c6d251f96cd63b96fece56bbb9710ce5874a627cb786e2600673595a" +checksum = "1e22efab3ad70336057660c5e5f2b72e2417e3444c27cb42dc477d678ddd6979" dependencies = [ "arrow-array", "arrow-buffer", @@ -250,17 +251,31 @@ dependencies = [ "serde_json", ] +[[package]] +name = "arrow-ord" +version = "29.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e23b623332804a65ad11e7732c351896dcb132c19f8e25d99fdb13b00aae5206" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "num", +] + [[package]] name = "arrow-schema" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f143882a80be168538a60e298546314f50f11f2a288c8d73e11108da39d26" +checksum = "69ef17c144f1253b9864f5a3e8f4c6f1e436bdd52394855d5942f132f776b64e" [[package]] name = "arrow-select" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520406331d4ad60075359524947ebd804e479816439af82bcb17f8d280d9b38c" +checksum = "e2accaf218ff107e3df0ee8f1e09b092249a1cc741c4377858a1470fd27d7096" dependencies = [ "arrow-array", "arrow-buffer", @@ -269,6 +284,21 @@ dependencies = [ "num", ] +[[package]] +name = "arrow-string" +version = "29.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a0954f9e1f45b04815ddacbde72899bf3c03a08fa6c0375f42178c4a01a510" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "regex", + "regex-syntax", +] + [[package]] name = "arrow_util" version = "0.1.0" @@ -1244,7 +1274,7 @@ dependencies = [ [[package]] name = "datafusion" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "ahash 0.8.2", "arrow", @@ -1289,7 +1319,7 @@ dependencies = [ [[package]] name = "datafusion-common" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "arrow", "chrono", @@ -1301,7 +1331,7 @@ dependencies = [ [[package]] name = "datafusion-expr" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "ahash 0.8.2", "arrow", @@ -1313,7 +1343,7 @@ dependencies = [ [[package]] name = "datafusion-optimizer" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = 
"git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "arrow", "async-trait", @@ -1328,7 +1358,7 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "ahash 0.8.2", "arrow", @@ -1357,7 +1387,7 @@ dependencies = [ [[package]] name = "datafusion-proto" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "arrow", "chrono", @@ -1368,13 +1398,13 @@ dependencies = [ "parking_lot 0.12.1", "pbjson-build", "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", ] [[package]] name = "datafusion-row" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "arrow", "datafusion-common", @@ -1385,7 +1415,7 @@ dependencies = [ [[package]] name = "datafusion-sql" version = "15.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=d33457c20c2b15d6a934e5b37ac9eb0d17e29145#d33457c20c2b15d6a934e5b37ac9eb0d17e29145" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=119f90f0a1e681639d630fe616bb4599e1d20dee#119f90f0a1e681639d630fe616bb4599e1d20dee" dependencies = [ "arrow-schema", "datafusion-common", @@ -1813,7 +1843,7 @@ dependencies = [ "pbjson-types", "predicate", "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "query_functions", "serde", "snafu", @@ -1873,7 +1903,7 @@ dependencies = [ "hyper", "pin-project", "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "prost-types 0.11.2", "tokio", "tokio-stream", @@ -1888,7 +1918,7 @@ name = "grpc-binary-logger-proto" version = "0.1.0" dependencies = [ "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "prost-types 0.11.2", "tonic", "tonic-build", @@ -1900,7 +1930,7 @@ name = "grpc-binary-logger-test-proto" version = "0.1.0" dependencies = [ "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "prost-types 0.11.2", "tonic", "tonic-build", @@ -3660,9 +3690,9 @@ dependencies = [ [[package]] name = "parquet" -version = "28.0.0" +version = "29.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21433e9209111bb3720b747f2f137e0d115af1af0420a7a1c26b6e88227fa353" +checksum = "d906343fd18ace6b998d5074697743e8e9358efa8c3c796a1381b98cba813338" dependencies = [ "ahash 0.8.2", "arrow-array", @@ -3779,7 +3809,7 @@ dependencies = [ "pbjson", "pbjson-build", "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "serde", ] @@ -3931,7 +3961,7 @@ dependencies = [ "once_cell", "parking_lot 0.12.1", "prost 0.11.3", - "prost-build 0.11.2", + "prost-build 0.11.3", "prost-derive 0.11.2", "sha2", "smallvec", @@ -4131,9 +4161,9 @@ dependencies = [ [[package]] name = 
"prost-build" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8b442418ea0822409d9e7d047cbf1e7e9e1760b172bf9982cf29d517c93511" +checksum = "e330bf1316db56b12c2bcfa399e8edddd4821965ea25ddb2c134b610b1c1c604" dependencies = [ "bytes", "heck", @@ -5682,7 +5712,7 @@ checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ "prettyplease", "proc-macro2", - "prost-build 0.11.2", + "prost-build 0.11.3", "quote", "syn", ] @@ -6334,6 +6364,8 @@ version = "0.1.0" dependencies = [ "ahash 0.8.2", "arrow", + "arrow-ord", + "arrow-string", "base64 0.13.1", "bitflags", "byteorder", diff --git a/Cargo.toml b/Cargo.toml index b32859fd1e..9ce2f9bd62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,12 +112,12 @@ edition = "2021" license = "MIT OR Apache-2.0" [workspace.dependencies] -arrow = { version = "28.0.0" } -arrow-flight = { version = "28.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="d33457c20c2b15d6a934e5b37ac9eb0d17e29145", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="d33457c20c2b15d6a934e5b37ac9eb0d17e29145" } +arrow = { version = "29.0.0" } +arrow-flight = { version = "29.0.0" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="119f90f0a1e681639d630fe616bb4599e1d20dee", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="119f90f0a1e681639d630fe616bb4599e1d20dee" } hashbrown = { version = "0.13.1" } -parquet = { version = "28.0.0" } +parquet = { version = "29.0.0" } # This profile optimizes for runtime performance and small binary size at the expense of longer # build times. It's most suitable for final release builds. 
diff --git a/iox_query/src/exec/seriesset/series.rs b/iox_query/src/exec/seriesset/series.rs index e28594dda3..eca17f32d8 100644 --- a/iox_query/src/exec/seriesset/series.rs +++ b/iox_query/src/exec/seriesset/series.rs @@ -285,7 +285,7 @@ impl SeriesSet { let tags = self.create_frame_tags(schema.field(index.value_index).name()); - let mut timestamps = compute::nullif( + let mut timestamps = compute::nullif::nullif( batch.column(index.timestamp_index), &compute::is_null(array).expect("is_null"), ) diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index af3d755ed0..3b17c1ac1f 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -16,8 +16,10 @@ license.workspace = true ### BEGIN HAKARI SECTION [dependencies] -ahash = { version = "0.8", default-features = false, features = ["compile-time-rng", "const-random", "getrandom", "runtime-rng"] } -arrow = { version = "28", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] } +ahash = { version = "0.8", default-features = false, features = ["getrandom", "runtime-rng"] } +arrow = { version = "29", features = ["arrow-csv", "arrow-ipc", "arrow-json", "comfy-table", "csv", "dyn_cmp_dict", "ipc", "json", "prettyprint"] } +arrow-ord = { version = "29", default-features = false, features = ["dyn_cmp_dict"] } +arrow-string = { version = "29", default-features = false, features = ["dyn_cmp_dict"] } base64 = { version = "0.13", features = ["std"] } bitflags = { version = "1" } byteorder = { version = "1", features = ["std"] } @@ -25,7 +27,7 @@ bytes = { version = "1", features = ["std"] } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] } crossbeam-utils = { version = "0.8", features = ["std"] } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "d33457c20c2b15d6a934e5b37ac9eb0d17e29145", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "119f90f0a1e681639d630fe616bb4599e1d20dee", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] } either = { version = "1", features = ["use_std"] } fixedbitset = { version = "0.4", features = ["std"] } @@ -52,7 +54,7 @@ num-traits = { version = "0.2", features = ["i128", "libm", "std"] } object_store = { git = "https://github.com/apache/arrow-rs.git", rev = "f5c165acc0e6cc4b34e0eaea006aab7e5bd28d66", default-features = false, features = ["aws", "azure", "base64", "cloud", "gcp", "getrandom", "quick-xml", "rand", "reqwest", "ring", "rustls-pemfile", "serde", "serde_json"] } once_cell = { version = "1", features = ["alloc", "parking_lot", "parking_lot_core", "race", "std"] } parking_lot = { version = "0.12", features = ["arc_lock"] } -parquet = { version = "28", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } +parquet = { version = "29", features = ["arrow", "arrow-array", "arrow-buffer", "arrow-cast", 
"arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", "async", "base64", "brotli", "experimental", "flate2", "futures", "lz4", "snap", "tokio", "zstd"] } predicates = { version = "2", features = ["diff", "difflib", "float-cmp", "normalize-line-endings", "regex"] } prost = { version = "0.11", features = ["prost-derive", "std"] } prost-types = { version = "0.11", features = ["std"] }
54a08853fe5b5b39dbdddf0218c5cd40a585814c
Dom Dwyer
2023-07-04 18:03:16
split write / query tests
Split the write & query integration tests into their own modules for clarity.
null
test(ingester): split write / query tests Split the write & query integration tests into their own modules for clarity.
diff --git a/ingester/tests/query.rs b/ingester/tests/query.rs new file mode 100644 index 0000000000..43133b31e4 --- /dev/null +++ b/ingester/tests/query.rs @@ -0,0 +1,87 @@ +use arrow_util::assert_batches_sorted_eq; +use data_types::PartitionKey; +use ingester_query_grpc::influxdata::iox::ingester::v1::IngesterQueryRequest; +use ingester_test_ctx::TestContextBuilder; +use metric::{DurationHistogram, U64Histogram}; + +// Write data to an ingester through the RPC interface and query the data, validating the contents. +#[tokio::test] +async fn write_query() { + let namespace_name = "write_query_test_namespace"; + let mut ctx = TestContextBuilder::default().build().await; + let ns = ctx.ensure_namespace(namespace_name, None).await; + + // Initial write + let partition_key = PartitionKey::from("1970-01-01"); + ctx.write_lp( + namespace_name, + "bananas greatness=\"unbounded\" 10", + partition_key.clone(), + 0, + ) + .await; + + // A subsequent write with a non-contiguous sequence number to a different table. + ctx.write_lp( + namespace_name, + "cpu bar=2 20\ncpu bar=3 30", + partition_key.clone(), + 7, + ) + .await; + + // And a third write that appends more data to the table in the initial + // write. + ctx.write_lp( + namespace_name, + "bananas count=42 200", + partition_key.clone(), + 42, + ) + .await; + + // Perform a query to validate the actual data buffered. + let data: Vec<_> = ctx + .query(IngesterQueryRequest { + namespace_id: ns.id.get(), + table_id: ctx.table_id(namespace_name, "bananas").await.get(), + columns: vec![], + predicate: None, + }) + .await + .expect("query request failed"); + + let expected = vec![ + "+-------+-----------+--------------------------------+", + "| count | greatness | time |", + "+-------+-----------+--------------------------------+", + "| | unbounded | 1970-01-01T00:00:00.000000010Z |", + "| 42.0 | | 1970-01-01T00:00:00.000000200Z |", + "+-------+-----------+--------------------------------+", + ]; + assert_batches_sorted_eq!(&expected, &data); + + // Assert various ingest metrics. + let hist = ctx + .get_metric::<DurationHistogram, _>( + "ingester_dml_sink_apply_duration", + &[("handler", "write_apply"), ("result", "success")], + ) + .fetch(); + assert_eq!(hist.sample_count(), 3); + + // Read metrics + let hist = ctx + .get_metric::<DurationHistogram, _>( + "ingester_query_stream_duration", + &[("request", "complete")], + ) + .fetch(); + assert_eq!(hist.sample_count(), 1); + + let hist = ctx + .get_metric::<U64Histogram, _>("ingester_query_result_row", &[]) + .fetch(); + assert_eq!(hist.sample_count(), 1); + assert_eq!(hist.total, 2); +} diff --git a/ingester/tests/write.rs b/ingester/tests/write.rs index 05de79f0e5..2cb48fd1ee 100644 --- a/ingester/tests/write.rs +++ b/ingester/tests/write.rs @@ -10,88 +10,6 @@ use metric::{ use parquet_file::ParquetFilePath; use std::{sync::Arc, time::Duration}; -// Write data to an ingester through the RPC interface and query the data, validating the contents. -#[tokio::test] -async fn write_query() { - let namespace_name = "write_query_test_namespace"; - let mut ctx = TestContextBuilder::default().build().await; - let ns = ctx.ensure_namespace(namespace_name, None).await; - - // Initial write - let partition_key = PartitionKey::from("1970-01-01"); - ctx.write_lp( - namespace_name, - "bananas greatness=\"unbounded\" 10", - partition_key.clone(), - 0, - ) - .await; - - // A subsequent write with a non-contiguous sequence number to a different table. 
- ctx.write_lp( - namespace_name, - "cpu bar=2 20\ncpu bar=3 30", - partition_key.clone(), - 7, - ) - .await; - - // And a third write that appends more data to the table in the initial - // write. - ctx.write_lp( - namespace_name, - "bananas count=42 200", - partition_key.clone(), - 42, - ) - .await; - - // Perform a query to validate the actual data buffered. - let data: Vec<_> = ctx - .query(IngesterQueryRequest { - namespace_id: ns.id.get(), - table_id: ctx.table_id(namespace_name, "bananas").await.get(), - columns: vec![], - predicate: None, - }) - .await - .expect("query request failed"); - - let expected = vec![ - "+-------+-----------+--------------------------------+", - "| count | greatness | time |", - "+-------+-----------+--------------------------------+", - "| | unbounded | 1970-01-01T00:00:00.000000010Z |", - "| 42.0 | | 1970-01-01T00:00:00.000000200Z |", - "+-------+-----------+--------------------------------+", - ]; - assert_batches_sorted_eq!(&expected, &data); - - // Assert various ingest metrics. - let hist = ctx - .get_metric::<DurationHistogram, _>( - "ingester_dml_sink_apply_duration", - &[("handler", "write_apply"), ("result", "success")], - ) - .fetch(); - assert_eq!(hist.sample_count(), 3); - - // Read metrics - let hist = ctx - .get_metric::<DurationHistogram, _>( - "ingester_query_stream_duration", - &[("request", "complete")], - ) - .fetch(); - assert_eq!(hist.sample_count(), 1); - - let hist = ctx - .get_metric::<U64Histogram, _>("ingester_query_result_row", &[]) - .fetch(); - assert_eq!(hist.sample_count(), 1); - assert_eq!(hist.total, 2); -} - // Write data to an ingester through the RPC interface and persist the data. #[tokio::test] async fn write_persist() {
2fd2d05ef6b1b0062aa3e4378a97f98f9885b847
Carol (Nichols || Goulding)
2022-12-08 15:13:39
Identify each run of an ingester with a Uuid
And send that UUID in the Flight response for queries to that ingester run. Fixes #6333.
null
feat: Identify each run of an ingester with a Uuid And send that UUID in the Flight response for queries to that ingester run. Fixes #6333.
diff --git a/generated_types/protos/influxdata/iox/ingester/v1/query.proto b/generated_types/protos/influxdata/iox/ingester/v1/query.proto index 372a27b13f..f9f31f63f7 100644 --- a/generated_types/protos/influxdata/iox/ingester/v1/query.proto +++ b/generated_types/protos/influxdata/iox/ingester/v1/query.proto @@ -81,6 +81,11 @@ message IngesterQueryResponseMetadata { // // This field is currently NOT used by the ingester but will be soon. PartitionStatus status = 8; + + // UUID of this ingester instance. + // + // This field is currently NOT used by the ingester but will be soon. + string ingester_uuid = 9; } // Status of a partition that has unpersisted data. diff --git a/influxdb_iox/tests/end_to_end_cases/ingester.rs b/influxdb_iox/tests/end_to_end_cases/ingester.rs index 275543e367..1029610be3 100644 --- a/influxdb_iox/tests/end_to_end_cases/ingester.rs +++ b/influxdb_iox/tests/end_to_end_cases/ingester.rs @@ -53,7 +53,8 @@ async fn ingester_flight_api() { partition_id, status: Some(PartitionStatus { parquet_max_sequence_number: None, - }) + }), + ingester_uuid: String::new(), }, ); @@ -87,6 +88,92 @@ async fn ingester_flight_api() { }); } +#[tokio::test] +async fn ingester2_flight_api() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let table_name = "mytable"; + + // Set up cluster + let mut cluster = MiniCluster::create_non_shared_rpc_write(database_url).await; + + // Write some data into the v2 HTTP API ============== + let lp = format!("{},tag1=A,tag2=B val=42i 123456", table_name); + let response = cluster.write_to_router(lp).await; + assert_eq!(response.status(), StatusCode::NO_CONTENT); + + let mut querier_flight = influxdb_iox_client::flight::low_level::Client::< + influxdb_iox_client::flight::generated_types::IngesterQueryRequest, + >::new(cluster.ingester().ingester_grpc_connection(), None); + + let query = IngesterQueryRequest::new( + cluster.namespace_id().await, + cluster.table_id(table_name).await, + vec![], + Some(::predicate::EMPTY_PREDICATE), + ); + + let mut performed_query = querier_flight + .perform_query(query.clone().try_into().unwrap()) + .await + .unwrap(); + + let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); + msg.unwrap_none(); + + let ingester_uuid = app_metadata.ingester_uuid.clone(); + assert!(!ingester_uuid.is_empty()); + + let (msg, _) = performed_query.next().await.unwrap().unwrap(); + let schema = msg.unwrap_schema(); + + let mut query_results = vec![]; + while let Some((msg, _md)) = performed_query.next().await.unwrap() { + let batch = msg.unwrap_record_batch(); + query_results.push(batch); + } + + let expected = [ + "+------+------+--------------------------------+-----+", + "| tag1 | tag2 | time | val |", + "+------+------+--------------------------------+-----+", + "| A | B | 1970-01-01T00:00:00.000123456Z | 42 |", + "+------+------+--------------------------------+-----+", + ]; + assert_batches_sorted_eq!(&expected, &query_results); + + // Also ensure that the schema of the batches matches what is + // reported by the performed_query. 
+ query_results.iter().enumerate().for_each(|(i, b)| { + assert_eq!( + schema, + b.schema(), + "Schema mismatch for returned batch {}", + i + ); + }); + + // Ensure the ingester UUID is the same in the next query + let mut performed_query = querier_flight + .perform_query(query.clone().try_into().unwrap()) + .await + .unwrap(); + let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); + msg.unwrap_none(); + assert_eq!(app_metadata.ingester_uuid, ingester_uuid); + + // Restart the ingester and ensure it gets a new UUID + cluster.restart_ingester().await; + let mut performed_query = querier_flight + .perform_query(query.try_into().unwrap()) + .await + .unwrap(); + let (msg, app_metadata) = performed_query.next().await.unwrap().unwrap(); + msg.unwrap_none(); + assert_ne!(app_metadata.ingester_uuid, ingester_uuid); +} + #[tokio::test] async fn ingester_flight_api_namespace_not_found() { test_helpers::maybe_start_logging(); diff --git a/ingester/src/server/grpc/query.rs b/ingester/src/server/grpc/query.rs index 057c71b8fb..330aa93777 100644 --- a/ingester/src/server/grpc/query.rs +++ b/ingester/src/server/grpc/query.rs @@ -323,6 +323,8 @@ impl Stream for GetStream { .parquet_max_sequence_number .map(|x| x.get()), }), + // This is only used in ingester2. + ingester_uuid: String::new(), }; prost::Message::encode(&app_metadata, &mut bytes) .context(SerializationSnafu)?; @@ -412,6 +414,8 @@ mod tests { status: Some(proto::PartitionStatus { parquet_max_sequence_number: None, }), + // This is only used in ingester2. + ingester_uuid: String::new(), }, }), Ok(DecodedFlightData { @@ -453,6 +457,8 @@ mod tests { status: Some(proto::PartitionStatus { parquet_max_sequence_number: None, }), + // This is only used in ingester2. + ingester_uuid: String::new(), }, }), Err(tonic::Code::Internal), diff --git a/ingester2/src/server/grpc/query.rs b/ingester2/src/server/grpc/query.rs index e8f615423f..426613cdbf 100644 --- a/ingester2/src/server/grpc/query.rs +++ b/ingester2/src/server/grpc/query.rs @@ -21,6 +21,7 @@ use thiserror::Error; use tokio::sync::{Semaphore, TryAcquireError}; use tonic::{Request, Response, Streaming}; use trace::{ctx::SpanContext, span::SpanExt}; +use uuid::Uuid; use crate::query::{response::QueryResponse, QueryError, QueryExec}; @@ -107,6 +108,8 @@ pub(crate) struct FlightService<Q> { /// Number of queries rejected due to lack of available `request_sem` /// permit. 
query_request_limit_rejected: U64Counter, + + ingester_uuid: Uuid, } impl<Q> FlightService<Q> { @@ -126,6 +129,7 @@ impl<Q> FlightService<Q> { query_handler, request_sem: Semaphore::new(max_simultaneous_requests), query_request_limit_rejected, + ingester_uuid: Uuid::new_v4(), } } } @@ -197,7 +201,10 @@ where ) .await?; - let output = FlightFrameCodec::new(FlatIngesterQueryResponseStream::from(response)); + let output = FlightFrameCodec::new( + FlatIngesterQueryResponseStream::from(response), + self.ingester_uuid, + ); Ok(Response::new(Box::pin(output) as Self::DoGetStream)) } @@ -350,14 +357,16 @@ struct FlightFrameCodec { inner: Pin<Box<dyn Stream<Item = Result<FlatIngesterQueryResponse, ArrowError>> + Send>>, done: bool, buffer: Vec<FlightData>, + ingester_uuid: Uuid, } impl FlightFrameCodec { - fn new(inner: FlatIngesterQueryResponseStream) -> Self { + fn new(inner: FlatIngesterQueryResponseStream, ingester_uuid: Uuid) -> Self { Self { inner, done: false, buffer: vec![], + ingester_uuid, } } } @@ -400,6 +409,7 @@ impl Stream for FlightFrameCodec { status: Some(proto::PartitionStatus { parquet_max_sequence_number: status.parquet_max_sequence_number, }), + ingester_uuid: this.ingester_uuid.to_string(), }; prost::Message::encode(&app_metadata, &mut bytes).map_err(Error::from)?; @@ -460,7 +470,7 @@ mod tests { #[tokio::test] async fn test_get_stream_empty() { - assert_get_stream(vec![], vec![]).await; + assert_get_stream(Uuid::new_v4(), vec![], vec![]).await; } #[tokio::test] @@ -470,8 +480,10 @@ mod tests { .to_arrow(Projection::All) .unwrap(); let schema = batch.schema(); + let ingester_uuid = Uuid::new_v4(); assert_get_stream( + ingester_uuid, vec![ Ok(FlatIngesterQueryResponse::StartPartition { partition_id: PartitionId::new(1), @@ -490,6 +502,7 @@ mod tests { status: Some(proto::PartitionStatus { parquet_max_sequence_number: None, }), + ingester_uuid: ingester_uuid.to_string(), }, }), Ok(DecodedFlightData { @@ -507,7 +520,9 @@ mod tests { #[tokio::test] async fn test_get_stream_shortcuts_err() { + let ingester_uuid = Uuid::new_v4(); assert_get_stream( + ingester_uuid, vec![ Ok(FlatIngesterQueryResponse::StartPartition { partition_id: PartitionId::new(1), @@ -531,6 +546,7 @@ mod tests { status: Some(proto::PartitionStatus { parquet_max_sequence_number: None, }), + ingester_uuid: ingester_uuid.to_string(), }, }), Err(tonic::Code::Internal), @@ -547,6 +563,7 @@ mod tests { .unwrap(); assert_get_stream( + Uuid::new_v4(), vec![Ok(FlatIngesterQueryResponse::RecordBatch { batch })], vec![ Ok(DecodedFlightData { @@ -572,11 +589,12 @@ mod tests { } async fn assert_get_stream( + ingester_uuid: Uuid, inputs: Vec<Result<FlatIngesterQueryResponse, ArrowError>>, expected: Vec<Result<DecodedFlightData, tonic::Code>>, ) { let inner = Box::pin(futures::stream::iter(inputs)); - let stream = FlightFrameCodec::new(inner); + let stream = FlightFrameCodec::new(inner, ingester_uuid); let actual: Vec<_> = stream.collect().await; assert_eq!(actual.len(), expected.len()); diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index 97cee9ca44..ffb6ba3ae1 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -1392,6 +1392,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: None, }), + ingester_uuid: String::new(), }, ))], }), @@ -1422,6 +1423,7 @@ mod tests { IngesterQueryResponseMetadata { partition_id: 1, status: None, + ingester_uuid: String::new(), }, ))], }), @@ -1447,6 +1449,7 @@ mod tests { status: Some(PartitionStatus { 
parquet_max_sequence_number: None, }), + ingester_uuid: String::new(), }, )), Ok(( @@ -1456,6 +1459,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: None, }), + ingester_uuid: String::new(), }, )), Ok(( @@ -1465,6 +1469,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: None, }), + ingester_uuid: String::new(), }, )), ], @@ -1544,6 +1549,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: Some(11), }), + ingester_uuid: String::new(), }, )), Ok(( @@ -1573,6 +1579,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: Some(21), }), + ingester_uuid: String::new(), }, )), Ok(( @@ -1597,6 +1604,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: Some(31), }), + ingester_uuid: String::new(), }, )), Ok(( @@ -1776,6 +1784,7 @@ mod tests { status: Some(PartitionStatus { parquet_max_sequence_number: Some(11), }), + ingester_uuid: String::new(), }, )), Ok(( diff --git a/query_tests/src/scenarios/util.rs b/query_tests/src/scenarios/util.rs index eda7400e90..ee7b74bcce 100644 --- a/query_tests/src/scenarios/util.rs +++ b/query_tests/src/scenarios/util.rs @@ -1031,6 +1031,8 @@ impl QueryDataAdapter { .parquet_max_sequence_number .map(|x| x.get()), }), + // Only used in ingester2. + ingester_uuid: String::new(), }, ), FlatIngesterQueryResponse::StartSnapshot { schema } => (
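The pattern introduced by the commit above is small but worth spelling out: the ingester2 Flight service creates one `Uuid::new_v4()` when it is constructed and copies its string form into the `ingester_uuid` field of every `IngesterQueryResponseMetadata`, so a querier can detect that it is now talking to a different run of the process. A minimal Rust sketch of that idea follows; the struct and method names are illustrative stand-ins (not the real `FlightService` API), and it assumes the `uuid` crate with the `v4` feature enabled.

```rust
use uuid::Uuid;

/// Illustrative stand-in for per-process service state.
struct ServiceState {
    /// Generated once per process; a restart yields a different value.
    ingester_uuid: Uuid,
}

impl ServiceState {
    fn new() -> Self {
        Self {
            ingester_uuid: Uuid::new_v4(),
        }
    }

    /// Stand-in for filling the `ingester_uuid` field of the response metadata.
    fn metadata_uuid(&self) -> String {
        self.ingester_uuid.to_string()
    }
}

fn main() {
    let first_run = ServiceState::new();
    let second_run = ServiceState::new(); // simulates a restarted ingester
    // A querier comparing the two metadata values can tell the ingester restarted.
    assert_ne!(first_run.metadata_uuid(), second_run.metadata_uuid());
    println!("current run: {}", first_run.metadata_uuid());
}
```

The integration test added in the diff relies on exactly this property: the UUID stays the same across repeated queries to one process and changes after `restart_ingester()`.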
1ce6a24c3f2ce471cb61ba0af1874c290e5689f3
Paul Dix
2025-01-06 17:32:17
Implement WAL plugin test API (#25704)
* feat: Implement WAL plugin test API This implements the WAL plugin test API. It also introduces a new API for the Python plugins to be called, get their data, and call back into the database server. There are some things that I'll want to address in follow on work: * CLI tests, but will wait on #25737 to land for a refactor of the CLI here * Would be better to hook the Python logging to call back into the plugin return state like here: https://pyo3.rs/v0.23.3/ecosystem/logging.html#the-python-to-rust-direction * We should only load the LineBuilder interface once in a module, rather than on every execution of a WAL plugin * More tests all around But I want to get this in so that the actual plugin and trigger system can get updated to build around this model. * refactor: PR feedback
null
feat: Implement WAL plugin test API (#25704) * feat: Implement WAL plugin test API This implements the WAL plugin test API. It also introduces a new API for the Python plugins to be called, get their data, and call back into the database server. There are some things that I'll want to address in follow on work: * CLI tests, but will wait on #25737 to land for a refactor of the CLI here * Would be better to hook the Python logging to call back into the plugin return state like here: https://pyo3.rs/v0.23.3/ecosystem/logging.html#the-python-to-rust-direction * We should only load the LineBuilder interface once in a module, rather than on every execution of a WAL plugin * More tests all around But I want to get this in so that the actual plugin and trigger system can get updated to build around this model. * refactor: PR feedback
diff --git a/Cargo.lock b/Cargo.lock index 698ac530d9..c6198d194e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3060,6 +3060,7 @@ dependencies = [ "influxdb-line-protocol", "influxdb3_cache", "influxdb3_catalog", + "influxdb3_client", "influxdb3_id", "influxdb3_process", "influxdb3_sys_events", @@ -3084,6 +3085,7 @@ dependencies = [ "parquet_file", "pin-project-lite", "pretty_assertions", + "pyo3", "schema", "secrecy", "serde", @@ -3213,6 +3215,7 @@ dependencies = [ "influxdb-line-protocol", "influxdb3_cache", "influxdb3_catalog", + "influxdb3_client", "influxdb3_id", "influxdb3_py_api", "influxdb3_telemetry", diff --git a/influxdb3/Cargo.toml b/influxdb3/Cargo.toml index 457163c454..43da370c3b 100644 --- a/influxdb3/Cargo.toml +++ b/influxdb3/Cargo.toml @@ -74,7 +74,7 @@ tokio_console = ["console-subscriber", "tokio/tracing", "observability_deps/rele # Use jemalloc as the default allocator. jemalloc_replacing_malloc = ["influxdb3_process/jemalloc_replacing_malloc"] -system-py = ["influxdb3_write/system-py"] +system-py = ["influxdb3_write/system-py", "influxdb3_server/system-py"] [dev-dependencies] # Core Crates diff --git a/influxdb3/src/commands/common.rs b/influxdb3/src/commands/common.rs index cb189ec23d..41ce275b48 100644 --- a/influxdb3/src/commands/common.rs +++ b/influxdb3/src/commands/common.rs @@ -24,7 +24,32 @@ pub struct InfluxDb3Config { pub auth_token: Option<Secret<String>>, } -/// A clap argument privided as a list of items separated by `SEPARATOR`, which by default is a ',' +// A clap argument provided as a key/value pair separated by `SEPARATOR`, which by default is a '=' +#[derive(Debug, Clone)] +pub struct SeparatedKeyValue<K, V, const SEPARATOR: char = '='>(pub (K, V)); + +impl<K, V, const SEPARATOR: char> FromStr for SeparatedKeyValue<K, V, SEPARATOR> +where + K: FromStr<Err: Into<anyhow::Error>>, + V: FromStr<Err: Into<anyhow::Error>>, +{ + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let mut parts = s.split(SEPARATOR); + let key = parts.next().ok_or_else(|| anyhow::anyhow!("missing key"))?; + let value = parts + .next() + .ok_or_else(|| anyhow::anyhow!("missing value"))?; + + Ok(Self(( + key.parse().map_err(Into::into)?, + value.parse().map_err(Into::into)?, + ))) + } +} + +/// A clap argument provided as a list of items separated by `SEPARATOR`, which by default is a ',' #[derive(Debug, Clone)] pub struct SeparatedList<T, const SEPARATOR: char = ','>(pub Vec<T>); diff --git a/influxdb3/src/commands/plugin_test/mod.rs b/influxdb3/src/commands/plugin_test/mod.rs new file mode 100644 index 0000000000..45ad961b39 --- /dev/null +++ b/influxdb3/src/commands/plugin_test/mod.rs @@ -0,0 +1,21 @@ +use std::error::Error; + +pub mod wal; + +#[derive(Debug, clap::Parser)] +pub(crate) struct Config { + #[clap(subcommand)] + command: Command, +} + +#[derive(Debug, clap::Parser)] +enum Command { + /// Test a plugin triggered by WAL writes + Wal(wal::Config), +} + +pub(crate) async fn command(config: Config) -> Result<(), Box<dyn Error>> { + match config.command { + Command::Wal(config) => wal::command(config).await, + } +} diff --git a/influxdb3/src/commands/plugin_test/wal.rs b/influxdb3/src/commands/plugin_test/wal.rs new file mode 100644 index 0000000000..981b0351ab --- /dev/null +++ b/influxdb3/src/commands/plugin_test/wal.rs @@ -0,0 +1,71 @@ +use crate::commands::common::{InfluxDb3Config, SeparatedKeyValue, SeparatedList}; +use influxdb3_client::plugin_development::WalPluginTestRequest; +use secrecy::ExposeSecret; +use 
std::collections::HashMap; +use std::error::Error; + +#[derive(Debug, clap::Parser)] +pub struct Config { + #[clap(flatten)] + influxdb3_config: InfluxDb3Config, + + #[clap(flatten)] + wal_plugin_test: WalPluginTest, +} + +#[derive(Debug, clap::Parser)] +pub struct WalPluginTest { + /// The name of the plugin, which should match its file name on the server `<plugin-dir>/<name>.py` + #[clap(short = 'n', long = "name")] + pub name: String, + /// If given, pass this line protocol as input + #[clap(long = "lp")] + pub input_lp: Option<String>, + /// If given, pass this file of LP as input from on the server `<plugin-dir>/<name>_test/<input-file>` + #[clap(long = "file")] + pub input_file: Option<String>, + /// If given pass this map of string key/value pairs as input arguments + #[clap(long = "input-arguments")] + pub input_arguments: Option<SeparatedList<SeparatedKeyValue<String, String>>>, +} + +impl From<WalPluginTest> for WalPluginTestRequest { + fn from(val: WalPluginTest) -> Self { + let input_arguments = val.input_arguments.map(|a| { + a.into_iter() + .map(|SeparatedKeyValue((k, v))| (k, v)) + .collect::<HashMap<String, String>>() + }); + + Self { + name: val.name, + input_lp: val.input_lp, + input_file: val.input_file, + input_arguments, + } + } +} + +pub(super) async fn command(config: Config) -> Result<(), Box<dyn Error>> { + let InfluxDb3Config { + host_url, + auth_token, + .. + } = config.influxdb3_config; + + let wal_plugin_test_request: WalPluginTestRequest = config.wal_plugin_test.into(); + + let mut client = influxdb3_client::Client::new(host_url)?; + if let Some(t) = auth_token { + client = client.with_auth_token(t.expose_secret()); + } + let response = client.wal_plugin_test(wal_plugin_test_request).await?; + + let res = serde_json::to_string_pretty(&response) + .expect("serialize wal plugin test response as JSON"); + + // pretty print the response + println!("{}", res); + + Ok(()) +} diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index 9424f142f4..48902617af 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -37,7 +37,10 @@ use observability_deps::tracing::*; use panic_logging::SendPanicsToTracing; use parquet_file::storage::{ParquetStorage, StorageId}; use std::{num::NonZeroUsize, sync::Arc}; -use std::{path::Path, str::FromStr}; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use thiserror::Error; use tokio::net::TcpListener; use tokio::time::Instant; @@ -287,6 +290,10 @@ pub struct Config { action )] pub meta_cache_eviction_interval: humantime::Duration, + + /// The local directory that has python plugins and their test files. 
+ #[clap(long = "plugin-dir", env = "INFLUXDB3_PLUGIN_DIR", action)] + pub plugin_dir: Option<PathBuf>, } /// Specified size of the Parquet cache in megabytes (MB) @@ -481,6 +488,7 @@ pub async fn command(config: Config) -> Result<()> { wal_config, parquet_cache, metric_registry: Arc::clone(&metrics), + plugin_dir: config.plugin_dir, }) .await .map_err(|e| Error::WriteBufferInit(e.into()))?; diff --git a/influxdb3/src/main.rs b/influxdb3/src/main.rs index 287e1233f7..a8b3bb52e0 100644 --- a/influxdb3/src/main.rs +++ b/influxdb3/src/main.rs @@ -25,6 +25,7 @@ mod commands { pub mod last_cache; pub mod manage; pub mod meta_cache; + pub mod plugin_test; pub mod processing_engine; pub mod query; pub mod serve; @@ -105,6 +106,9 @@ enum Command { /// Manage table (delete only for the moment) Table(commands::manage::table::Config), + + /// Test Python plugins for processing WAL writes, persistence Snapshots, requests, or scheduled tasks. + PluginTest(commands::plugin_test::Config), } fn main() -> Result<(), std::io::Error> { @@ -187,6 +191,12 @@ fn main() -> Result<(), std::io::Error> { std::process::exit(ReturnCode::Failure as _) } } + Some(Command::PluginTest(config)) => { + if let Err(e) = commands::plugin_test::command(config).await { + eprintln!("Plugin Test command failed: {e}"); + std::process::exit(ReturnCode::Failure as _) + } + } } }); diff --git a/influxdb3_client/Cargo.toml b/influxdb3_client/Cargo.toml index d3dd6211cd..5e7d53cb37 100644 --- a/influxdb3_client/Cargo.toml +++ b/influxdb3_client/Cargo.toml @@ -14,6 +14,7 @@ bytes.workspace = true reqwest.workspace = true secrecy.workspace = true serde.workspace = true +serde_json.workspace = true thiserror.workspace = true url.workspace = true diff --git a/influxdb3_client/src/lib.rs b/influxdb3_client/src/lib.rs index a72a403ffa..7a997aa0fe 100644 --- a/influxdb3_client/src/lib.rs +++ b/influxdb3_client/src/lib.rs @@ -1,7 +1,10 @@ +pub mod plugin_development; + use std::{ collections::HashMap, fmt::Display, num::NonZeroUsize, string::FromUtf8Error, time::Duration, }; +use crate::plugin_development::{WalPluginTestRequest, WalPluginTestResponse}; use bytes::Bytes; use iox_query_params::StatementParam; use reqwest::{Body, IntoUrl, Method, StatusCode}; @@ -697,6 +700,35 @@ impl Client { } } + /// Make a request to the `POST /api/v3/plugin_test/wal` API + pub async fn wal_plugin_test( + &self, + wal_plugin_test_request: WalPluginTestRequest, + ) -> Result<WalPluginTestResponse> { + let api_path = "/api/v3/plugin_test/wal"; + + let url = self.base_url.join(api_path)?; + + let mut req = self.http_client.post(url).json(&wal_plugin_test_request); + + if let Some(token) = &self.auth_token { + req = req.bearer_auth(token.expose_secret()); + } + let resp = req + .send() + .await + .map_err(|src| Error::request_send(Method::POST, api_path, src))?; + + if resp.status().is_success() { + resp.json().await.map_err(Error::Json) + } else { + Err(Error::ApiError { + code: resp.status(), + message: resp.text().await.map_err(Error::Text)?, + }) + } + } + /// Send a `/ping` request to the target `influxdb3` server to check its /// status and gather `version` and `revision` information pub async fn ping(&self) -> Result<PingResponse> { diff --git a/influxdb3_client/src/plugin_development.rs b/influxdb3_client/src/plugin_development.rs new file mode 100644 index 0000000000..afe05ed360 --- /dev/null +++ b/influxdb3_client/src/plugin_development.rs @@ -0,0 +1,21 @@ +//! 
Request structs for the /api/v3/plugin_test API + +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Request definition for `POST /api/v3/plugin_test/wal` API +#[derive(Debug, Serialize, Deserialize)] +pub struct WalPluginTestRequest { + pub name: String, + pub input_lp: Option<String>, + pub input_file: Option<String>, + pub input_arguments: Option<HashMap<String, String>>, +} + +/// Response definition for `POST /api/v3/plugin_test/wal` API +#[derive(Debug, Serialize, Deserialize)] +pub struct WalPluginTestResponse { + pub log_lines: Vec<String>, + pub database_writes: HashMap<String, Vec<String>>, + pub errors: Vec<String>, +} diff --git a/influxdb3_py_api/src/system_py.rs b/influxdb3_py_api/src/system_py.rs index 2c0b7e1caf..2a4dcef6be 100644 --- a/influxdb3_py_api/src/system_py.rs +++ b/influxdb3_py_api/src/system_py.rs @@ -1,10 +1,14 @@ -use influxdb3_catalog::catalog::{DatabaseSchema, TableDefinition}; +use influxdb3_catalog::catalog::{Catalog, DatabaseSchema, TableDefinition}; use influxdb3_wal::{FieldData, Row, WriteBatch}; use parking_lot::Mutex; use pyo3::exceptions::PyValueError; use pyo3::prelude::{PyAnyMethods, PyModule, PyModuleMethods}; -use pyo3::{pyclass, pymethods, pymodule, Bound, IntoPyObject, PyErr, PyObject, PyResult, Python}; +use pyo3::types::{PyDict, PyList}; +use pyo3::{ + pyclass, pymethods, pymodule, Bound, IntoPyObject, PyAny, PyErr, PyObject, PyResult, Python, +}; use schema::InfluxColumnType; +use std::collections::HashMap; use std::ffi::CString; use std::sync::Arc; @@ -183,6 +187,321 @@ impl PyWriteBatch { } } +#[pyclass] +#[derive(Debug)] +struct PyPluginCallApi { + _schema: Arc<DatabaseSchema>, + _catalog: Arc<Catalog>, + return_state: Arc<Mutex<PluginReturnState>>, +} + +#[derive(Debug, Default)] +pub struct PluginReturnState { + pub log_lines: Vec<LogLine>, + pub write_back_lines: Vec<String>, + pub write_db_lines: HashMap<String, Vec<String>>, +} + +impl PluginReturnState { + pub fn log(&self) -> Vec<String> { + self.log_lines.iter().map(|l| l.to_string()).collect() + } +} + +pub enum LogLine { + Info(String), + Warn(String), + Error(String), +} + +impl std::fmt::Display for LogLine { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LogLine::Info(s) => write!(f, "INFO: {}", s), + LogLine::Warn(s) => write!(f, "WARN: {}", s), + LogLine::Error(s) => write!(f, "ERROR: {}", s), + } + } +} + +impl std::fmt::Debug for LogLine { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(self, f) + } +} + +#[pymethods] +impl PyPluginCallApi { + fn info(&self, line: &str) -> PyResult<()> { + self.return_state + .lock() + .log_lines + .push(LogLine::Info(line.to_string())); + Ok(()) + } + + fn warn(&self, line: &str) -> PyResult<()> { + self.return_state + .lock() + .log_lines + .push(LogLine::Warn(line.to_string())); + Ok(()) + } + + fn error(&self, line: &str) -> PyResult<()> { + self.return_state + .lock() + .log_lines + .push(LogLine::Error(line.to_string())); + Ok(()) + } + + fn write(&self, line_builder: &Bound<'_, PyAny>) -> PyResult<()> { + // Get the built line from the LineBuilder object + let line = line_builder.getattr("build")?.call0()?; + let line_str = line.extract::<String>()?; + + // Add to write_back_lines + self.return_state.lock().write_back_lines.push(line_str); + + Ok(()) + } + + fn write_to_db(&self, db_name: &str, line_builder: &Bound<'_, PyAny>) -> PyResult<()> { + let line = line_builder.getattr("build")?.call0()?; + let line_str = 
line.extract::<String>()?; + + self.return_state + .lock() + .write_db_lines + .entry(db_name.to_string()) + .or_default() + .push(line_str); + + Ok(()) + } +} + +// constant for the process writes call site string +const PROCESS_WRITES_CALL_SITE: &str = "process_writes"; + +const LINE_BUILDER_CODE: &str = r#" +from typing import Optional +from collections import OrderedDict + +class InfluxDBError(Exception): + """Base exception for InfluxDB-related errors""" + pass + +class InvalidMeasurementError(InfluxDBError): + """Raised when measurement name is invalid""" + pass + +class InvalidKeyError(InfluxDBError): + """Raised when a tag or field key is invalid""" + pass + +class LineBuilder: + def __init__(self, measurement: str): + if ' ' in measurement: + raise InvalidMeasurementError("Measurement name cannot contain spaces") + self.measurement = measurement + self.tags: OrderedDict[str, str] = OrderedDict() + self.fields: OrderedDict[str, str] = OrderedDict() + self._timestamp_ns: Optional[int] = None + + def _validate_key(self, key: str, key_type: str) -> None: + """Validate that a key does not contain spaces, commas, or equals signs.""" + if not key: + raise InvalidKeyError(f"{key_type} key cannot be empty") + if ' ' in key: + raise InvalidKeyError(f"{key_type} key '{key}' cannot contain spaces") + if ',' in key: + raise InvalidKeyError(f"{key_type} key '{key}' cannot contain commas") + if '=' in key: + raise InvalidKeyError(f"{key_type} key '{key}' cannot contain equals signs") + + def tag(self, key: str, value: str) -> 'LineBuilder': + """Add a tag to the line protocol.""" + self._validate_key(key, "tag") + self.tags[key] = str(value) + return self + + def int64_field(self, key: str, value: int) -> 'LineBuilder': + """Add an integer field to the line protocol.""" + self._validate_key(key, "field") + self.fields[key] = f"{value}i" + return self + + def float64_field(self, key: str, value: float) -> 'LineBuilder': + """Add a float field to the line protocol.""" + self._validate_key(key, "field") + # Check if value has no decimal component + self.fields[key] = f"{int(value)}.0" if value % 1 == 0 else str(value) + return self + + def string_field(self, key: str, value: str) -> 'LineBuilder': + """Add a string field to the line protocol.""" + self._validate_key(key, "field") + # Escape quotes and backslashes in string values + escaped_value = value.replace('"', '\\"').replace('\\', '\\\\') + self.fields[key] = f'"{escaped_value}"' + return self + + def time_ns(self, timestamp_ns: int) -> 'LineBuilder': + """Set the timestamp in nanoseconds.""" + self._timestamp_ns = timestamp_ns + return self + + def build(self) -> str: + """Build the line protocol string.""" + # Start with measurement name (escape commas only) + line = self.measurement.replace(',', '\\,') + + # Add tags if present + if self.tags: + tags_str = ','.join( + f"{k}={v}" for k, v in self.tags.items() + ) + line += f",{tags_str}" + + # Add fields (required) + if not self.fields: + raise ValueError("At least one field is required") + + fields_str = ','.join( + f"{k}={v}" for k, v in self.fields.items() + ) + line += f" {fields_str}" + + # Add timestamp if present + if self._timestamp_ns is not None: + line += f" {self._timestamp_ns}" + + return line"#; + +pub fn execute_python_with_batch( + code: &str, + write_batch: &WriteBatch, + schema: Arc<DatabaseSchema>, + catalog: Arc<Catalog>, + args: Option<HashMap<String, String>>, +) -> PyResult<PluginReturnState> { + Python::with_gil(|py| { + // import the LineBuilder for use in the 
python code + let globals = PyDict::new(py); + + py.run( + &CString::new(LINE_BUILDER_CODE).unwrap(), + Some(&globals), + None, + )?; + + // convert the write batch into a python object + let mut table_batches = Vec::with_capacity(write_batch.table_chunks.len()); + + for (table_id, table_chunks) in &write_batch.table_chunks { + let table_def = schema.tables.get(table_id).unwrap(); + + let dict = PyDict::new(py); + dict.set_item("table_name", table_def.table_name.as_ref()) + .unwrap(); + + let mut rows: Vec<PyObject> = Vec::new(); + for chunk in table_chunks.chunk_time_to_chunk.values() { + for row in &chunk.rows { + let py_row = PyDict::new(py); + py_row.set_item("time", row.time).unwrap(); + let mut fields = Vec::with_capacity(row.fields.len()); + for field in &row.fields { + let field_name = table_def.column_id_to_name(&field.id).unwrap(); + if field_name.as_ref() == "time" { + continue; + } + let py_field = PyDict::new(py); + py_field.set_item("name", field_name.as_ref()).unwrap(); + + match &field.value { + FieldData::String(s) => { + py_field.set_item("value", s.as_str()).unwrap(); + } + FieldData::Integer(i) => { + py_field.set_item("value", i).unwrap(); + } + FieldData::UInteger(u) => { + py_field.set_item("value", u).unwrap(); + } + FieldData::Float(f) => { + py_field.set_item("value", f).unwrap(); + } + FieldData::Boolean(b) => { + py_field.set_item("value", b).unwrap(); + } + FieldData::Tag(t) => { + py_field.set_item("value", t.as_str()).unwrap(); + } + FieldData::Key(k) => { + py_field.set_item("value", k.as_str()).unwrap(); + } + FieldData::Timestamp(_) => { + // return an error, this shouldn't happen + return Err(PyValueError::new_err( + "Timestamps should be in the time field", + )); + } + }; + + fields.push(py_field.unbind()); + } + let fields = PyList::new(py, fields).unwrap(); + py_row.set_item("fields", fields.unbind()).unwrap(); + + rows.push(py_row.into()); + } + } + + let rows = PyList::new(py, rows).unwrap(); + + dict.set_item("rows", rows.unbind()).unwrap(); + table_batches.push(dict); + } + + let py_batches = PyList::new(py, table_batches).unwrap(); + + let api = PyPluginCallApi { + _schema: schema, + _catalog: catalog, + return_state: Default::default(), + }; + let return_state = Arc::clone(&api.return_state); + let local_api = api.into_pyobject(py)?; + + // turn args into an optional dict to pass into python + let args = args.map(|args| { + let dict = PyDict::new(py); + for (key, value) in args { + dict.set_item(key, value).unwrap(); + } + dict + }); + + // run the code and get the python function to call + py.run(&CString::new(code).unwrap(), Some(&globals), None)?; + let py_func = py.eval( + &CString::new(PROCESS_WRITES_CALL_SITE).unwrap(), + Some(&globals), + None, + )?; + py_func.call1((local_api, py_batches.unbind(), args))?; + + // swap with an empty return state to avoid cloning + let empty_return_state = PluginReturnState::default(); + let ret = std::mem::replace(&mut *return_state.lock(), empty_return_state); + + Ok(ret) + }) +} + // Module initialization #[pymodule] fn influxdb3_py_api(m: &Bound<'_, PyModule>) -> PyResult<()> { diff --git a/influxdb3_server/Cargo.toml b/influxdb3_server/Cargo.toml index ffd3fa8b2f..c1aa58fb96 100644 --- a/influxdb3_server/Cargo.toml +++ b/influxdb3_server/Cargo.toml @@ -32,6 +32,7 @@ tracker.workspace = true # Local Deps influxdb3_cache = { path = "../influxdb3_cache" } influxdb3_catalog = { path = "../influxdb3_catalog" } +influxdb3_client = { path = "../influxdb3_client" } influxdb3_id = { path = "../influxdb3_id" } 
influxdb3_process = { path = "../influxdb3_process", default-features = false } influxdb3_wal = { path = "../influxdb3_wal"} @@ -75,6 +76,15 @@ tonic.workspace = true tower.workspace = true unicode-segmentation.workspace = true +[dependencies.pyo3] +version = "0.23.3" +# this is necessary to automatically initialize the Python interpreter +features = ["auto-initialize"] +optional = true + +[features] +system-py = ["pyo3"] + [dev-dependencies] # Core Crates parquet.workspace = true diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs index 7926eedfc0..3cc5151949 100644 --- a/influxdb3_server/src/http.rs +++ b/influxdb3_server/src/http.rs @@ -207,6 +207,9 @@ pub enum Error { #[error(transparent)] Catalog(#[from] CatalogError), + + #[error("Python plugins not enabled on this server")] + PythonPluginsNotEnabled, } #[derive(Debug, Error)] @@ -1098,6 +1101,31 @@ where .unwrap()) } + /// Endpoint for testing a plugin that will be trigger on WAL writes. + #[cfg(feature = "system-py")] + async fn test_processing_engine_wal_plugin( + &self, + req: Request<Body>, + ) -> Result<Response<Body>> { + let request: influxdb3_client::plugin_development::WalPluginTestRequest = + self.read_body_json(req).await?; + + let output = self.write_buffer.test_wal_plugin(request).await?; + let body = serde_json::to_string(&output)?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(body))?) + } + + #[cfg(not(feature = "system-py"))] + async fn test_processing_engine_wal_plugin( + &self, + _req: Request<Body>, + ) -> Result<Response<Body>> { + Err(Error::PythonPluginsNotEnabled) + } + async fn delete_database(&self, req: Request<Body>) -> Result<Response<Body>> { let query = req.uri().query().unwrap_or(""); let delete_req = serde_urlencoded::from_str::<DeleteDatabaseRequest>(query)?; @@ -1686,6 +1714,9 @@ pub(crate) async fn route_request<T: TimeProvider>( (Method::POST, "/api/v3/configure/table") => http_server.create_table(req).await, // TODO: make table delete to use path param (DELETE db/foodb/table/bar) (Method::DELETE, "/api/v3/configure/table") => http_server.delete_table(req).await, + (Method::POST, "/api/v3/plugin_test/wal") => { + http_server.test_processing_engine_wal_plugin(req).await + } _ => { let body = Body::from("not found"); Ok(Response::builder() diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index 732b9991d5..ecb2bd9659 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -816,6 +816,7 @@ mod tests { wal_config: WalConfig::test_config(), parquet_cache: Some(parquet_cache), metric_registry: Arc::clone(&metrics), + plugin_dir: None, }, ) .await diff --git a/influxdb3_server/src/query_executor/mod.rs b/influxdb3_server/src/query_executor/mod.rs index cc53c10c2e..1684b1f2c3 100644 --- a/influxdb3_server/src/query_executor/mod.rs +++ b/influxdb3_server/src/query_executor/mod.rs @@ -701,6 +701,7 @@ mod tests { }, parquet_cache: Some(parquet_cache), metric_registry: Default::default(), + plugin_dir: None, }) .await .unwrap(); diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml index 6d0fa9b45c..22a7f6ee17 100644 --- a/influxdb3_write/Cargo.toml +++ b/influxdb3_write/Cargo.toml @@ -26,6 +26,7 @@ schema.workspace = true # Local deps influxdb3_cache = { path = "../influxdb3_cache" } influxdb3_catalog = { path = "../influxdb3_catalog" } +influxdb3_client = { path = "../influxdb3_client" } influxdb3_id = { path = "../influxdb3_id" } influxdb3_test_helpers = { path = "../influxdb3_test_helpers" } 
influxdb3_wal = { path = "../influxdb3_wal" } diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index cd6602a6bf..9e844206b3 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -17,6 +17,8 @@ use crate::{ BufferedWriteRequest, Bufferer, ChunkContainer, LastCacheManager, MetaCacheManager, ParquetFile, PersistedSnapshot, Precision, WriteBuffer, WriteLineError, }; +#[cfg(feature = "system-py")] +use anyhow::Context; use async_trait::async_trait; use data_types::{ ChunkId, ChunkOrder, ColumnType, NamespaceName, NamespaceNameError, PartitionHashId, @@ -57,6 +59,7 @@ use parquet_file::storage::ParquetExecInput; use plugins::ProcessingEngineManager; use queryable_buffer::QueryableBufferArgs; use schema::Schema; +use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use thiserror::Error; @@ -65,6 +68,7 @@ use tokio::sync::watch::Receiver; #[cfg(feature = "system-py")] use crate::write_buffer::plugins::PluginContext; +use influxdb3_client::plugin_development::{WalPluginTestRequest, WalPluginTestResponse}; #[derive(Debug, Error)] pub enum Error { @@ -128,6 +132,12 @@ pub enum Error { #[error("error in metadata cache: {0}")] MetaCacheError(#[from] meta_cache::ProviderError), + + #[error("error: {0}")] + AnyhowError(#[from] anyhow::Error), + + #[error("reading plugin file: {0}")] + ReadPluginError(#[from] std::io::Error), } pub type Result<T, E = Error> = std::result::Result<T, E>; @@ -155,6 +165,8 @@ pub struct WriteBufferImpl { metrics: WriteMetrics, meta_cache: Arc<MetaCacheProvider>, last_cache: Arc<LastCacheProvider>, + #[allow(dead_code)] + plugin_dir: Option<PathBuf>, } /// The maximum number of snapshots to load on start @@ -171,6 +183,7 @@ pub struct WriteBufferImplArgs { pub wal_config: WalConfig, pub parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, pub metric_registry: Arc<Registry>, + pub plugin_dir: Option<PathBuf>, } impl WriteBufferImpl { @@ -185,6 +198,7 @@ impl WriteBufferImpl { wal_config, parquet_cache, metric_registry, + plugin_dir, }: WriteBufferImplArgs, ) -> Result<Arc<Self>> { // load snapshots and replay the wal into the in memory buffer @@ -243,6 +257,7 @@ impl WriteBufferImpl { persisted_files, buffer: queryable_buffer, metrics: WriteMetrics::new(&metric_registry), + plugin_dir, }); let write_buffer: Arc<dyn WriteBuffer> = result.clone(); let triggers = result.catalog().triggers(); @@ -362,6 +377,13 @@ impl WriteBufferImpl { Ok(chunks) } + + #[cfg(feature = "system-py")] + fn read_plugin_code(&self, name: &str) -> Result<String> { + let plugin_dir = self.plugin_dir.clone().context("plugin dir not set")?; + let path = plugin_dir.join(format!("{}.py", name)); + Ok(std::fs::read_to_string(path)?) 
+ } } pub fn parquet_chunk_from_file( @@ -1081,6 +1103,28 @@ impl ProcessingEngineManager for WriteBufferImpl { .await?; Ok(()) } + + #[cfg_attr(not(feature = "system-py"), allow(unused))] + async fn test_wal_plugin( + &self, + request: WalPluginTestRequest, + ) -> crate::Result<WalPluginTestResponse, Error> { + #[cfg(feature = "system-py")] + { + // create a copy of the catalog so we don't modify the original + let catalog = Arc::new(Catalog::from_inner(self.catalog.clone_inner())); + let now = self.time_provider.now(); + + let code = self.read_plugin_code(&request.name)?; + + return Ok(plugins::run_test_wal_plugin(now, catalog, code, request).unwrap()); + } + + #[cfg(not(feature = "system-py"))] + Err(Error::AnyhowError(anyhow::anyhow!( + "system-py feature not enabled" + ))) + } } #[allow(unused)] @@ -1171,6 +1215,7 @@ mod tests { wal_config: WalConfig::test_config(), parquet_cache: Some(Arc::clone(&parquet_cache)), metric_registry: Default::default(), + plugin_dir: None, }) .await .unwrap(); @@ -1255,6 +1300,7 @@ mod tests { }, parquet_cache: Some(Arc::clone(&parquet_cache)), metric_registry: Default::default(), + plugin_dir: None, }) .await .unwrap(); @@ -1323,6 +1369,7 @@ mod tests { }, parquet_cache: wbuf.parquet_cache.clone(), metric_registry: Default::default(), + plugin_dir: None, }) .await .unwrap() @@ -1550,6 +1597,7 @@ mod tests { }, parquet_cache: write_buffer.parquet_cache.clone(), metric_registry: Default::default(), + plugin_dir: None, }) .await .unwrap(); @@ -2814,6 +2862,7 @@ mod tests { wal_config, parquet_cache, metric_registry: Arc::clone(&metric_registry), + plugin_dir: None, }) .await .unwrap(); diff --git a/influxdb3_write/src/write_buffer/plugins.rs b/influxdb3_write/src/write_buffer/plugins.rs index f75c23df3f..46c24bdffb 100644 --- a/influxdb3_write/src/write_buffer/plugins.rs +++ b/influxdb3_write/src/write_buffer/plugins.rs @@ -1,5 +1,6 @@ use crate::write_buffer::PluginEvent; use crate::{write_buffer, WriteBuffer}; +use influxdb3_client::plugin_development::{WalPluginTestRequest, WalPluginTestResponse}; use influxdb3_wal::{PluginType, TriggerDefinition, TriggerSpecificationDefinition}; use std::fmt::Debug; use std::sync::Arc; @@ -20,6 +21,9 @@ pub enum Error { #[error("failed to send shutdown message back")] FailedToShutdown, + + #[error(transparent)] + AnyhowError(#[from] anyhow::Error), } /// `[ProcessingEngineManager]` is used to interact with the processing engine, @@ -79,6 +83,11 @@ pub trait ProcessingEngineManager: Debug + Send + Sync + 'static { db_name: &str, trigger_name: &str, ) -> Result<(), write_buffer::Error>; + + async fn test_wal_plugin( + &self, + request: WalPluginTestRequest, + ) -> crate::Result<WalPluginTestResponse, write_buffer::Error>; } #[cfg(feature = "system-py")] @@ -216,3 +225,286 @@ mod python_plugin { } } } + +#[cfg(feature = "system-py")] +pub(crate) fn run_test_wal_plugin( + now_time: iox_time::Time, + catalog: Arc<influxdb3_catalog::catalog::Catalog>, + code: String, + request: WalPluginTestRequest, +) -> Result<WalPluginTestResponse, Error> { + use crate::write_buffer::validator::WriteValidator; + use crate::Precision; + use data_types::NamespaceName; + use influxdb3_wal::Gen1Duration; + + const TEST_NAMESPACE: &str = "_testdb"; + + let namespace = NamespaceName::new(TEST_NAMESPACE).unwrap(); + // parse the lp into a write batch + let validator = WriteValidator::initialize( + namespace.clone(), + Arc::clone(&catalog), + now_time.timestamp_nanos(), + )?; + let data = validator.v1_parse_lines_and_update_schema( + 
&request.input_lp.unwrap(), + false, + now_time, + Precision::Nanosecond, + )?; + let data = data.convert_lines_to_buffer(Gen1Duration::new_1m()); + let db = catalog.db_schema("_testdb").unwrap(); + + let plugin_return_state = influxdb3_py_api::system_py::execute_python_with_batch( + &code, + &data.valid_data, + db, + Arc::clone(&catalog), + request.input_arguments, + )?; + + // validate the generated output lines + let mut errors = Vec::new(); + + // first for the write back database + let validator = + WriteValidator::initialize(namespace, Arc::clone(&catalog), now_time.timestamp_nanos())?; + let lp = plugin_return_state.write_back_lines.join("\n"); + match validator.v1_parse_lines_and_update_schema(&lp, false, now_time, Precision::Nanosecond) { + Ok(data) => { + let data = data.convert_lines_to_buffer(Gen1Duration::new_1m()); + + for err in data.errors { + errors.push(format!("{:?}", err)); + } + } + Err(write_buffer::Error::ParseError(e)) => { + errors.push(format!("line protocol parse error on write back: {:?}", e)); + } + Err(e) => { + errors.push(format!( + "Failed to validate output lines on write back: {}", + e + )); + } + } + + // now for any other dbs that received writes + for (db_name, lines) in &plugin_return_state.write_db_lines { + let namespace = match NamespaceName::new(db_name.to_string()) { + Ok(namespace) => namespace, + Err(e) => { + errors.push(format!("database name {} is invalid: {}", db_name, e)); + continue; + } + }; + + let validator = WriteValidator::initialize( + namespace, + Arc::clone(&catalog), + now_time.timestamp_nanos(), + )?; + let lp = lines.join("\n"); + match validator.v1_parse_lines_and_update_schema( + &lp, + false, + now_time, + Precision::Nanosecond, + ) { + Ok(data) => { + let data = data.convert_lines_to_buffer(Gen1Duration::new_1m()); + for err in data.errors { + errors.push(format!("{:?}", err)); + } + } + Err(write_buffer::Error::ParseError(e)) => { + errors.push(format!( + "line protocol parse error on write to db {}: {:?}", + db_name, e + )); + } + Err(e) => { + errors.push(format!( + "Failed to validate output lines to db {}: {}", + db_name, e + )); + } + } + } + + let log_lines = plugin_return_state.log(); + let mut database_writes = plugin_return_state.write_db_lines; + database_writes.insert("_testdb".to_string(), plugin_return_state.write_back_lines); + + Ok(WalPluginTestResponse { + log_lines, + database_writes, + errors, + }) +} + +#[cfg(feature = "system-py")] +#[cfg(test)] +mod tests { + use super::*; + use crate::write_buffer::validator::WriteValidator; + use crate::Precision; + use data_types::NamespaceName; + use influxdb3_catalog::catalog::Catalog; + use iox_time::Time; + use std::collections::HashMap; + + #[test] + fn test_wal_plugin() { + let now = Time::from_timestamp_nanos(1); + let catalog = Catalog::new("foo".into(), "bar".into()); + let code = r#" +def process_writes(influxdb3_local, table_batches, args=None): + influxdb3_local.info("arg1: " + args["arg1"]) + + for table_batch in table_batches: + influxdb3_local.info("table: " + table_batch["table_name"]) + + for row in table_batch["rows"]: + influxdb3_local.info("row: " + str(row)) + + line = LineBuilder("some_table")\ + .tag("tag1", "tag1_value")\ + .tag("tag2", "tag2_value")\ + .int64_field("field1", 1)\ + .float64_field("field2", 2.0)\ + .string_field("field3", "number three") + influxdb3_local.write(line) + + other_line = LineBuilder("other_table") + other_line.int64_field("other_field", 1) + other_line.float64_field("other_field2", 3.14) + other_line.time_ns(1302) 
+ + influxdb3_local.write_to_db("mytestdb", other_line) + + influxdb3_local.info("done")"#; + + let lp = [ + "cpu,host=A,region=west usage=1i,system=23.2 100", + "mem,host=B user=43.1 120", + ] + .join("\n"); + + let request = WalPluginTestRequest { + name: "test".into(), + input_lp: Some(lp), + input_file: None, + input_arguments: Some(HashMap::from([( + String::from("arg1"), + String::from("val1"), + )])), + }; + + let response = + run_test_wal_plugin(now, Arc::new(catalog), code.to_string(), request).unwrap(); + + let expected_log_lines = vec![ + "INFO: arg1: val1", + "INFO: table: cpu", + "INFO: row: {'time': 100, 'fields': [{'name': 'host', 'value': 'A'}, {'name': 'region', 'value': 'west'}, {'name': 'usage', 'value': 1}, {'name': 'system', 'value': 23.2}]}", + "INFO: table: mem", "INFO: row: {'time': 120, 'fields': [{'name': 'host', 'value': 'B'}, {'name': 'user', 'value': 43.1}]}", + "INFO: done", + ].into_iter().map(|s| s.to_string()).collect::<Vec<_>>(); + assert_eq!(response.log_lines, expected_log_lines); + + let expected_testdb_lines = vec![ + "some_table,tag1=tag1_value,tag2=tag2_value field1=1i,field2=2.0,field3=\"number three\"" + .to_string(), + ]; + assert_eq!( + response.database_writes.get("_testdb").unwrap(), + &expected_testdb_lines + ); + let expected_mytestdb_lines = + vec!["other_table other_field=1i,other_field2=3.14 1302".to_string()]; + assert_eq!( + response.database_writes.get("mytestdb").unwrap(), + &expected_mytestdb_lines + ); + } + + #[test] + fn test_wal_plugin_invalid_lines() { + // set up a catalog and write some data into it to create a schema + let now = Time::from_timestamp_nanos(1); + let catalog = Arc::new(Catalog::new("foo".into(), "bar".into())); + let namespace = NamespaceName::new("foodb").unwrap(); + let validator = WriteValidator::initialize( + namespace.clone(), + Arc::clone(&catalog), + now.timestamp_nanos(), + ) + .unwrap(); + let _data = validator + .v1_parse_lines_and_update_schema( + "cpu,host=A f1=10i 100", + false, + now, + Precision::Nanosecond, + ) + .unwrap(); + + let code = r#" +def process_writes(influxdb3_local, table_batches, args=None): + line = LineBuilder("some_table")\ + .tag("tag1", "tag1_value")\ + .tag("tag2", "tag2_value")\ + .int64_field("field1", 1)\ + .float64_field("field2", 2.0)\ + .string_field("field3", "number three") + influxdb3_local.write(line) + + cpu_valid = LineBuilder("cpu")\ + .tag("host", "A")\ + .int64_field("f1", 10) + influxdb3_local.write_to_db("foodb", cpu_valid) + + cpu_invalid = LineBuilder("cpu")\ + .tag("host", "A")\ + .string_field("f1", "not_an_int") + influxdb3_local.write_to_db("foodb", cpu_invalid)"#; + + let lp = ["mem,host=B user=43.1 120"].join("\n"); + + let request = WalPluginTestRequest { + name: "test".into(), + input_lp: Some(lp), + input_file: None, + input_arguments: None, + }; + + let reesponse = + run_test_wal_plugin(now, Arc::clone(&catalog), code.to_string(), request).unwrap(); + + let expected_testdb_lines = vec![ + "some_table,tag1=tag1_value,tag2=tag2_value field1=1i,field2=2.0,field3=\"number three\"" + .to_string(), + ]; + assert_eq!( + reesponse.database_writes.get("_testdb").unwrap(), + &expected_testdb_lines + ); + + // the lines should still come through in the output because that's what Python sent + let expected_foodb_lines = vec![ + "cpu,host=A f1=10i".to_string(), + "cpu,host=A f1=\"not_an_int\"".to_string(), + ]; + assert_eq!( + reesponse.database_writes.get("foodb").unwrap(), + &expected_foodb_lines + ); + + // there should be an error for the invalid line + 
assert_eq!(reesponse.errors.len(), 1); + let expected_error = "line protocol parse error on write to db foodb: WriteLineError { original_line: \"cpu,host=A f1=not_an_int\", line_number: 2, error_message: \"invalid field value in line protocol for field 'f1' on line 1: expected type iox::column_type::field::integer, but got iox::column_type::field::string\" }"; + assert_eq!(reesponse.errors[0], expected_error); + } +}
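From a caller's point of view, the API added above boils down to posting a `WalPluginTestRequest` to `/api/v3/plugin_test/wal` and printing the returned log lines, database writes, and errors. The sketch below does that through the `wal_plugin_test` client method introduced in this commit; the host URL, plugin name, line protocol, and the `tokio`/`serde_json` dependencies are assumptions for illustration only.

```rust
use std::collections::HashMap;

use influxdb3_client::plugin_development::WalPluginTestRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address for a local influxdb3 server started with --plugin-dir.
    let client = influxdb3_client::Client::new("http://127.0.0.1:8181")?;

    // The server reads `<plugin-dir>/example_plugin.py` and runs its
    // `process_writes` function against the supplied line protocol.
    let request = WalPluginTestRequest {
        name: "example_plugin".to_string(),
        input_lp: Some("cpu,host=A usage=1i 100".to_string()),
        input_file: None,
        input_arguments: Some(HashMap::from([(
            "arg1".to_string(),
            "val1".to_string(),
        )])),
    };

    // POSTs to /api/v3/plugin_test/wal and returns log lines, generated
    // database writes, and any validation errors.
    let response = client.wal_plugin_test(request).await?;
    println!("{}", serde_json::to_string_pretty(&response)?);
    Ok(())
}
```

This mirrors what the new plugin test CLI subcommand in the diff does: convert its clap arguments into a `WalPluginTestRequest` and pretty-print the JSON response from the server.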
73f38077b626ff818c93c402e743af00474173ee
Nga Tran
2023-08-01 10:28:30
add sort_key_ids as array of bigints into catalog partition (#8375)
* feat: add sort_key_ids as array of bigints into catalog partition * chore: add comments * chore: remove comments to avoid changing them in the future due to checksum requirement ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: add sort_key_ids as array of bigints into catalog partition (#8375) * feat: add sort_key_ids as array of bigints into catalog partition * chore: add comments * chore: remove comments to avoid changing them in the future due to checksum requirement --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_catalog/migrations/20230731143030_add_column_sort_key_array_ids.sql b/iox_catalog/migrations/20230731143030_add_column_sort_key_array_ids.sql new file mode 100644 index 0000000000..a1468fb6be --- /dev/null +++ b/iox_catalog/migrations/20230731143030_add_column_sort_key_array_ids.sql @@ -0,0 +1,3 @@ +-- https://github.com/influxdata/influxdb_iox/issues/6401 + +ALTER TABLE partition ADD COLUMN IF NOT EXISTS sort_key_ids BIGINT[];
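The migration above only adds a nullable `BIGINT[]` column; no catalog code reads it yet. For a sense of the Rust-side shape that column implies, a nullable Postgres bigint array decodes as `Option<Vec<i64>>` with sqlx. The snippet below is a hypothetical illustration only (the connection string and query are placeholders, and it assumes sqlx with the `postgres` and Tokio runtime features enabled); it is not code from the repository.

```rust
use sqlx::postgres::PgPoolOptions;

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Placeholder DSN; in IOx the catalog connection is configured elsewhere.
    let pool = PgPoolOptions::new()
        .connect("postgres://localhost/iox_catalog")
        .await?;

    // `sort_key_ids BIGINT[]` is nullable, so it decodes as Option<Vec<i64>>.
    let sort_key_ids: Option<Vec<i64>> =
        sqlx::query_scalar("SELECT sort_key_ids FROM partition WHERE id = $1")
            .bind(1_i64)
            .fetch_one(&pool)
            .await?;

    println!("sort_key_ids: {:?}", sort_key_ids);
    Ok(())
}
```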
134ff2ef8375a500ce8f16027fb963fcb81c2a55
Andrew Lamb
2023-04-13 13:25:24
update DataFusion pin (right before arrow 37 update) (#7540)
* chore: update DataFusion pin * refactor: Update for deprecated API * chore: Run cargo hakari tasks ---------
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: update DataFusion pin (right before arrow 37 update) (#7540) * chore: update DataFusion pin * refactor: Update for deprecated API * chore: Run cargo hakari tasks --------- Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 27866e0417..fc3f01f7ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1443,10 +1443,12 @@ dependencies = [ [[package]] name = "datafusion" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "ahash 0.8.3", "arrow", + "arrow-array", + "arrow-schema", "async-compression", "async-trait", "bytes", @@ -1490,7 +1492,7 @@ dependencies = [ [[package]] name = "datafusion-common" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "arrow", "arrow-array", @@ -1504,7 +1506,7 @@ dependencies = [ [[package]] name = "datafusion-execution" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "dashmap", "datafusion-common", @@ -1521,7 +1523,7 @@ dependencies = [ [[package]] name = "datafusion-expr" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "ahash 0.8.3", "arrow", @@ -1532,7 +1534,7 @@ dependencies = [ [[package]] name = "datafusion-optimizer" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "arrow", "async-trait", @@ -1549,7 +1551,7 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "ahash 0.8.3", "arrow", @@ -1580,7 +1582,7 @@ dependencies = [ [[package]] name = "datafusion-proto" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "arrow", "chrono", @@ -1594,7 +1596,7 @@ dependencies = [ [[package]] name = "datafusion-row" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = 
"git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "arrow", "datafusion-common", @@ -1605,7 +1607,7 @@ dependencies = [ [[package]] name = "datafusion-sql" version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=34c9bce0146e0ebacfb10334ee7aef13fc8cc94f#34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=f033d9c3b2bb038777fdfa2f539e7130eb926cdc#f033d9c3b2bb038777fdfa2f539e7130eb926cdc" dependencies = [ "arrow", "arrow-schema", diff --git a/Cargo.toml b/Cargo.toml index fabff9df69..27fbc91c27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,8 +116,8 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] arrow = { version = "36.0.0" } arrow-flight = { version = "36.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="34c9bce0146e0ebacfb10334ee7aef13fc8cc94f", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="f033d9c3b2bb038777fdfa2f539e7130eb926cdc", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="f033d9c3b2bb038777fdfa2f539e7130eb926cdc" } hashbrown = { version = "0.13.2" } parquet = { version = "36.0.0" } tonic = { version = "0.8.3", features = ["tls", "tls-webpki-roots"] } diff --git a/iox_query/src/physical_optimizer/projection_pushdown.rs b/iox_query/src/physical_optimizer/projection_pushdown.rs index e15c0a8569..4fcf6ab550 100644 --- a/iox_query/src/physical_optimizer/projection_pushdown.rs +++ b/iox_query/src/physical_optimizer/projection_pushdown.rs @@ -169,12 +169,14 @@ impl PhysicalOptimizerRule for ProjectionPushdown { &column_names, Arc::clone(child_sort.input()), |plan| { - Ok(Arc::new(SortExec::new_with_partitioning( - reassign_sort_exprs_columns(child_sort.expr(), &plan.schema())?, - plan, - child_sort.preserve_partitioning(), - child_sort.fetch(), - ))) + Ok(Arc::new( + SortExec::new( + reassign_sort_exprs_columns(child_sort.expr(), &plan.schema())?, + plan, + ) + .with_preserve_partitioning(child_sort.preserve_partitioning()) + .with_fetch(child_sort.fetch()), + )) }, )?; @@ -930,7 +932,7 @@ mod tests { ProjectionExec::try_new( vec![(expr_col("tag1", &schema), String::from("tag1"))], Arc::new( - SortExec::try_new( + SortExec::new( vec![PhysicalSortExpr { expr: expr_col("tag2", &schema), options: SortOptions { @@ -939,9 +941,8 @@ mod tests { }, }], Arc::new(TestExec::new(schema)), - Some(42), ) - .unwrap(), + .with_fetch(Some(42)), ), ) .unwrap(), @@ -971,18 +972,20 @@ mod tests { let plan = Arc::new( ProjectionExec::try_new( vec![(expr_col("tag1", &schema), String::from("tag1"))], - Arc::new(SortExec::new_with_partitioning( - vec![PhysicalSortExpr { - expr: expr_col("tag2", &schema), - options: SortOptions { - descending: true, - ..Default::default() - }, - }], - Arc::new(TestExec::new_with_partitions(schema, 2)), - true, - Some(42), - )), + Arc::new( + SortExec::new( + vec![PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: SortOptions { + descending: true, + ..Default::default() + }, + }], + Arc::new(TestExec::new_with_partitions(schema, 2)), + ) + .with_preserve_partitioning(true) + .with_fetch(Some(42)), + ), ) .unwrap(), ); diff --git a/iox_query/src/physical_optimizer/sort/parquet_sortness.rs 
b/iox_query/src/physical_optimizer/sort/parquet_sortness.rs index fdc2de956e..86a13ef3b0 100644 --- a/iox_query/src/physical_optimizer/sort/parquet_sortness.rs +++ b/iox_query/src/physical_optimizer/sort/parquet_sortness.rs @@ -197,12 +197,8 @@ mod tests { }; let inner = ParquetExec::new(base_config, None, None); let plan = Arc::new( - SortExec::try_new( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - Some(42), - ) - .unwrap(), + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_fetch(Some(42)), ); let opt = ParquetSortness::default(); insta::assert_yaml_snapshot!( @@ -271,12 +267,11 @@ mod tests { infinite_source: false, }; let inner = ParquetExec::new(base_config, None, None); - let plan = Arc::new(SortExec::new_with_partitioning( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - true, - Some(42), - )); + let plan = Arc::new( + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_preserve_partitioning(true) + .with_fetch(Some(42)), + ); assert_unknown_partitioning(plan.output_partitioning(), 2); @@ -315,12 +310,8 @@ mod tests { }; let inner = ParquetExec::new(base_config, None, None); let plan = Arc::new( - SortExec::try_new( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - Some(42), - ) - .unwrap(), + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_fetch(Some(42)), ); let opt = ParquetSortness::default(); insta::assert_yaml_snapshot!( @@ -354,12 +345,8 @@ mod tests { }; let inner = ParquetExec::new(base_config, None, None); let plan = Arc::new( - SortExec::try_new( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - Some(42), - ) - .unwrap(), + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_fetch(Some(42)), ); let opt = ParquetSortness::default(); insta::assert_yaml_snapshot!( @@ -393,12 +380,8 @@ mod tests { }; let inner = ParquetExec::new(base_config, None, None); let plan = Arc::new( - SortExec::try_new( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - Some(42), - ) - .unwrap(), + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_fetch(Some(42)), ); let opt = ParquetSortness::default(); insta::assert_yaml_snapshot!( @@ -432,12 +415,8 @@ mod tests { }; let inner = ParquetExec::new(base_config, None, None); let plan = Arc::new( - SortExec::try_new( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - Some(42), - ) - .unwrap(), + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_fetch(Some(42)), ); let opt = ParquetSortness::default(); let mut config = ConfigOptions::default(); @@ -465,12 +444,8 @@ mod tests { let schema = schema(); let inner = EmptyExec::new(true, Arc::clone(&schema)); let plan = Arc::new( - SortExec::try_new( - ordering(["col2", "col1"], &schema), - Arc::new(inner), - Some(42), - ) - .unwrap(), + SortExec::new(ordering(["col2", "col1"], &schema), Arc::new(inner)) + .with_fetch(Some(42)), ); let opt = ParquetSortness::default(); insta::assert_yaml_snapshot!( @@ -532,12 +507,10 @@ mod tests { infinite_source: false, }; let plan = Arc::new(ParquetExec::new(base_config, None, None)); - let plan = Arc::new( - SortExec::try_new(ordering(["col2", "col1"], &schema), plan, Some(42)).unwrap(), - ); - let plan = Arc::new( - SortExec::try_new(ordering(["col1", "col2"], &schema), plan, Some(42)).unwrap(), - ); + let plan = + Arc::new(SortExec::new(ordering(["col2", "col1"], &schema), plan).with_fetch(Some(42))); + let plan = + Arc::new(SortExec::new(ordering(["col1", 
"col2"], &schema), plan).with_fetch(Some(42))); let opt = ParquetSortness::default(); insta::assert_yaml_snapshot!( OptimizationTest::new(plan, opt), diff --git a/iox_query/src/physical_optimizer/sort/redundant_sort.rs b/iox_query/src/physical_optimizer/sort/redundant_sort.rs index 4edefebb72..32f380cb0a 100644 --- a/iox_query/src/physical_optimizer/sort/redundant_sort.rs +++ b/iox_query/src/physical_optimizer/sort/redundant_sort.rs @@ -77,8 +77,7 @@ mod tests { None, None, )); - let plan = - Arc::new(SortExec::try_new(sort_expr(schema.as_ref()), input, Some(10)).unwrap()); + let plan = Arc::new(SortExec::new(sort_expr(schema.as_ref()), input).with_fetch(Some(10))); let opt = RedundantSort::default(); insta::assert_yaml_snapshot!( OptimizationTest::new(plan, opt), @@ -114,7 +113,7 @@ mod tests { None, None, )); - let plan = Arc::new(SortExec::try_new(sort_expr, input, Some(10)).unwrap()); + let plan = Arc::new(SortExec::new(sort_expr, input).with_fetch(Some(10))); let opt = RedundantSort::default(); insta::assert_yaml_snapshot!( OptimizationTest::new(plan, opt), diff --git a/iox_query/src/physical_optimizer/sort/sort_pushdown.rs b/iox_query/src/physical_optimizer/sort/sort_pushdown.rs index 59d62d651b..dec82a3f18 100644 --- a/iox_query/src/physical_optimizer/sort/sort_pushdown.rs +++ b/iox_query/src/physical_optimizer/sort/sort_pushdown.rs @@ -38,12 +38,9 @@ impl PhysicalOptimizerRule for SortPushdown { .children() .into_iter() .map(|plan| { - let new_sort_exec = SortExec::new_with_partitioning( - sort_exec.expr().to_vec(), - plan, - true, - sort_exec.fetch(), - ); + let new_sort_exec = SortExec::new(sort_exec.expr().to_vec(), plan) + .with_preserve_partitioning(true) + .with_fetch(sort_exec.fetch()); Arc::new(new_sort_exec) as _ }) .collect::<Vec<_>>(), @@ -90,12 +87,13 @@ mod tests { .map(|_| Arc::new(EmptyExec::new(true, Arc::clone(&schema))) as _) .collect(), )); - let plan = Arc::new(SortExec::new_with_partitioning( - sort_expr(schema.as_ref()), - Arc::new(UnionExec::new(vec![input])), - false, - Some(10), - )); + let plan = Arc::new( + SortExec::new( + sort_expr(schema.as_ref()), + Arc::new(UnionExec::new(vec![input])), + ) + .with_fetch(Some(10)), + ); let opt = SortPushdown::default(); insta::assert_yaml_snapshot!( OptimizationTest::new(plan, opt), @@ -126,12 +124,14 @@ mod tests { .map(|_| Arc::new(EmptyExec::new(true, Arc::clone(&schema))) as _) .collect(), )); - let plan = Arc::new(SortExec::new_with_partitioning( - sort_expr(schema.as_ref()), - Arc::new(UnionExec::new(vec![input])), - true, - Some(10), - )); + let plan = Arc::new( + SortExec::new( + sort_expr(schema.as_ref()), + Arc::new(UnionExec::new(vec![input])), + ) + .with_preserve_partitioning(true) + .with_fetch(Some(10)), + ); let opt = SortPushdown::default(); insta::assert_yaml_snapshot!( OptimizationTest::new(plan, opt), @@ -167,12 +167,11 @@ mod tests { FilterExec::try_new(Arc::new(Literal::new(ScalarValue::from(false))), plan).unwrap(), ); let plan = Arc::new(UnionExec::new(vec![plan])); - let plan = Arc::new(SortExec::new_with_partitioning( - sort_expr(schema.as_ref()), - plan, - true, - Some(10), - )); + let plan = Arc::new( + SortExec::new(sort_expr(schema.as_ref()), plan) + .with_preserve_partitioning(true) + .with_fetch(Some(10)), + ); assert_unknown_partitioning(plan.output_partitioning(), 2); diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs index 44e512e145..f289cfb227 100644 --- a/iox_query/src/provider.rs +++ b/iox_query/src/provider.rs @@ -312,7 +312,7 @@ impl TableProvider for 
ChunkTableProvider { // Sort after filter to reduce potential work. let plan = if let Some(output_sort_key) = self.output_sort_key.as_ref() { let sort_exprs = arrow_sort_key_exprs(output_sort_key, &self.arrow_schema()); - Arc::new(SortExec::try_new(sort_exprs, plan, None)?) + Arc::new(SortExec::new(sort_exprs, plan)) } else { plan }; diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 43ea141624..e9a8215463 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -30,9 +30,9 @@ bytes = { version = "1" } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] } crossbeam-utils = { version = "0.8" } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34c9bce0146e0ebacfb10334ee7aef13fc8cc94f" } -datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34c9bce0146e0ebacfb10334ee7aef13fc8cc94f", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } -datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "34c9bce0146e0ebacfb10334ee7aef13fc8cc94f", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f033d9c3b2bb038777fdfa2f539e7130eb926cdc" } +datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f033d9c3b2bb038777fdfa2f539e7130eb926cdc", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f033d9c3b2bb038777fdfa2f539e7130eb926cdc", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } digest = { version = "0.10", features = ["mac", "std"] } either = { version = "1" } fixedbitset = { version = "0.4" }
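The diff above, besides pinning a newer arrow-datafusion revision, migrates every sort construction from the removed `SortExec::try_new` / `SortExec::new_with_partitioning` constructors to the builder-style API. A minimal standalone sketch of the same migration follows, assuming the DataFusion revision pinned above; the schema, column name, and `EmptyExec` input are placeholders and are not taken from the IOx code.

use std::sync::Arc;

use datafusion::arrow::compute::SortOptions;
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::{empty::EmptyExec, expressions::col, sorts::sort::SortExec, ExecutionPlan};

fn main() -> datafusion::error::Result<()> {
    let schema = Arc::new(Schema::new(vec![Field::new("tag1", DataType::Utf8, true)]));
    let input: Arc<dyn ExecutionPlan> = Arc::new(EmptyExec::new(false, Arc::clone(&schema)));

    let sort_exprs = vec![PhysicalSortExpr {
        expr: col("tag1", &schema)?,
        options: SortOptions::default(),
    }];

    // Previously: SortExec::new_with_partitioning(sort_exprs, input, true, Some(42))
    // or SortExec::try_new(sort_exprs, input, Some(42))?. Now the options are set
    // through builder methods on the infallible constructor.
    let sort = SortExec::new(sort_exprs, input)
        .with_preserve_partitioning(true)
        .with_fetch(Some(42));

    assert_eq!(sort.fetch(), Some(42));
    Ok(())
}

The `with_*` setters return `Self`, so options that are not set (for example leaving `fetch` as `None`, as the provider.rs hunk does) simply fall back to the constructor defaults.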
b6e8b9cc5ef952aead9a58052e2cbeca62662429
Fraser Savage
2023-09-04 16:14:57
Clearly document why `UnableToReadNextOps` is returned
Nothing currently relies on this public error variant, but for automatic recovery we need the WAL to provide a contract that this error is returned only when the next operation in the WAL is unsalvageable.
null
docs(wal): Clearly document why `UnableToReadNextOps` is returned Nothing currently relies on this public error variant, but for automatic recovery we need the WAL to provide a contract that this error is returned only when the next operation in the WAL is unsalvageable.
diff --git a/wal/src/lib.rs b/wal/src/lib.rs index ae0bfe8848..46e88d957c 100644 --- a/wal/src/lib.rs +++ b/wal/src/lib.rs @@ -109,6 +109,8 @@ pub enum Error { source: blocking::ReaderError, }, + /// This error indicates that the next entry cannot be read from the + /// segment file. UnableToReadNextOps { source: blocking::ReaderError, },
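The added doc comment, together with the commit body, pins down a contract: `UnableToReadNextOps` is returned only when the next operation in the segment cannot be read at all. A minimal sketch of the automatic-recovery behaviour this enables follows; the generic `Ops` type and the `read_next` closure stand in for the crate's actual segment-reader API and are assumptions, and only the match on the public error variant is taken from the crate.

/// Replays decoded operations until the segment ends or an unsalvageable
/// entry is hit, relying on the documented contract for `UnableToReadNextOps`.
fn replay_until_unsalvageable<Ops>(
    mut read_next: impl FnMut() -> Result<Option<Ops>, wal::Error>,
    mut apply: impl FnMut(Ops),
) -> Result<(), wal::Error> {
    loop {
        match read_next() {
            // A successfully decoded batch of operations is applied.
            Ok(Some(ops)) => apply(ops),
            // End of the segment file: replay is complete.
            Ok(None) => return Ok(()),
            // Per the contract, this variant means the next operation is
            // unsalvageable, so replay stops here and the remainder of the
            // segment is dropped instead of retried.
            Err(wal::Error::UnableToReadNextOps { .. }) => return Ok(()),
            // Any other error is surfaced to the caller unchanged.
            Err(e) => return Err(e),
        }
    }
}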
f0832818eed5ad169227ada4fa5cc9c494e00215
Dom Dwyer
2023-06-01 17:44:30
invalid strftime partition template
An integration test asserting that a router returns an error, rather than panicking, when attempting to partition a write with an invalid strftime partition formatter.
null
test(router): invalid strftime partition template An integration test asserting that a router returns an error, rather than panicking, when attempting to partition a write with an invalid strftime partition formatter.
diff --git a/router/tests/grpc.rs b/router/tests/grpc.rs index ed75755aff..f015b543c1 100644 --- a/router/tests/grpc.rs +++ b/router/tests/grpc.rs @@ -14,8 +14,11 @@ use generated_types::influxdata::{ use hyper::StatusCode; use iox_catalog::interface::{Error as CatalogError, SoftDeletedRows}; use iox_time::{SystemProvider, TimeProvider}; +use mutable_batch::PartitionKeyError; use router::{ - dml_handlers::{CachedServiceProtectionLimit, DmlError, RetentionError, SchemaError}, + dml_handlers::{ + CachedServiceProtectionLimit, DmlError, PartitionError, RetentionError, SchemaError, + }, namespace_resolver::{self, NamespaceCreationError}, server::http::Error, }; @@ -940,6 +943,45 @@ async fn test_table_create() { }) } +#[tokio::test] +async fn test_invalid_strftime_partition_template() { + // Initialise a TestContext without a namespace autocreation policy. + let ctx = TestContextBuilder::default().build().await; + + // Explicitly create a namespace with a custom partition template. + let req = CreateNamespaceRequest { + name: "bananas_test".to_string(), + retention_period_ns: None, + partition_template: Some(PartitionTemplate { + parts: vec![TemplatePart { + part: Some(template_part::Part::TimeFormat("%3F".into())), + }], + }), + }; + ctx.grpc_delegate() + .namespace_service() + .create_namespace(Request::new(req)) + .await + .unwrap() + .into_inner() + .namespace + .unwrap(); + + // Write, which implicitly creates the table with the namespace's custom partition template + let lp = "plantains,tag1=A,tag2=B val=42i".to_string(); + let got = ctx.write_lp("bananas", "test", lp).await; + assert_matches!( + got, + Err(Error::DmlHandler(DmlError::Partition( + PartitionError::Partitioner(PartitionKeyError::InvalidStrftime(_)) + ))) + ); + + // Check the ingester did not observe any writes. + let writes = ctx.write_calls(); + assert!(writes.is_empty()); +} + #[tokio::test] async fn test_namespace_partition_template_implicit_table_creation() { // Initialise a TestContext without a namespace autocreation policy.
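The template part `%3F` is rejected because it is not a valid strftime specifier. As a small illustration of why (not necessarily the router's exact code path), chrono's strftime parser yields an `Item::Error` for unknown specifiers, which is what a partitioner can turn into an error such as `PartitionKeyError::InvalidStrftime` instead of panicking at write time:

use chrono::format::{strftime::StrftimeItems, Item};

/// Returns true if the strftime template contains a specifier chrono cannot parse.
fn has_invalid_specifier(format: &str) -> bool {
    StrftimeItems::new(format).any(|item| matches!(item, Item::Error))
}

fn main() {
    assert!(has_invalid_specifier("%3F"));       // unknown specifier, rejected
    assert!(!has_invalid_specifier("%Y-%m-%d")); // valid template
}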
f5ae1bc64e080bdac84e748dda0f1947db5a0bdf
Jamie Strandboge
2025-02-18 09:58:17
detect python runtime for PipManager and venv setup (#26029)
* fix: consider PYTHONHOME when detecting PipManager * fix: consider VIRTUAL_ENV when setting path to python from venv
null
fix: detect python runtime for PipManager and venv setup (#26029) * fix: consider PYTHONHOME when detecting PipManager * fix: consider VIRTUAL_ENV when setting path to python from venv
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index d4d102d8bd..a62113f88f 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -46,9 +46,13 @@ use object_store::ObjectStore; use observability_deps::tracing::*; use panic_logging::SendPanicsToTracing; use parquet_file::storage::{ParquetStorage, StorageId}; +use std::env; use std::process::Command; use std::{num::NonZeroUsize, sync::Arc, time::Duration}; -use std::{path::Path, str::FromStr}; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use thiserror::Error; use tokio::net::TcpListener; use tokio::time::Instant; @@ -669,7 +673,29 @@ fn determine_package_manager() -> Arc<dyn PythonEnvironmentManager> { } // Check for pip second - if let Ok(output) = Command::new("pip").arg("--version").output() { + // XXX: put this somewhere common + let python_exe_bn = if cfg!(windows) { + "python.exe" + } else { + "python3" + }; + let python_exe = if let Ok(v) = env::var("PYTHONHOME") { + // honor PYTHONHOME (set earlier for python standalone). python build + // standalone has bin/python3 on OSX/Linx and python.exe on Windows + let mut path = PathBuf::from(v); + if !cfg!(windows) { + path.push("bin"); + } + path.push(python_exe_bn); + path + } else { + PathBuf::from(python_exe_bn) + }; + + if let Ok(output) = Command::new(python_exe) + .args(["-m", "pip", "--version"]) + .output() + { if output.status.success() { return Arc::new(PipManager); } diff --git a/influxdb3_processing_engine/src/environment.rs b/influxdb3_processing_engine/src/environment.rs index 5d7122ff34..cd922b84ab 100644 --- a/influxdb3_processing_engine/src/environment.rs +++ b/influxdb3_processing_engine/src/environment.rs @@ -1,6 +1,7 @@ use crate::environment::PluginEnvironmentError::PluginEnvironmentDisabled; #[cfg(feature = "system-py")] use crate::virtualenv::{initialize_venv, VenvError}; +use std::env; use std::fmt::Debug; use std::path::{Path, PathBuf}; use std::process::Command; @@ -86,6 +87,36 @@ impl PythonEnvironmentManager for UVManager { } } +// XXX: put this somewhere common +fn find_python() -> PathBuf { + let python_exe_bn = if cfg!(windows) { + "python.exe" + } else { + "python3" + }; + if let Ok(v) = env::var("VIRTUAL_ENV") { + let mut path = PathBuf::from(v); + if cfg!(windows) { + path.push("Scripts"); + } else { + path.push("bin"); + } + path.push(python_exe_bn); + path + } else if let Ok(v) = env::var("PYTHONHOME") { + // honor PYTHONHOME (set earlier for python standalone). 
python build + // standalone has bin/python3 on OSX/Linx and python.exe on Windows + let mut path = PathBuf::from(v); + if !cfg!(windows) { + path.push("bin"); + } + path.push(python_exe_bn); + path + } else { + PathBuf::from(python_exe_bn) + } +} + impl PythonEnvironmentManager for PipManager { fn init_pyenv( &self, @@ -98,7 +129,8 @@ impl PythonEnvironmentManager for PipManager { }; if !is_valid_venv(venv_path) { - Command::new("python3") + let python_exe = find_python(); + Command::new(python_exe) .arg("-m") .arg("venv") .arg(venv_path) @@ -110,7 +142,10 @@ impl PythonEnvironmentManager for PipManager { Ok(()) } fn install_packages(&self, packages: Vec<String>) -> Result<(), PluginEnvironmentError> { - Command::new("pip") + let python_exe = find_python(); + Command::new(python_exe) + .arg("-m") + .arg("pip") .arg("install") .args(&packages) .output()?; @@ -120,7 +155,10 @@ impl PythonEnvironmentManager for PipManager { &self, requirements_path: String, ) -> Result<(), PluginEnvironmentError> { - Command::new("pip") + let python_exe = find_python(); + Command::new(python_exe) + .arg("-m") + .arg("pip") .args(["install", "-r", &requirements_path]) .output()?; Ok(()) diff --git a/influxdb3_processing_engine/src/virtualenv.rs b/influxdb3_processing_engine/src/virtualenv.rs index fb233aa83c..02eacd67ca 100644 --- a/influxdb3_processing_engine/src/virtualenv.rs +++ b/influxdb3_processing_engine/src/virtualenv.rs @@ -1,7 +1,8 @@ use observability_deps::tracing::debug; use pyo3::Python; +use std::env; use std::ffi::CString; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process::Command; use std::sync::Once; use thiserror::Error; @@ -17,12 +18,24 @@ pub enum VenvError { } fn get_python_version() -> Result<(u8, u8), std::io::Error> { - // linux/osx have python3, but windows only has python - let python_exe = if cfg!(target_os = "windows") { - "python" + // XXX: put this somewhere common + let python_exe_bn = if cfg!(windows) { + "python.exe" } else { "python3" }; + let python_exe = if let Ok(v) = env::var("VIRTUAL_ENV") { + let mut path = PathBuf::from(v); + if cfg!(windows) { + path.push("Scripts"); + } else { + path.push("bin"); + } + path.push(python_exe_bn); + path + } else { + PathBuf::from(python_exe_bn) + }; let output = Command::new(python_exe) .args([
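The same interpreter lookup is repeated in three places above, each flagged with `XXX: put this somewhere common`. A sketch of the shared helper those comments point at is below, consolidating the order used across the diff: `VIRTUAL_ENV` first (`Scripts/` on Windows, `bin/` elsewhere), then `PYTHONHOME` (the python-build-standalone layout), then the bare `python3` / `python.exe` on `PATH`. The function name and eventual module placement are assumptions, not part of the commit.

use std::env;
use std::path::PathBuf;

/// Resolve the Python interpreter to invoke, honouring an active virtualenv
/// and a standalone Python install before falling back to PATH.
pub fn find_python() -> PathBuf {
    let exe = if cfg!(windows) { "python.exe" } else { "python3" };

    if let Ok(venv) = env::var("VIRTUAL_ENV") {
        let mut path = PathBuf::from(venv);
        path.push(if cfg!(windows) { "Scripts" } else { "bin" });
        path.push(exe);
        return path;
    }

    if let Ok(home) = env::var("PYTHONHOME") {
        // python-build-standalone ships bin/python3 on macOS/Linux and
        // python.exe at the top level on Windows.
        let mut path = PathBuf::from(home);
        if !cfg!(windows) {
            path.push("bin");
        }
        path.push(exe);
        return path;
    }

    // Fall back to whatever interpreter is first on PATH.
    PathBuf::from(exe)
}

Call sites then reduce to the pattern the diff already uses, e.g. `Command::new(find_python()).args(["-m", "pip", "--version"])`, so pip is always exercised through the detected interpreter rather than through a bare `pip` binary.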
f411f5d6d5e00dfbbe9461269a97c53244224e6f
Marco Neumann
2023-04-24 11:46:54
update DF to `181e5ccf2816ccaa05d8aaef0b375d4b7bbceece` (#7630)
I need the following two PRs: - https://github.com/apache/arrow-datafusion/pull/6045 - https://github.com/apache/arrow-datafusion/pull/6085
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: update DF to `181e5ccf2816ccaa05d8aaef0b375d4b7bbceece` (#7630) I need the following two PRs: - https://github.com/apache/arrow-datafusion/pull/6045 - https://github.com/apache/arrow-datafusion/pull/6085 Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index a71f4701db..4be019bcf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1440,8 +1440,8 @@ dependencies = [ [[package]] name = "datafusion" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "ahash 0.8.3", "arrow", @@ -1489,8 +1489,8 @@ dependencies = [ [[package]] name = "datafusion-common" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "arrow", "arrow-array", @@ -1503,8 +1503,8 @@ dependencies = [ [[package]] name = "datafusion-execution" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "dashmap", "datafusion-common", @@ -1520,8 +1520,8 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "ahash 0.8.3", "arrow", @@ -1531,8 +1531,8 @@ dependencies = [ [[package]] name = "datafusion-optimizer" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "arrow", "async-trait", @@ -1543,13 +1543,13 @@ dependencies = [ "hashbrown 0.13.2", "itertools", "log", - "regex-syntax 0.6.29", + "regex-syntax 0.7.1", ] [[package]] name = "datafusion-physical-expr" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "ahash 0.8.3", "arrow", @@ -1567,6 +1567,7 @@ dependencies = [ "indexmap", "itertools", "lazy_static", + "libc", "md-5", "paste", "petgraph", @@ -1579,8 +1580,8 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "arrow", "chrono", @@ -1593,8 +1594,8 @@ 
dependencies = [ [[package]] name = "datafusion-row" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "arrow", "datafusion-common", @@ -1604,8 +1605,8 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "22.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca#ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" +version = "23.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=181e5ccf2816ccaa05d8aaef0b375d4b7bbceece#181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" dependencies = [ "arrow", "arrow-schema", diff --git a/Cargo.toml b/Cargo.toml index b2acce110c..539df59e59 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,8 +116,8 @@ license = "MIT OR Apache-2.0" arrow = { version = "37.0.0" } arrow-flight = { version = "37.0.0" } chrono-english = { git = "https://github.com/stevedonovan/chrono-english.git", rev = "def5941ebee24b55e1174eb18ab33d91603f907a" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="181e5ccf2816ccaa05d8aaef0b375d4b7bbceece", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" } hashbrown = { version = "0.13.2" } parquet = { version = "37.0.0" } tonic = { version = "0.9.2", features = ["tls", "tls-webpki-roots"] } diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index 1010faaf8c..05ce29f3af 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -884,8 +884,8 @@ name: logical_plan name: physical_plan plan SortPreservingMergeExec: [iox::measurement@0 ASC NULLS LAST,tag0@2 ASC NULLS LAST,time@1 ASC NULLS LAST] - UnionExec - SortExec: expr=[iox::measurement@0 ASC NULLS LAST,tag0@2 ASC NULLS LAST,time@1 ASC NULLS LAST] + SortExec: expr=[iox::measurement@0 ASC NULLS LAST,tag0@2 ASC NULLS LAST,time@1 ASC NULLS LAST] + InterleaveExec ProjectionExec: expr=[m0 as iox::measurement, 0 as time, tag0@0 as tag0, COUNT(m0.f64)@1 as count, SUM(m0.f64)@2 as sum, STDDEV(m0.f64)@3 as stddev] AggregateExec: mode=FinalPartitioned, gby=[tag0@0 as tag0], aggr=[COUNT(m0.f64), SUM(m0.f64), STDDEV(m0.f64)] CoalesceBatchesExec: target_batch_size=8192 @@ -893,7 +893,6 @@ name: physical_plan RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 AggregateExec: mode=Partial, gby=[tag0@1 as tag0], aggr=[COUNT(m0.f64), SUM(m0.f64), STDDEV(m0.f64)] ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag0@1 ASC], projection=[f64, tag0] - SortExec: expr=[iox::measurement@0 ASC NULLS LAST,tag0@2 ASC NULLS LAST,time@1 ASC NULLS LAST] ProjectionExec: expr=[m1 as iox::measurement, 0 as time, tag0@0 as tag0, COUNT(m1.f64)@1 as count, SUM(m1.f64)@2 as 
sum, STDDEV(m1.f64)@3 as stddev] AggregateExec: mode=FinalPartitioned, gby=[tag0@0 as tag0], aggr=[COUNT(m1.f64), SUM(m1.f64), STDDEV(m1.f64)] CoalesceBatchesExec: target_batch_size=8192 diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 17962e680e..9998eeca98 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -30,9 +30,9 @@ bytes = { version = "1" } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] } crossbeam-utils = { version = "0.8" } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca" } -datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } -datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "ebb839068b9d99d3a3fea0a50a1e4baf4f1a5fca", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "181e5ccf2816ccaa05d8aaef0b375d4b7bbceece" } +datafusion-optimizer = { git = "https://github.com/apache/arrow-datafusion.git", rev = "181e5ccf2816ccaa05d8aaef0b375d4b7bbceece", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } +datafusion-physical-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "181e5ccf2816ccaa05d8aaef0b375d4b7bbceece", default-features = false, features = ["crypto_expressions", "regex_expressions", "unicode_expressions"] } digest = { version = "0.10", features = ["mac", "std"] } either = { version = "1" } fixedbitset = { version = "0.4" }
2306c383f3e75880a08b80e2882fbf36eca6d3e7
Stuart Carnie
2022-11-23 11:33:49
Introduce InfluxQL to Flight (#6166)
* feat: Introduce InfluxQL to Flight All InfluxQL queries will fail with an error * chore: Temper protobuf lint * chore: Finalize flight.proto changes; fix tests * chore: Add tests for InfluxQL planner * chore: Update docs * chore: Update docs * chore: Rename back to original * chore: Use .into() rather than cast * chore: Use function rather than field * chore: Improved InfluxQL planner name * chore: Restore `impl Into<String>` argument * chore: Add a comment that Go clients are unable to execute InfluxQL * chore: Add a test for the `--lang` argument and InfluxQL
null
feat: Introduce InfluxQL to Flight (#6166) * feat: Introduce InfluxQL to Flight All InfluxQL queries will fail with an error * chore: Temper protobuf lint * chore: Finalize flight.proto changes; fix tests * chore: Add tests for InfluxQL planner * chore: Update docs * chore: Update docs * chore: Rename back to original * chore: Use .into() rather than cast * chore: Use function rather than field * chore: Improved InfluxQL planner name * chore: Restore `impl Into<String>` argument * chore: Add a comment that Go clients are unable to execute InfluxQL * chore: Add a test for the `--lang` argument and InfluxQL
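A minimal sketch of how a Flight client opts into InfluxQL with the new ticket field introduced in the diff that follows; the helper name is an assumption, while the generated types (`ReadInfo`, `read_info::QueryType`) and the encoding step mirror what the tests in the diff exercise. Connection handling and result collection are omitted.

use generated_types::influxdata::iox::querier::v1::{read_info, ReadInfo};
use prost::Message;

/// Build the protobuf ticket payload for an InfluxQL query.
fn influxql_ticket(namespace: &str, query: &str) -> Vec<u8> {
    let read_info = ReadInfo {
        namespace_name: namespace.to_string(),
        sql_query: query.to_string(),
        // QUERY_TYPE_INFLUX_QL selects the InfluxQL planner; leaving this
        // unspecified (or QUERY_TYPE_SQL) keeps the existing SQL behaviour.
        query_type: read_info::QueryType::InfluxQl.into(),
    };

    // The serialized protobuf bytes become the Flight `Ticket` payload.
    read_info.encode_to_vec()
}

As the commit notes, the JSON ticket form is left SQL-only, so Go clients cannot select InfluxQL until that structure is extended.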
diff --git a/Cargo.lock b/Cargo.lock index ece13fc7b1..ac328a66db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2481,6 +2481,7 @@ version = "0.1.0" dependencies = [ "arrow", "arrow_util", + "assert_matches", "async-trait", "chrono", "data_types", @@ -2489,6 +2490,7 @@ dependencies = [ "executor", "futures", "hashbrown 0.13.1", + "influxdb_influxql_parser", "itertools", "object_store", "observability_deps", @@ -4600,6 +4602,7 @@ dependencies = [ "arrow", "arrow-flight", "arrow_util", + "assert_matches", "bytes", "data_types", "datafusion", diff --git a/generated_types/protos/influxdata/iox/querier/v1/flight.proto b/generated_types/protos/influxdata/iox/querier/v1/flight.proto index 858f105e5a..ee66136837 100644 --- a/generated_types/protos/influxdata/iox/querier/v1/flight.proto +++ b/generated_types/protos/influxdata/iox/querier/v1/flight.proto @@ -9,6 +9,17 @@ message ReadInfo { // SQL query. string sql_query = 2; + + QueryType query_type = 3; + + enum QueryType { + // An unspecified query type. IOx may choose how to interpret sql_query. + QUERY_TYPE_UNSPECIFIED = 0; + // SQL query. + QUERY_TYPE_SQL = 1; + // InfluxQL query. + QUERY_TYPE_INFLUX_QL = 2; + } } // Response in "end-user to querier" flight response. diff --git a/influxdb_iox/src/commands/query.rs b/influxdb_iox/src/commands/query.rs index 4ee3e2a3cb..cf2898b1ed 100644 --- a/influxdb_iox/src/commands/query.rs +++ b/influxdb_iox/src/commands/query.rs @@ -1,6 +1,9 @@ use influxdb_iox_client::{ connection::Connection, - flight::{self, generated_types::ReadInfo}, + flight::{ + self, + generated_types::{read_info, ReadInfo}, + }, format::QueryOutputFormat, }; use std::str::FromStr; @@ -17,6 +20,13 @@ pub enum Error { pub type Result<T, E = Error> = std::result::Result<T, E>; +#[derive(clap::ValueEnum, Clone, Debug, PartialEq)] +#[clap(rename_all = "lower")] +enum QueryLanguage { + Sql, + InfluxQL, +} + /// Query the data with SQL #[derive(Debug, clap::Parser)] pub struct Config { @@ -31,14 +41,20 @@ pub struct Config { /// Optional format ('pretty', 'json', or 'csv') #[clap(short, long, default_value = "pretty", action)] format: String, + + /// Query type used + #[clap(short = 'l', long = "lang", default_value = "sql")] + query_lang: QueryLanguage, } pub async fn command(connection: Connection, config: Config) -> Result<()> { let mut client = flight::Client::new(connection); + let Config { namespace, format, query, + query_lang, } = config; let format = QueryOutputFormat::from_str(&format)?; @@ -47,6 +63,11 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> { .perform_query(ReadInfo { namespace_name: namespace, sql_query: query, + query_type: match query_lang { + QueryLanguage::Sql => read_info::QueryType::Sql, + QueryLanguage::InfluxQL => read_info::QueryType::InfluxQl, + } + .into(), }) .await?; diff --git a/influxdb_iox/src/commands/sql/repl.rs b/influxdb_iox/src/commands/sql/repl.rs index c1b4e6808c..fb579bc14b 100644 --- a/influxdb_iox/src/commands/sql/repl.rs +++ b/influxdb_iox/src/commands/sql/repl.rs @@ -11,7 +11,9 @@ use snafu::{ResultExt, Snafu}; use super::repl_command::ReplCommand; use influxdb_iox_client::{ - connection::Connection, flight::generated_types::ReadInfo, format::QueryOutputFormat, + connection::Connection, + flight::generated_types::{read_info, ReadInfo}, + format::QueryOutputFormat, }; #[derive(Debug, Snafu)] @@ -398,6 +400,7 @@ async fn scrape_query( .perform_query(ReadInfo { namespace_name: db_name.to_string(), sql_query: query.to_string(), + query_type: 
read_info::QueryType::Sql.into(), }) .await .context(RunningRemoteQuerySnafu)?; diff --git a/influxdb_iox/tests/end_to_end_cases/all_in_one.rs b/influxdb_iox/tests/end_to_end_cases/all_in_one.rs index c78454c1e4..54c26beb11 100644 --- a/influxdb_iox/tests/end_to_end_cases/all_in_one.rs +++ b/influxdb_iox/tests/end_to_end_cases/all_in_one.rs @@ -2,7 +2,7 @@ use arrow_util::assert_batches_sorted_eq; use http::StatusCode; use iox_time::{SystemProvider, TimeProvider}; use test_helpers_end_to_end::{ - get_write_token, maybe_skip_integration, rand_name, run_query, wait_for_persisted, + get_write_token, maybe_skip_integration, rand_name, run_sql, wait_for_persisted, write_to_router, ServerFixture, TestConfig, }; @@ -34,7 +34,7 @@ async fn smoke() { // run query let sql = format!("select * from {}", table_name); - let batches = run_query(sql, namespace, all_in_one.querier_grpc_connection()).await; + let batches = run_sql(sql, namespace, all_in_one.querier_grpc_connection()).await; let expected = [ "+------+------+--------------------------------+-----+", @@ -79,7 +79,7 @@ async fn ephemeral_mode() { // run query // do not select time becasue it changes every time let sql = format!("select tag1, tag2, val from {}", table_name); - let batches = run_query(sql, namespace, all_in_one.querier_grpc_connection()).await; + let batches = run_sql(sql, namespace, all_in_one.querier_grpc_connection()).await; let expected = [ "+------+------+-----+", diff --git a/influxdb_iox/tests/end_to_end_cases/cli.rs b/influxdb_iox/tests/end_to_end_cases/cli.rs index dff0c47724..0693b60797 100644 --- a/influxdb_iox/tests/end_to_end_cases/cli.rs +++ b/influxdb_iox/tests/end_to_end_cases/cli.rs @@ -422,13 +422,15 @@ async fn write_and_query() { wait_for_query_result( state, "SELECT * from h2o_temperature order by time desc limit 10", + None, "| 51.3 | coyote_creek | CA | 55.1 | 1970-01-01T00:00:01.568756160Z |" ).await; - // data from 'read_filter.lp.gz' + // data from 'read_filter.lp.gz', specific query language type wait_for_query_result( state, "SELECT * from m0 order by time desc limit 10;", + Some(QueryLanguage::Sql), "| value1 | value9 | value9 | value49 | value0 | 2021-04-26T13:47:39.727574Z | 1 |" ).await; @@ -436,6 +438,7 @@ async fn write_and_query() { wait_for_query_result( state, "SELECT * from cpu where cpu = 'cpu2' order by time desc limit 10", + None, "cpu2 | MacBook-Pro-8.hsd1.ma.comcast.net | 2022-09-30T12:55:00Z" ).await; } @@ -486,9 +489,84 @@ async fn query_error_handling() { .await } +/// Test error handling for the query CLI command for InfluxQL queries +#[tokio::test] +async fn influxql_error_handling() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let mut cluster = MiniCluster::create_shared(database_url).await; + + StepTest::new( + &mut cluster, + vec![ + Step::WriteLineProtocol("this_table_does_exist,tag=A val=\"foo\" 1".into()), + Step::Custom(Box::new(|state: &mut StepTestState| { + async { + let querier_addr = state.cluster().querier().querier_grpc_base().to_string(); + let namespace = state.cluster().namespace(); + + Command::cargo_bin("influxdb_iox") + .unwrap() + .arg("-h") + .arg(&querier_addr) + .arg("query") + .arg("--lang") + .arg("influxql") + .arg(namespace) + .arg("CREATE DATABASE foo") + .assert() + .failure() + .stderr(predicate::eq( + "Error querying: Error while planning query: This feature is not implemented: CREATE DATABASE\n", + )); + } + .boxed() + })), + ], + ) + .run() + .await +} + +#[allow(dead_code)] +#[derive(Clone, Copy)] 
+enum QueryLanguage { + Sql, + InfluxQL, +} + +impl ToString for QueryLanguage { + fn to_string(&self) -> String { + match self { + Self::Sql => "sql".to_string(), + Self::InfluxQL => "influxql".to_string(), + } + } +} + +trait AddQueryLanguage { + /// Add the query language option to the receiver. + fn add_query_lang(&mut self, query_lang: Option<QueryLanguage>) -> &mut Self; +} + +impl AddQueryLanguage for assert_cmd::Command { + fn add_query_lang(&mut self, query_lang: Option<QueryLanguage>) -> &mut Self { + match query_lang { + Some(lang) => self.arg("--lang").arg(lang.to_string()), + None => self, + } + } +} + /// Runs the specified query in a loop for up to 10 seconds, waiting /// for the specified output to appear -async fn wait_for_query_result(state: &mut StepTestState<'_>, query_sql: &str, expected: &str) { +async fn wait_for_query_result( + state: &mut StepTestState<'_>, + query_sql: &str, + query_lang: Option<QueryLanguage>, + expected: &str, +) { let querier_addr = state.cluster().querier().querier_grpc_base().to_string(); let namespace = state.cluster().namespace(); @@ -503,6 +581,7 @@ async fn wait_for_query_result(state: &mut StepTestState<'_>, query_sql: &str, e .arg("-h") .arg(&querier_addr) .arg("query") + .add_query_lang(query_lang) .arg(namespace) .arg(query_sql) .assert(); diff --git a/influxdb_iox/tests/end_to_end_cases/influxql.rs b/influxdb_iox/tests/end_to_end_cases/influxql.rs new file mode 100644 index 0000000000..3b6d09c671 --- /dev/null +++ b/influxdb_iox/tests/end_to_end_cases/influxql.rs @@ -0,0 +1,33 @@ +use test_helpers_end_to_end::{maybe_skip_integration, MiniCluster, Step, StepTest}; + +#[tokio::test] +async fn influxql_returns_error() { + test_helpers::maybe_start_logging(); + let database_url = maybe_skip_integration!(); + + let table_name = "the_table"; + + // Set up the cluster ==================================== + let mut cluster = MiniCluster::create_shared(database_url).await; + + StepTest::new( + &mut cluster, + vec![ + Step::WriteLineProtocol(format!( + "{},tag1=A,tag2=B val=42i 123456\n\ + {},tag1=A,tag2=C val=43i 123457", + table_name, table_name + )), + Step::WaitForReadable, + Step::AssertNotPersisted, + Step::InfluxQLExpectingError { + sql: format!("select * from {}", table_name), + expected_error_code: tonic::Code::InvalidArgument, + expected_message: + "Error while planning query: This feature is not implemented: SELECT".into(), + }, + ], + ) + .run() + .await +} diff --git a/influxdb_iox/tests/end_to_end_cases/mod.rs b/influxdb_iox/tests/end_to_end_cases/mod.rs index 73962915f5..fc497f4b62 100644 --- a/influxdb_iox/tests/end_to_end_cases/mod.rs +++ b/influxdb_iox/tests/end_to_end_cases/mod.rs @@ -6,6 +6,7 @@ mod cli; mod compactor; mod debug; mod error; +mod influxql; mod ingester; mod logging; mod metrics; diff --git a/influxdb_iox/tests/end_to_end_cases/querier.rs b/influxdb_iox/tests/end_to_end_cases/querier.rs index 74b92b898d..c06266028c 100644 --- a/influxdb_iox/tests/end_to_end_cases/querier.rs +++ b/influxdb_iox/tests/end_to_end_cases/querier.rs @@ -9,8 +9,8 @@ use futures::FutureExt; use predicates::prelude::*; use test_helpers::assert_contains; use test_helpers_end_to_end::{ - maybe_skip_integration, run_query, try_run_query, GrpcRequestBuilder, MiniCluster, Step, - StepTest, StepTestState, TestConfig, + maybe_skip_integration, run_sql, try_run_sql, GrpcRequestBuilder, MiniCluster, Step, StepTest, + StepTestState, TestConfig, }; #[tokio::test] @@ -260,7 +260,7 @@ async fn ingester_panic_1() { async move { // Ingester panics 
but querier will retry. let sql = format!("select * from {} where tag2='B'", table_name); - let batches = run_query( + let batches = run_sql( sql, state.cluster().namespace(), state.cluster().querier().querier_grpc_connection(), @@ -365,7 +365,7 @@ async fn ingester_panic_2() { loop { let sql = format!("select tag,val,time from {} where tag='A'", table_name); - let batches = run_query( + let batches = run_sql( sql, state.cluster().namespace(), state.cluster().querier().querier_grpc_connection(), @@ -616,7 +616,7 @@ async fn oom_protection() { Step::Custom(Box::new(move |state: &mut StepTestState| { async move { let sql = format!("select * from {}", table_name); - let err = try_run_query( + let err = try_run_sql( sql, state.cluster().namespace(), state.cluster().querier().querier_grpc_connection(), diff --git a/influxdb_iox_client/src/client/flight/mod.rs b/influxdb_iox_client/src/client/flight/mod.rs index 0aa9fd18f3..3e9453a1a3 100644 --- a/influxdb_iox_client/src/client/flight/mod.rs +++ b/influxdb_iox_client/src/client/flight/mod.rs @@ -85,6 +85,7 @@ pub enum Error { /// flight::{ /// Client, /// generated_types::ReadInfo, +/// generated_types::read_info, /// }, /// }; /// @@ -99,6 +100,7 @@ pub enum Error { /// .perform_query(ReadInfo { /// namespace_name: "my_database".to_string(), /// sql_query: "select * from cpu_load".to_string(), +/// query_type: read_info::QueryType::Sql.into(), /// }) /// .await /// .expect("query request should work"); diff --git a/iox_query/Cargo.toml b/iox_query/Cargo.toml index 9d2746084c..cfd669a2fe 100644 --- a/iox_query/Cargo.toml +++ b/iox_query/Cargo.toml @@ -25,6 +25,7 @@ datafusion_util = { path = "../datafusion_util" } executor = { path = "../executor"} futures = "0.3" hashbrown = { workspace = true } +influxdb_influxql_parser = { path = "../influxdb_influxql_parser" } itertools = "0.10.5" object_store = "0.5.1" observability_deps = { path = "../observability_deps" } @@ -41,3 +42,4 @@ workspace-hack = { path = "../workspace-hack"} [dev-dependencies] # In alphabetical order test_helpers = { path = "../test_helpers" } +assert_matches = "1" \ No newline at end of file diff --git a/iox_query/src/frontend.rs b/iox_query/src/frontend.rs index ebf0eb4e49..d442b448c0 100644 --- a/iox_query/src/frontend.rs +++ b/iox_query/src/frontend.rs @@ -1,4 +1,5 @@ pub mod common; +pub mod influxql; pub mod influxrpc; pub mod reorg; pub mod sql; diff --git a/iox_query/src/frontend/influxql.rs b/iox_query/src/frontend/influxql.rs new file mode 100644 index 0000000000..988d748c37 --- /dev/null +++ b/iox_query/src/frontend/influxql.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; + +use crate::exec::context::IOxSessionContext; +use crate::plan::influxql::InfluxQLToLogicalPlan; +use crate::{debug, DataFusionError, QueryNamespace}; +use datafusion::{error::Result, physical_plan::ExecutionPlan}; +use influxdb_influxql_parser::parse_statements; + +/// This struct can create plans for running SQL queries against databases +#[derive(Debug, Default)] +pub struct InfluxQLQueryPlanner {} + +impl InfluxQLQueryPlanner { + pub fn new() -> Self { + Self::default() + } + + /// Plan an InfluxQL query against the catalogs registered with `ctx`, and return a + /// DataFusion physical execution plan that runs on the query executor. 
+ pub async fn query( + &self, + database: Arc<dyn QueryNamespace>, + query: &str, + ctx: &IOxSessionContext, + ) -> Result<Arc<dyn ExecutionPlan>> { + let ctx = ctx.child_ctx("query"); + debug!(text=%query, "planning InfluxQL query"); + + let mut statements = parse_statements(query) + .map_err(|e| DataFusionError::External(format!("{}", e).into()))?; + + if statements.len() != 1 { + return Err(DataFusionError::NotImplemented( + "The context currently only supports a single SQL statement".to_string(), + )); + } + + let planner = InfluxQLToLogicalPlan::new(&ctx, database); + let logical_plan = planner.statement_to_plan(statements.pop().unwrap())?; + debug!(plan=%logical_plan.display_graphviz(), "logical plan"); + + // This would only work for SELECT statements at the moment, as the schema queries do + // not return ExecutionPlan + ctx.create_physical_plan(&logical_plan).await + } +} diff --git a/iox_query/src/plan.rs b/iox_query/src/plan.rs index 693ff90cdc..af576a8266 100644 --- a/iox_query/src/plan.rs +++ b/iox_query/src/plan.rs @@ -1,3 +1,4 @@ pub mod fieldlist; +pub mod influxql; pub mod seriesset; pub mod stringset; diff --git a/iox_query/src/plan/influxql.rs b/iox_query/src/plan/influxql.rs new file mode 100644 index 0000000000..ba1b8c41ed --- /dev/null +++ b/iox_query/src/plan/influxql.rs @@ -0,0 +1,111 @@ +use crate::{DataFusionError, IOxSessionContext, QueryNamespace}; +use datafusion::common::Result; +use datafusion::execution::context::SessionState; +use datafusion::logical_expr::LogicalPlan; +use influxdb_influxql_parser::statement::Statement; +use std::sync::Arc; + +/// InfluxQL query planner +#[allow(unused)] +#[derive(Debug)] +pub struct InfluxQLToLogicalPlan<'a> { + ctx: &'a IOxSessionContext, + state: SessionState, + database: Arc<dyn QueryNamespace>, +} + +impl<'a> InfluxQLToLogicalPlan<'a> { + pub fn new(ctx: &'a IOxSessionContext, database: Arc<dyn QueryNamespace>) -> Self { + Self { + ctx, + state: ctx.inner().state(), + database, + } + } + + pub fn statement_to_plan(&self, statement: Statement) -> Result<LogicalPlan> { + match statement { + Statement::CreateDatabase(_) => { + Err(DataFusionError::NotImplemented("CREATE DATABASE".into())) + } + Statement::Delete(_) => Err(DataFusionError::NotImplemented("DELETE".into())), + Statement::DropMeasurement(_) => { + Err(DataFusionError::NotImplemented("DROP MEASUREMENT".into())) + } + Statement::Explain(_) => Err(DataFusionError::NotImplemented("EXPLAIN".into())), + Statement::Select(_) => Err(DataFusionError::NotImplemented("SELECT".into())), + Statement::ShowDatabases(_) => { + Err(DataFusionError::NotImplemented("SHOW DATABASES".into())) + } + Statement::ShowMeasurements(_) => { + Err(DataFusionError::NotImplemented("SHOW MEASUREMENTS".into())) + } + Statement::ShowRetentionPolicies(_) => Err(DataFusionError::NotImplemented( + "SHOW RETENTION POLICIES".into(), + )), + Statement::ShowTagKeys(_) => { + Err(DataFusionError::NotImplemented("SHOW TAG KEYS".into())) + } + Statement::ShowTagValues(_) => { + Err(DataFusionError::NotImplemented("SHOW TAG VALUES".into())) + } + Statement::ShowFieldKeys(_) => { + Err(DataFusionError::NotImplemented("SHOW FIELD KEYS".into())) + } + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::exec::{Executor, ExecutorType}; + use crate::test::{TestChunk, TestDatabase}; + use assert_matches::assert_matches; + use influxdb_influxql_parser::parse_statements; + use predicate::rpc_predicate::QueryNamespaceMeta; + + fn logical_plan(sql: &str) -> Result<LogicalPlan> { + let mut 
statements = parse_statements(sql).unwrap(); + // setup a db + let chunk0 = Arc::new( + TestChunk::new("h2o") + .with_id(0) + .with_tag_column("foo") + .with_tag_column("bar") + .with_i64_field_column("i64_field") + .with_i64_field_column("i64_field_2") + .with_time_column() + .with_one_row_of_data(), + ); + // index of columns in the above chunk: [bar, foo, i64_field, i64_field_2, time] + let executor = Arc::new(Executor::new(1)); + let ctx = executor.new_context(ExecutorType::Query); + let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor))); + test_db.add_chunk("my_partition_key", Arc::clone(&chunk0)); + let table = "h2o"; + + let _schema = test_db.table_schema(table).unwrap(); + + let planner = InfluxQLToLogicalPlan::new(&ctx, test_db); + planner.statement_to_plan(statements.pop().unwrap()) + } + + /// Verify the list of unsupported statements. + /// + /// Some statements will remain unsupported, indefinitely. + #[test] + fn test_unsupported_statements() { + assert_matches!(logical_plan("CREATE DATABASE foo"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "CREATE DATABASE")); + assert_matches!(logical_plan("DELETE FROM foo"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "DELETE")); + assert_matches!(logical_plan("DROP MEASUREMENT foo"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "DROP MEASUREMENT")); + assert_matches!(logical_plan("EXPLAIN SELECT bar FROM foo"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "EXPLAIN")); + assert_matches!(logical_plan("SELECT bar FROM foo"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SELECT")); + assert_matches!(logical_plan("SHOW DATABASES"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SHOW DATABASES")); + assert_matches!(logical_plan("SHOW MEASUREMENTS"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SHOW MEASUREMENTS")); + assert_matches!(logical_plan("SHOW RETENTION POLICIES"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SHOW RETENTION POLICIES")); + assert_matches!(logical_plan("SHOW TAG KEYS"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SHOW TAG KEYS")); + assert_matches!(logical_plan("SHOW TAG VALUES WITH KEY = bar"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SHOW TAG VALUES")); + assert_matches!(logical_plan("SHOW FIELD KEYS"), Err(DataFusionError::NotImplemented(s)) => assert_eq!(s, "SHOW FIELD KEYS")); + } +} diff --git a/service_common/src/planner.rs b/service_common/src/planner.rs index 67de772667..8f5b439367 100644 --- a/service_common/src/planner.rs +++ b/service_common/src/planner.rs @@ -10,6 +10,7 @@ use iox_query::{ }; pub use datafusion::error::{DataFusionError as Error, Result}; +use iox_query::frontend::influxql::InfluxQLQueryPlanner; use predicate::rpc_predicate::InfluxRpcPredicate; /// Query planner that plans queries on a separate threadpool. @@ -43,6 +44,22 @@ impl Planner { .await } + /// Plan an InfluxQL query against the data in `database`, and return a + /// DataFusion physical execution plan. 
+ pub async fn influxql( + &self, + database: Arc<dyn QueryNamespace>, + query: impl Into<String> + Send, + ) -> Result<Arc<dyn ExecutionPlan>> { + let planner = InfluxQLQueryPlanner::new(); + let query = query.into(); + let ctx = self.ctx.child_ctx("planner influxql"); + + self.ctx + .run(async move { planner.query(database, &query, &ctx).await }) + .await + } + /// Creates a plan as described on /// [`InfluxRpcPlanner::table_names`], on a separate threadpool pub async fn table_names<N>( diff --git a/service_grpc_flight/Cargo.toml b/service_grpc_flight/Cargo.toml index 66d62e4e6e..4128cfe5c1 100644 --- a/service_grpc_flight/Cargo.toml +++ b/service_grpc_flight/Cargo.toml @@ -34,3 +34,4 @@ workspace-hack = { path = "../workspace-hack"} [dev-dependencies] metric = { path = "../metric" } +assert_matches = "1" diff --git a/service_grpc_flight/src/lib.rs b/service_grpc_flight/src/lib.rs index b1cb3d6c56..d22a16d179 100644 --- a/service_grpc_flight/src/lib.rs +++ b/service_grpc_flight/src/lib.rs @@ -12,6 +12,7 @@ use data_types::NamespaceNameError; use datafusion::{error::DataFusionError, physical_plan::ExecutionPlan}; use futures::{SinkExt, Stream, StreamExt}; use generated_types::influxdata::iox::querier::v1 as proto; +use generated_types::influxdata::iox::querier::v1::read_info::QueryType; use iox_query::{ exec::{ExecutionContextProvider, IOxSessionContext}, QueryCompletedToken, QueryNamespace, @@ -22,7 +23,8 @@ use prost::Message; use serde::Deserialize; use service_common::{datafusion_error_to_tonic_code, planner::Planner, QueryNamespaceProvider}; use snafu::{ResultExt, Snafu}; -use std::{fmt::Debug, pin::Pin, sync::Arc, task::Poll, time::Instant}; +use std::fmt::{Display, Formatter}; +use std::{fmt, fmt::Debug, pin::Pin, sync::Arc, task::Poll, time::Instant}; use tokio::task::JoinHandle; use tonic::{Request, Response, Streaming}; use trace::{ctx::SpanContext, span::SpanExt}; @@ -123,7 +125,22 @@ type TonicStream<T> = Pin<Box<dyn Stream<Item = Result<T, tonic::Status>> + Send /// Body of the `Ticket` serialized and sent to the do_get endpoint. struct ReadInfo { namespace_name: String, - sql_query: String, + query: Query, +} + +#[derive(Deserialize, Debug, Clone)] +enum Query { + Sql(String), + InfluxQL(String), +} + +impl Display for Query { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::Sql(s) => fmt::Display::fmt(s, f), + Self::InfluxQL(s) => fmt::Display::fmt(s, f), + } + } } impl ReadInfo { @@ -131,13 +148,25 @@ impl ReadInfo { /// /// - <https://github.com/influxdata/influxdb-iox-client-go/commit/2e7a3b0bd47caab7f1a31a1bbe0ff54aa9486b7b> /// - <https://github.com/influxdata/influxdb-iox-client-go/commit/52f1a1b8d5bb8cc8dc2fe825f4da630ad0b9167c> + /// + /// Go clients are unable to execute InfluxQL queries until the JSON structure is updated + /// accordingly. 
fn decode_json(ticket: &[u8]) -> Result<Self> { let json_str = String::from_utf8(ticket.to_vec()).context(InvalidJsonTicketSnafu {})?; - let read_info: ReadInfo = + #[derive(Deserialize, Debug)] + struct ReadInfoJson { + namespace_name: String, + sql_query: String, + } + + let read_info: ReadInfoJson = serde_json::from_str(&json_str).context(InvalidQuerySnafu { query: &json_str })?; - Ok(read_info) + Ok(Self { + namespace_name: read_info.namespace_name, + query: Query::Sql(read_info.sql_query), // JSON is always SQL + }) } fn decode_protobuf(ticket: &[u8]) -> Result<Self> { @@ -145,8 +174,11 @@ impl ReadInfo { proto::ReadInfo::decode(Bytes::from(ticket.to_vec())).context(InvalidTicketSnafu {})?; Ok(Self { - namespace_name: read_info.namespace_name, - sql_query: read_info.sql_query, + namespace_name: read_info.namespace_name.clone(), + query: match read_info.query_type() { + QueryType::Unspecified | QueryType::Sql => Query::Sql(read_info.sql_query), + QueryType::InfluxQl => Query::InfluxQL(read_info.sql_query), + }, }) } } @@ -175,7 +207,7 @@ where &self, span_ctx: Option<SpanContext>, permit: InstrumentedAsyncOwnedSemaphorePermit, - sql_query: String, + query: Query, namespace: String, ) -> Result<Response<TonicStream<FlightData>>, tonic::Status> { let db = self @@ -185,12 +217,24 @@ where .ok_or_else(|| tonic::Status::not_found(format!("Unknown namespace: {namespace}")))?; let ctx = db.new_query_context(span_ctx); - let query_completed_token = db.record_query(&ctx, "sql", Box::new(sql_query.clone())); - - let physical_plan = Planner::new(&ctx) - .sql(sql_query) - .await - .context(PlanningSnafu)?; + let (query_completed_token, physical_plan) = match query { + Query::Sql(sql_query) => { + let token = db.record_query(&ctx, "sql", Box::new(sql_query.clone())); + let plan = Planner::new(&ctx) + .sql(sql_query) + .await + .context(PlanningSnafu)?; + (token, plan) + } + Query::InfluxQL(sql_query) => { + let token = db.record_query(&ctx, "influxql", Box::new(sql_query.clone())); + let plan = Planner::new(&ctx) + .influxql(db, sql_query) + .await + .context(PlanningSnafu)?; + (token, plan) + } + }; let output = GetStream::new(ctx, physical_plan, namespace, query_completed_token, permit).await?; @@ -239,7 +283,7 @@ where }; let ReadInfo { namespace_name, - sql_query, + query: sql_query, } = read_info?; let permit = self @@ -466,7 +510,9 @@ impl Stream for GetStream { #[cfg(test)] mod tests { + use assert_matches::assert_matches; use futures::Future; + use generated_types::influxdata::iox::querier::v1::read_info::QueryType; use metric::{Attributes, Metric, U64Gauge}; use service_common::test_util::TestDatabaseStore; use tokio::pin; @@ -488,7 +534,72 @@ mod tests { let read_info = ReadInfo::decode_json(&ticket.ticket).unwrap(); assert_eq!(read_info.namespace_name, "my_db"); - assert_eq!(read_info.sql_query, "SELECT 1;"); + assert_matches!(read_info.query, Query::Sql(query) => assert_eq!(query, "SELECT 1;")); + } + + #[test] + fn test_read_info_decoding() { + let mut buf = Vec::with_capacity(1024); + proto::ReadInfo::encode( + &proto::ReadInfo { + namespace_name: "<foo>_<bar>".to_string(), + sql_query: "SELECT 1".to_string(), + query_type: QueryType::Unspecified.into(), + }, + &mut buf, + ) + .unwrap(); + + let ri = ReadInfo::decode_protobuf(&buf).unwrap(); + assert_eq!(ri.namespace_name, "<foo>_<bar>"); + assert_matches!(ri.query, Query::Sql(query) => assert_eq!(query, "SELECT 1")); + + let mut buf = Vec::with_capacity(1024); + proto::ReadInfo::encode( + &proto::ReadInfo { + namespace_name: 
"<foo>_<bar>".to_string(), + sql_query: "SELECT 1".to_string(), + query_type: QueryType::Sql.into(), + }, + &mut buf, + ) + .unwrap(); + + let ri = ReadInfo::decode_protobuf(&buf).unwrap(); + assert_eq!(ri.namespace_name, "<foo>_<bar>"); + assert_matches!(ri.query, Query::Sql(query) => assert_eq!(query, "SELECT 1")); + + let mut buf = Vec::with_capacity(1024); + proto::ReadInfo::encode( + &proto::ReadInfo { + namespace_name: "<foo>_<bar>".to_string(), + sql_query: "SELECT 1".to_string(), + query_type: QueryType::InfluxQl.into(), + }, + &mut buf, + ) + .unwrap(); + + let ri = ReadInfo::decode_protobuf(&buf).unwrap(); + assert_eq!(ri.namespace_name, "<foo>_<bar>"); + assert_matches!(ri.query, Query::InfluxQL(query) => assert_eq!(query, "SELECT 1")); + + // Fallible + let mut buf = Vec::with_capacity(1024); + proto::ReadInfo::encode( + &proto::ReadInfo { + namespace_name: "<foo>_<bar>".to_string(), + sql_query: "SELECT 1".into(), + query_type: 3, + }, + &mut buf, + ) + .unwrap(); + + // Reverts to default (unspecified) for invalid query_type enumeration, and thus SQL + let ri = ReadInfo::decode_protobuf(&buf).unwrap(); + assert_eq!(ri.namespace_name, "<foo>_<bar>"); + assert_matches!(ri.query, Query::Sql(query) => assert_eq!(query, "SELECT 1")); } #[tokio::test] diff --git a/test_helpers_end_to_end/src/client.rs b/test_helpers_end_to_end/src/client.rs index 0bcca71dfa..fdaa4884de 100644 --- a/test_helpers_end_to_end/src/client.rs +++ b/test_helpers_end_to_end/src/client.rs @@ -5,6 +5,7 @@ use http::Response; use hyper::{Body, Client, Request}; use influxdb_iox_client::{ connection::Connection, + flight::generated_types::read_info::QueryType, flight::generated_types::ReadInfo, write_info::generated_types::{merge_responses, GetWriteInfoResponse, ShardStatus}, }; @@ -190,15 +191,14 @@ pub fn all_persisted(res: &GetWriteInfoResponse) -> bool { } /// Runs a query using the flight API on the specified connection. -/// -/// This is similar ot [`run_query`] but does NOT unwrap the result. pub async fn try_run_query( - sql: impl Into<String>, + sql_query: impl Into<String>, + query_type: QueryType, namespace: impl Into<String>, querier_connection: Connection, ) -> Result<Vec<RecordBatch>, influxdb_iox_client::flight::Error> { - let namespace = namespace.into(); - let sql = sql.into(); + let sql_query = sql_query.into(); + let namespace_name = namespace.into(); let mut client = influxdb_iox_client::flight::Client::new(querier_connection); @@ -207,23 +207,57 @@ pub async fn try_run_query( let mut response = client .perform_query(ReadInfo { - namespace_name: namespace, - sql_query: sql, + namespace_name, + sql_query, + query_type: query_type.into(), }) .await?; response.collect().await } -/// Runs a query using the flight API on the specified connection. +/// Runs a SQL query using the flight API on the specified connection. +/// +/// This is similar ot [`run_sql`] but does NOT unwrap the result. +pub async fn try_run_sql( + sql: impl Into<String>, + namespace: impl Into<String>, + querier_connection: Connection, +) -> Result<Vec<RecordBatch>, influxdb_iox_client::flight::Error> { + try_run_query(sql, QueryType::Sql, namespace, querier_connection).await +} + +/// Runs a SQL query using the flight API on the specified connection. +/// +/// Use [`try_run_sql`] if you want to check the error manually. 
+pub async fn run_sql( + sql: impl Into<String>, + namespace: impl Into<String>, + querier_connection: Connection, +) -> Vec<RecordBatch> { + try_run_sql(sql, namespace, querier_connection) + .await + .expect("Error executing query") +} + +/// Runs an InfluxQL query using the flight API on the specified connection. +pub async fn try_run_influxql( + sql: impl Into<String>, + namespace: impl Into<String>, + querier_connection: Connection, +) -> Result<Vec<RecordBatch>, influxdb_iox_client::flight::Error> { + try_run_query(sql, QueryType::InfluxQl, namespace, querier_connection).await +} + +/// Runs an InfluxQL query using the flight API on the specified connection. /// -/// Use [`try_run_query`] if you want to check the error manually. -pub async fn run_query( +/// Use [`try_run_influxql`] if you want to check the error manually. +pub async fn run_influxql( sql: impl Into<String>, namespace: impl Into<String>, querier_connection: Connection, ) -> Vec<RecordBatch> { - try_run_query(sql, namespace, querier_connection) + try_run_query(sql, QueryType::InfluxQl, namespace, querier_connection) .await .expect("Error executing query") } diff --git a/test_helpers_end_to_end/src/steps.rs b/test_helpers_end_to_end/src/steps.rs index 624b43ff0e..50ee2bf322 100644 --- a/test_helpers_end_to_end/src/steps.rs +++ b/test_helpers_end_to_end/src/steps.rs @@ -1,6 +1,6 @@ use crate::{ - get_write_token, run_query, token_is_persisted, try_run_query, wait_for_persisted, - wait_for_readable, MiniCluster, + get_write_token, run_sql, token_is_persisted, try_run_influxql, try_run_sql, + wait_for_persisted, wait_for_readable, MiniCluster, }; use arrow::record_batch::RecordBatch; use arrow_util::assert_batches_sorted_eq; @@ -93,7 +93,7 @@ pub enum Step { /// Run one hot and one cold compaction operation and wait for it to finish. Compact, - /// Run a query using the FlightSQL interface and verify that the + /// Run a SQL query using the FlightSQL interface and verify that the /// results match the expected results using the /// `assert_batches_eq!` macro Query { @@ -101,7 +101,7 @@ pub enum Step { expected: Vec<&'static str>, }, - /// Run a query that's expected to fail using the FlightSQL interface and verify that the + /// Run a SQL query that's expected to fail using the FlightSQL interface and verify that the /// request returns the expected error code and message QueryExpectingError { sql: String, @@ -109,7 +109,7 @@ pub enum Step { expected_message: String, }, - /// Run a query using the FlightSQL interface, and then verifies + /// Run a SQL query using the FlightSQL interface, and then verifies /// the results using the provided validation function on the /// results. /// @@ -120,6 +120,14 @@ pub enum Step { verify: Box<dyn Fn(Vec<RecordBatch>)>, }, + /// Run an InfluxQL query that's expected to fail using the FlightSQL interface and verify that the + /// request returns the expected error code and message + InfluxQLExpectingError { + sql: String, + expected_error_code: tonic::Code, + expected_message: String, + }, + /// Retrieve the metrics and verify the results using the provided /// validation function. 
/// @@ -148,6 +156,25 @@ impl<'a> StepTest<'a> { write_tokens: vec![], }; + fn check_flight_error( + err: influxdb_iox_client::flight::Error, + expected_error_code: tonic::Code, + expected_message: String, + ) { + if let influxdb_iox_client::flight::Error::GrpcError(status) = err { + assert_eq!( + status.code(), + expected_error_code, + "Wrong status code: {}\n\nStatus:\n{}", + status.code(), + status, + ); + assert_eq!(status.message(), expected_message); + } else { + panic!("Not a gRPC error: {err}"); + } + } + for (i, step) in steps.into_iter().enumerate() { info!("**** Begin step {} *****", i); match step { @@ -218,9 +245,9 @@ impl<'a> StepTest<'a> { info!("====Done running compaction"); } Step::Query { sql, expected } => { - info!("====Begin running query: {}", sql); + info!("====Begin running SQL query: {}", sql); // run query - let batches = run_query( + let batches = run_sql( sql, state.cluster.namespace(), state.cluster.querier().querier_grpc_connection(), @@ -234,9 +261,9 @@ impl<'a> StepTest<'a> { expected_error_code, expected_message, } => { - info!("====Begin running query expected to error: {}", sql); + info!("====Begin running SQL query expected to error: {}", sql); - let err = try_run_query( + let err = try_run_sql( sql, state.cluster().namespace(), state.cluster().querier().querier_grpc_connection(), @@ -244,25 +271,14 @@ impl<'a> StepTest<'a> { .await .unwrap_err(); - if let influxdb_iox_client::flight::Error::GrpcError(status) = err { - assert_eq!( - status.code(), - expected_error_code, - "Wrong status code: {}\n\nStatus:\n{}", - status.code(), - status, - ); - assert_eq!(status.message(), expected_message); - } else { - panic!("Not a gRPC error: {err}"); - } + check_flight_error(err, expected_error_code, expected_message); info!("====Done running"); } Step::VerifiedQuery { sql, verify } => { - info!("====Begin running verified query: {}", sql); + info!("====Begin running SQL verified query: {}", sql); // run query - let batches = run_query( + let batches = run_sql( sql, state.cluster.namespace(), state.cluster.querier().querier_grpc_connection(), @@ -271,6 +287,28 @@ impl<'a> StepTest<'a> { verify(batches); info!("====Done running"); } + Step::InfluxQLExpectingError { + sql, + expected_error_code, + expected_message, + } => { + info!( + "====Begin running InfluxQL query expected to error: {}", + sql + ); + + let err = try_run_influxql( + sql, + state.cluster().namespace(), + state.cluster().querier().querier_grpc_connection(), + ) + .await + .unwrap_err(); + + check_flight_error(err, expected_error_code, expected_message); + + info!("====Done running"); + } Step::VerifiedMetrics(verify) => { info!("====Begin validating metrics");
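The diff above threads an explicit query type through the Flight ticket: protobuf tickets carry a `QueryType`, `Unspecified` or unknown values fall back to SQL, and the JSON ticket form stays SQL-only. Below is a minimal sketch of building and re-reading such a ticket, mirroring `test_read_info_decoding`; the `proto` alias for the generated `influxdata.iox.querier.v1` module is an assumption about how the generated types are re-exported.

```rust
use generated_types::influxdata::iox::querier::v1 as proto;
use generated_types::influxdata::iox::querier::v1::read_info::QueryType;
use prost::Message;

fn main() {
    // Build a ticket that explicitly requests InfluxQL execution of the query text.
    let mut buf = Vec::new();
    proto::ReadInfo {
        namespace_name: "my_db".to_string(),
        sql_query: "SELECT usage FROM cpu".to_string(),
        query_type: QueryType::InfluxQl.into(),
    }
    .encode(&mut buf)
    .expect("encoding into a Vec does not fail");

    // Decoding recovers the explicit query type. A ticket with an unset or
    // unknown `query_type` decodes as the default (`Unspecified`), which the
    // server plans as SQL, per the test in the diff above.
    let decoded = proto::ReadInfo::decode(buf.as_slice()).expect("valid protobuf ticket");
    assert_eq!(decoded.query_type(), QueryType::InfluxQl);
    assert_eq!(decoded.namespace_name, "my_db");
}
```

A client that never sets `query_type` therefore keeps its old behaviour: the field decodes to `Unspecified` and the request is planned as SQL.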
25ab23089895aae1e4871ea35a2882bdcf85356b
Andrew Lamb
2023-08-08 15:16:39
Use upstream implementation of XdbcTypeInfo builder, support filter by datatype (#8455)
* refactor(flightsql): Use upstream implementation of XdbcTypeInfo builder * fix: Update test ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor(flightsql): Use upstream implementation of XdbcTypeInfo builder, support filter by datatype (#8455) * refactor(flightsql): Use upstream implementation of XdbcTypeInfo builder * fix: Update test --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/flightsql/src/planner.rs b/flightsql/src/planner.rs index 2ed5b80b1f..e2ffa0179c 100644 --- a/flightsql/src/planner.rs +++ b/flightsql/src/planner.rs @@ -20,7 +20,6 @@ use arrow_flight::{ use arrow_util::flight::prepare_schema_for_flight; use bytes::Bytes; use datafusion::{ - error::DataFusionError, logical_expr::{LogicalPlan, TableType}, physical_plan::ExecutionPlan, sql::TableReference, @@ -30,7 +29,7 @@ use observability_deps::tracing::debug; use once_cell::sync::Lazy; use prost::Message; -use crate::{error::*, sql_info::iox_sql_info_list, xdbc_type_info::TYPE_INFO_RECORD_BATCH}; +use crate::{error::*, sql_info::iox_sql_info_list, xdbc_type_info::xdbc_type_info_data}; use crate::{FlightSQLCommand, PreparedStatementHandle}; /// Logic for creating plans for various Flight messages against a query database @@ -218,9 +217,9 @@ impl FlightSQLPlanner { let plan = plan_get_table_types(ctx).await?; Ok(ctx.create_physical_plan(&plan).await?) } - FlightSQLCommand::CommandGetXdbcTypeInfo(CommandGetXdbcTypeInfo { data_type }) => { - debug!(?data_type, "Planning GetXdbcTypeInfo query"); - let plan = plan_get_xdbc_type_info(ctx, data_type).await?; + FlightSQLCommand::CommandGetXdbcTypeInfo(cmd) => { + debug!(?cmd, "Planning GetXdbcTypeInfo query"); + let plan = plan_get_xdbc_type_info(ctx, cmd).await?; Ok(ctx.create_physical_plan(&plan).await?) } FlightSQLCommand::ActionClosePreparedStatementRequest(_) @@ -471,15 +470,10 @@ async fn plan_get_table_types(ctx: &IOxSessionContext) -> Result<LogicalPlan> { /// Return a `LogicalPlan` for GetXdbcTypeInfo async fn plan_get_xdbc_type_info( ctx: &IOxSessionContext, - data_type: Option<i32>, + cmd: CommandGetXdbcTypeInfo, ) -> Result<LogicalPlan> { - match data_type { - None => Ok(ctx.batch_to_logical_plan(TYPE_INFO_RECORD_BATCH.clone())?), - // TODO chunchun: support search by data_type - Some(_data_type) => Err(Error::from(DataFusionError::NotImplemented( - "GetXdbcTypeInfo does not yet support filtering by data_type".to_string(), - ))), - } + let batch = cmd.into_builder(xdbc_type_info_data()).build()?; + Ok(ctx.batch_to_logical_plan(batch)?) } /// The schema for GetTableTypes diff --git a/flightsql/src/xdbc_type_info/mod.rs b/flightsql/src/xdbc_type_info/mod.rs index 6c96a5ff47..ed536473d6 100644 --- a/flightsql/src/xdbc_type_info/mod.rs +++ b/flightsql/src/xdbc_type_info/mod.rs @@ -56,200 +56,123 @@ //! //! 
[Arrow FlightSQL Specification]: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L1064-L1113 -mod value; - -use std::sync::Arc; - -use arrow::{ - array::{ - ArrayRef, BooleanArray, Int32Array, ListArray, ListBuilder, StringArray, StringBuilder, - }, - datatypes::{DataType, Field, Schema, SchemaRef}, - record_batch::RecordBatch, -}; +use arrow_flight::sql::metadata::{XdbcTypeInfo, XdbcTypeInfoData, XdbcTypeInfoDataBuilder}; +use arrow_flight::sql::{Nullable, Searchable, XdbcDataType, XdbcDatetimeSubcode}; use once_cell::sync::Lazy; -use value::{XdbcTypeInfo, ALL_DATA_TYPES}; - -/// The schema for GetXdbcTypeInfo -static GET_XDBC_TYPE_INFO_SCHEMA: Lazy<SchemaRef> = Lazy::new(|| { - Arc::new(Schema::new(vec![ - Field::new("type_name", DataType::Utf8, false), - Field::new("data_type", DataType::Int32, false), - Field::new("column_size", DataType::Int32, true), - Field::new("literal_prefix", DataType::Utf8, true), - Field::new("literal_suffix", DataType::Utf8, true), - Field::new( - "create_params", - DataType::List(Arc::new(Field::new("item", DataType::Utf8, false))), - true, - ), - Field::new("nullable", DataType::Int32, false), // Nullable enum: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L1014-L1029 - Field::new("case_sensitive", DataType::Boolean, false), - Field::new("searchable", DataType::Int32, false), // Searchable enum: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L1031-L1056 - Field::new("unsigned_attribute", DataType::Boolean, true), - Field::new("fixed_prec_scale", DataType::Boolean, false), - Field::new("auto_increment", DataType::Boolean, true), - Field::new("local_type_name", DataType::Utf8, true), - Field::new("minimum_scale", DataType::Int32, true), - Field::new("maximum_scale", DataType::Int32, true), - Field::new("sql_data_type", DataType::Int32, false), - Field::new("datetime_subcode", DataType::Int32, true), // XdbcDatetimeSubcode value: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L978-L1012 - Field::new("num_prec_radix", DataType::Int32, true), - Field::new("interval_precision", DataType::Int32, true), - ])) -}); - -pub static TYPE_INFO_RECORD_BATCH: Lazy<RecordBatch> = Lazy::new(|| { - let type_names: Vec<&str> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.type_name) - .collect(); - let type_name = Arc::new(StringArray::from(type_names)) as ArrayRef; - - let data_types: Vec<i32> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.data_type.clone() as i32) // case XdbcDataType enum to i32 - .collect(); - let data_type = Arc::new(Int32Array::from(data_types)) as ArrayRef; - - let column_sizes: Vec<Option<i32>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.column_size) - .collect(); - let column_size = Arc::new(Int32Array::from(column_sizes)) as ArrayRef; - - let literal_prefixes: Vec<Option<&str>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.literal_prefix as Option<&str>) - .collect(); - let literal_prefix = Arc::new(StringArray::from(literal_prefixes)) as ArrayRef; - - let literal_suffixes: Vec<Option<&str>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.literal_suffix as Option<&str>) - .collect(); - let literal_suffix = Arc::new(StringArray::from(literal_suffixes)) as ArrayRef; - - let mut create_params_builder: ListBuilder<StringBuilder> = - 
ListBuilder::new(StringBuilder::new()); - ALL_DATA_TYPES.iter().for_each(|entry: &XdbcTypeInfo| { - match &entry.create_params { - Some(params) => { - params - .iter() - .for_each(|value| create_params_builder.values().append_value(value)); - create_params_builder.append(true); - } - None => create_params_builder.append(false), // create_params is nullable - } +pub(crate) fn xdbc_type_info_data() -> &'static XdbcTypeInfoData { + &XDBC_TYPE_INFO_DATA +} + +/// Data Types supported by DataFusion +/// <https://arrow.apache.org/datafusion/user-guide/sql/data_types.html> +static XDBC_TYPE_INFO_DATA: Lazy<XdbcTypeInfoData> = Lazy::new(|| { + let mut builder = XdbcTypeInfoDataBuilder::new(); + builder.append(XdbcTypeInfo { + type_name: "VARCHAR".to_string(), + data_type: XdbcDataType::XdbcVarchar, + column_size: Some(i32::MAX), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L532 + literal_prefix: Some("'".to_string()), + literal_suffix: Some("'".to_string()), + create_params: Some(vec!["length".to_string()]), + nullable: Nullable::NullabilityNullable, + case_sensitive: true, + searchable: Searchable::Full, + unsigned_attribute: None, + fixed_prec_scale: false, + auto_increment: None, + local_type_name: Some("VARCHAR".to_string()), + minimum_scale: None, + maximum_scale: None, + sql_data_type: XdbcDataType::XdbcVarchar, + datetime_subcode: None, + num_prec_radix: None, + interval_precision: None, + }); + builder.append(XdbcTypeInfo { + type_name: "INTEGER".to_string(), + data_type: XdbcDataType::XdbcInteger, + column_size: Some(32), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L563 + literal_prefix: None, + literal_suffix: None, + create_params: None, + nullable: Nullable::NullabilityNullable, + case_sensitive: false, + searchable: Searchable::Full, + unsigned_attribute: Some(false), + fixed_prec_scale: false, + auto_increment: Some(false), + local_type_name: Some("INTEGER".to_string()), + minimum_scale: None, + maximum_scale: None, + sql_data_type: XdbcDataType::XdbcInteger, + datetime_subcode: None, + num_prec_radix: Some(2), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L563 + interval_precision: None, + }); + builder.append(XdbcTypeInfo { + type_name: "FLOAT".to_string(), + data_type: XdbcDataType::XdbcFloat, + column_size: Some(24), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L568 + literal_prefix: None, + literal_suffix: None, + create_params: None, + nullable: Nullable::NullabilityNullable, + case_sensitive: false, + searchable: Searchable::Full, + unsigned_attribute: Some(false), + fixed_prec_scale: false, + auto_increment: Some(false), + local_type_name: Some("FLOAT".to_string()), + minimum_scale: None, + maximum_scale: None, + sql_data_type: XdbcDataType::XdbcFloat, + datetime_subcode: None, + num_prec_radix: Some(2), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L568 + interval_precision: None, + }); + builder.append(XdbcTypeInfo { + type_name: "TIMESTAMP".to_string(), + data_type: XdbcDataType::XdbcTimestamp, + column_size: Some(i32::MAX), // 
https://github.com/apache/arrow-datafusion/blob/4297547df6dc297d692ca82566cfdf135d4730b5/datafusion/proto/src/generated/prost.rs#L894 + literal_prefix: Some("'".to_string()), + literal_suffix: Some("'".to_string()), + create_params: None, + nullable: Nullable::NullabilityNullable, + case_sensitive: false, + searchable: Searchable::Full, + unsigned_attribute: None, + fixed_prec_scale: false, + auto_increment: None, + local_type_name: Some("TIMESTAMP".to_string()), + minimum_scale: None, + maximum_scale: None, + sql_data_type: XdbcDataType::XdbcTimestamp, + datetime_subcode: None, + num_prec_radix: None, + interval_precision: None, + }); + builder.append(XdbcTypeInfo { + type_name: "INTERVAL".to_string(), + data_type: XdbcDataType::XdbcInterval, + column_size: Some(i32::MAX), // https://github.com/apache/arrow-datafusion/blob/4297547df6dc297d692ca82566cfdf135d4730b5/datafusion/proto/src/generated/prost.rs#L1031-L1038 + literal_prefix: Some("'".to_string()), + literal_suffix: Some("'".to_string()), + create_params: None, + nullable: Nullable::NullabilityNullable, + case_sensitive: false, + searchable: Searchable::Full, + unsigned_attribute: None, + fixed_prec_scale: false, + auto_increment: None, + local_type_name: Some("INTERVAL".to_string()), + minimum_scale: None, + maximum_scale: None, + sql_data_type: XdbcDataType::XdbcInterval, + datetime_subcode: Some(XdbcDatetimeSubcode::XdbcSubcodeUnknown), + num_prec_radix: None, + interval_precision: None, // https://github.com/apache/arrow-datafusion/blob/6be75ff2dcc47128b78a695477512ba86c46373f/datafusion/core/src/catalog/information_schema.rs#L581-L582 }); - let (field, offsets, values, nulls) = create_params_builder.finish().into_parts(); - // Re-defined the field to be non-nullable - let new_field = Arc::new(field.as_ref().clone().with_nullable(false)); - let create_params = Arc::new(ListArray::new(new_field, offsets, values, nulls)) as ArrayRef; - - let nullabilities: Vec<i32> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.nullable.clone() as i32) // cast Nullable enum to i32 - .collect(); - let nullable = Arc::new(Int32Array::from(nullabilities)) as ArrayRef; - - let case_sensitivities: Vec<bool> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.case_sensitive) - .collect(); - let case_sensitive = Arc::new(BooleanArray::from(case_sensitivities)); - - let searchabilities: Vec<i32> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.searchable.clone() as i32) // cast Searchable enum to i32 - .collect(); - let searchable = Arc::new(Int32Array::from(searchabilities)) as ArrayRef; - - let unsigned_attributes: Vec<Option<bool>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.unsigned_attribute as Option<bool>) - .collect(); - let unsigned_attribute = Arc::new(BooleanArray::from(unsigned_attributes)) as ArrayRef; - - let fixed_prec_scales: Vec<bool> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.fixed_prec_scale) - .collect(); - let fixed_prec_scale = Arc::new(BooleanArray::from(fixed_prec_scales)) as ArrayRef; - - let auto_increments: Vec<Option<bool>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.auto_increment) - .collect(); - let auto_increment = Arc::new(BooleanArray::from(auto_increments)) as ArrayRef; - - let local_type_names: Vec<Option<&str>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.local_type_name) - .collect(); - let local_type_name = Arc::new(StringArray::from(local_type_names)) as ArrayRef; - - let minimum_scales: 
Vec<Option<i32>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.minimum_scale) - .collect(); - let minimum_scale = Arc::new(Int32Array::from(minimum_scales)) as ArrayRef; - - let maximum_scales: Vec<Option<i32>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.maximum_scale) - .collect(); - let maximum_scale = Arc::new(Int32Array::from(maximum_scales)) as ArrayRef; - - let sql_data_types: Vec<i32> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.sql_data_type.clone() as i32) // case XdbcDataType enum to i32 - .collect(); - let sql_data_type = Arc::new(Int32Array::from(sql_data_types)) as ArrayRef; - - let datetime_subcodes: Vec<Option<i32>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.datetime_subcode) - .collect(); - let datetime_subcode = Arc::new(Int32Array::from(datetime_subcodes)) as ArrayRef; - - let num_prec_radices: Vec<Option<i32>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.num_prec_radix) - .collect(); - let num_prec_radix = Arc::new(Int32Array::from(num_prec_radices)) as ArrayRef; - - let interval_precisions: Vec<Option<i32>> = ALL_DATA_TYPES - .iter() - .map(|entry: &XdbcTypeInfo| entry.interval_precision) - .collect(); - let interval_precision = Arc::new(Int32Array::from(interval_precisions)) as ArrayRef; - RecordBatch::try_new( - Arc::clone(&GET_XDBC_TYPE_INFO_SCHEMA), - vec![ - type_name, - data_type, - column_size, - literal_prefix, - literal_suffix, - create_params, - nullable, - case_sensitive, - searchable, - unsigned_attribute, - fixed_prec_scale, - auto_increment, - local_type_name, - minimum_scale, - maximum_scale, - sql_data_type, - datetime_subcode, - num_prec_radix, - interval_precision, - ], - ) - .unwrap() + builder.build().expect("created XdbcTypeInfo") }); diff --git a/flightsql/src/xdbc_type_info/value.rs b/flightsql/src/xdbc_type_info/value.rs deleted file mode 100644 index 3a5fa4fa60..0000000000 --- a/flightsql/src/xdbc_type_info/value.rs +++ /dev/null @@ -1,283 +0,0 @@ -use once_cell::sync::Lazy; - -/// [Arrow FlightSQL Specification]: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L948-L973 -/// -/// Note: Some of the data types are not supported by DataFusion yet: -/// <https://arrow.apache.org/datafusion/user-guide/sql/data_types.html#unsupported-sql-types> -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub enum XdbcDataType { - UnknownType = 0, - Char = 1, - Numeric = 2, - Decimal = 3, - Integer = 4, - Smallint = 5, - Float = 6, - Real = 7, - Double = 8, - Datetime = 9, // Not yet supported by DataFusion - Interval = 10, - Varchar = 12, - Date = 91, - Time = 92, - Timestamp = 93, - Longvarchar = -1, - Binary = -2, // Not yet supported by DataFusion - Varbinary = -3, // Not yet supported by DataFusion - Longvarbinary = -4, - Bigint = -5, - Tinyint = -6, - Bit = -7, - Wchar = -8, - Wvarchar = -9, // Not yet supported by DataFusion -} - -/// [Arrow FlightSQL Specification]: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L1014-L1029 -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub enum Nullability { - // Indicates that the fields does not allow the use of null values. - NoNulls = 0, - // Indicates that the fields allow the use of null values. - Nullable = 1, - // Indicates that nullability of the fields can not be determined. 
- Unknown = 2, -} - -/// [Arrow FlightSQL Specification]: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L1031-L1056 -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub enum Searchable { - // Indicates that column can not be used in a WHERE clause. - None = 0, - - // Indicates that the column can be used in a WHERE clause if it is using a - // LIKE operator. - Char = 1, - - // Indicates that the column can be used In a WHERE clause with any - // operator other than LIKE. - // - Allowed operators: comparison, quantified comparison, BETWEEN, - // DISTINCT, IN, MATCH, and UNIQUE. - Basic = 2, - - // Indicates that the column can be used in a WHERE clause using any operator. - Full = 3, -} - -/// Detailed subtype information for XDBC_TYPE_DATETIME and XDBC_TYPE_INTERVAL. -/// -/// [Arrow FlightSQL Specification]: https://github.com/apache/arrow/blob/9588da967c756b2923e213ccc067378ba6c90a86/format/FlightSql.proto#L978-L1012 -#[allow(dead_code)] -struct XdbcDatetimeSubcodeType { - // option allow_alias = true; // TODO chunchun: what to do with it? - unknown: i32, - year: i32, - date: i32, - time: i32, - month: i32, - timestamp: i32, - day: i32, - time_with_timezone: i32, - hour: i32, - timestamp_with_timezone: i32, - minute: i32, - second: i32, - year_to_month: i32, - day_to_hour: i32, - day_to_minute: i32, - day_to_second: i32, - hour_to_minute: i32, - hour_to_second: i32, - minute_to_second: i32, - interval_year: i32, - interval_month: i32, - interval_day: i32, - interval_hour: i32, - interval_minute: i32, - interval_second: i32, - interval_year_to_month: i32, - interval_day_to_hour: i32, - interval_day_to_minute: i32, - interval_day_to_second: i32, - interval_hour_to_minute: i32, - interval_hour_to_second: i32, - interval_minute_to_second: i32, -} - -#[allow(dead_code)] -static XDBC_DATETIME_SUBCODE: Lazy<XdbcDatetimeSubcodeType> = - Lazy::new(|| XdbcDatetimeSubcodeType { - unknown: 0, - year: 1, - date: 1, - time: 2, - month: 2, - timestamp: 3, - day: 3, - time_with_timezone: 4, - hour: 4, - timestamp_with_timezone: 5, - minute: 5, - second: 6, - year_to_month: 7, - day_to_hour: 8, - day_to_minute: 9, - day_to_second: 10, - hour_to_minute: 11, - hour_to_second: 12, - minute_to_second: 13, - interval_year: 101, - interval_month: 102, - interval_day: 103, - interval_hour: 104, - interval_minute: 105, - interval_second: 106, - interval_year_to_month: 107, - interval_day_to_hour: 108, - interval_day_to_minute: 109, - interval_day_to_second: 110, - interval_hour_to_minute: 111, - interval_hour_to_second: 112, - interval_minute_to_second: 113, - }); - -pub struct XdbcTypeInfo { - pub type_name: &'static str, - pub data_type: XdbcDataType, - // column_size: int32 (The maximum size supported by that column. - // In case of exact numeric types, this represents the maximum precision. - // In case of string types, this represents the character length. - // In case of datetime data types, this represents the length in characters of the string representation. - // NULL is returned for data types where column size is not applicable.) 
- pub column_size: Option<i32>, - pub literal_prefix: Option<&'static str>, - pub literal_suffix: Option<&'static str>, - pub create_params: Option<Vec<&'static str>>, - pub nullable: Nullability, - pub case_sensitive: bool, - pub searchable: Searchable, - pub unsigned_attribute: Option<bool>, - pub fixed_prec_scale: bool, - pub auto_increment: Option<bool>, - pub local_type_name: Option<&'static str>, - pub minimum_scale: Option<i32>, - pub maximum_scale: Option<i32>, - pub sql_data_type: XdbcDataType, - pub datetime_subcode: Option<i32>, // values are from XDBC_DATETIME_SUBCODE - pub num_prec_radix: Option<i32>, - pub interval_precision: Option<i32>, -} - -/// Data Types supported by DataFusion -/// <https://arrow.apache.org/datafusion/user-guide/sql/data_types.html> -pub static ALL_DATA_TYPES: Lazy<Vec<XdbcTypeInfo>> = Lazy::new(|| { - vec![ - XdbcTypeInfo { - type_name: "VARCHAR", - data_type: XdbcDataType::Varchar, - column_size: Some(i32::MAX), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L532 - literal_prefix: Some("'"), - literal_suffix: Some("'"), - create_params: Some(vec!["length"]), - nullable: Nullability::Nullable, - case_sensitive: true, - searchable: Searchable::Full, - unsigned_attribute: None, - fixed_prec_scale: false, - auto_increment: None, - local_type_name: Some("VARCHAR"), - minimum_scale: None, - maximum_scale: None, - sql_data_type: XdbcDataType::Varchar, - datetime_subcode: None, - num_prec_radix: None, - interval_precision: None, - }, - XdbcTypeInfo { - type_name: "INTEGER", - data_type: XdbcDataType::Integer, - column_size: Some(32), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L563 - literal_prefix: None, - literal_suffix: None, - create_params: None, - nullable: Nullability::Nullable, - case_sensitive: false, - searchable: Searchable::Full, - unsigned_attribute: Some(false), - fixed_prec_scale: false, - auto_increment: Some(false), - local_type_name: Some("INTEGER"), - minimum_scale: None, - maximum_scale: None, - sql_data_type: XdbcDataType::Integer, - datetime_subcode: None, - num_prec_radix: Some(2), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L563 - interval_precision: None, - }, - XdbcTypeInfo { - type_name: "FLOAT", - data_type: XdbcDataType::Float, - column_size: Some(24), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L568 - literal_prefix: None, - literal_suffix: None, - create_params: None, - nullable: Nullability::Nullable, - case_sensitive: false, - searchable: Searchable::Full, - unsigned_attribute: Some(false), - fixed_prec_scale: false, - auto_increment: Some(false), - local_type_name: Some("FLOAT"), - minimum_scale: None, - maximum_scale: None, - sql_data_type: XdbcDataType::Float, - datetime_subcode: None, - num_prec_radix: Some(2), // https://github.com/apache/arrow-datafusion/blob/3801d45fe5ea3d9b207488527b758a0264665263/datafusion/core/src/catalog/information_schema.rs#L568 - interval_precision: None, - }, - XdbcTypeInfo { - type_name: "TIMESTAMP", - data_type: XdbcDataType::Timestamp, - column_size: Some(i32::MAX), // https://github.com/apache/arrow-datafusion/blob/4297547df6dc297d692ca82566cfdf135d4730b5/datafusion/proto/src/generated/prost.rs#L894 - 
literal_prefix: Some("'"), - literal_suffix: Some("'"), - create_params: None, - nullable: Nullability::Nullable, - case_sensitive: false, - searchable: Searchable::Full, - unsigned_attribute: None, - fixed_prec_scale: false, - auto_increment: None, - local_type_name: Some("TIMESTAMP"), - minimum_scale: None, - maximum_scale: None, - sql_data_type: XdbcDataType::Timestamp, - datetime_subcode: None, - num_prec_radix: None, - interval_precision: None, - }, - XdbcTypeInfo { - type_name: "INTERVAL", - data_type: XdbcDataType::Interval, - column_size: Some(i32::MAX), // https://github.com/apache/arrow-datafusion/blob/4297547df6dc297d692ca82566cfdf135d4730b5/datafusion/proto/src/generated/prost.rs#L1031-L1038 - literal_prefix: Some("'"), - literal_suffix: Some("'"), - create_params: None, - nullable: Nullability::Nullable, - case_sensitive: false, - searchable: Searchable::Full, - unsigned_attribute: None, - fixed_prec_scale: false, - auto_increment: None, - local_type_name: Some("INTERVAL"), - minimum_scale: None, - maximum_scale: None, - sql_data_type: XdbcDataType::Interval, - datetime_subcode: Some(XDBC_DATETIME_SUBCODE.unknown), - num_prec_radix: None, - interval_precision: None, // https://github.com/apache/arrow-datafusion/blob/6be75ff2dcc47128b78a695477512ba86c46373f/datafusion/core/src/catalog/information_schema.rs#L581-L582 - }, - ] -}); diff --git a/influxdb_iox/tests/end_to_end_cases/flightsql.rs b/influxdb_iox/tests/end_to_end_cases/flightsql.rs index 7553bbf0fe..9931eea658 100644 --- a/influxdb_iox/tests/end_to_end_cases/flightsql.rs +++ b/influxdb_iox/tests/end_to_end_cases/flightsql.rs @@ -1174,15 +1174,24 @@ async fn flightsql_get_xdbc_type_info() { })), Step::Custom(Box::new(move |state: &mut StepTestState| { async move { + // test filter by type let mut client = flightsql_client(state.cluster()); - // TODO chunchun: search by data_type test case let data_type: Option<i32> = Some(6); - let err = client.get_xdbc_type_info(data_type).await.unwrap_err(); - - assert_matches!(err, FlightError::Tonic(..)); - assert_contains!(err.to_string(), "GetXdbcTypeInfo does not yet support filtering by data_type"); + let stream = client.get_xdbc_type_info(data_type).await.unwrap(); + let batches = collect_stream(stream).await; + insta::assert_yaml_snapshot!( + batches_to_sorted_lines(&batches), + @r###" + --- + - +-----------+-----------+-------------+----------------+----------------+---------------+----------+----------------+------------+--------------------+------------------+----------------+-----------------+---------------+---------------+---------------+------------------+----------------+--------------------+ + - "| type_name | data_type | column_size | literal_prefix | literal_suffix | create_params | nullable | case_sensitive | searchable | unsigned_attribute | fixed_prec_scale | auto_increment | local_type_name | minimum_scale | maximum_scale | sql_data_type | datetime_subcode | num_prec_radix | interval_precision |" + - +-----------+-----------+-------------+----------------+----------------+---------------+----------+----------------+------------+--------------------+------------------+----------------+-----------------+---------------+---------------+---------------+------------------+----------------+--------------------+ + - "| FLOAT | 6 | 24 | | | | 1 | false | 3 | false | false | false | FLOAT | | | 6 | | 2 | |" + - 
+-----------+-----------+-------------+----------------+----------------+---------------+----------+----------------+------------+--------------------+------------------+----------------+-----------------+---------------+---------------+---------------+------------------+----------------+--------------------+ + "### + ); } .boxed() })),
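The planner change above hands `GetXdbcTypeInfo` to arrow-flight's metadata builder, so the optional `data_type` filter is applied by `cmd.into_builder(...).build()` instead of being rejected as unimplemented. Below is a minimal sketch of that flow with a single FLOAT entry copied from the static table in the diff; it assumes the same arrow-flight release the commit builds against, where `XdbcTypeInfoDataBuilder` and `CommandGetXdbcTypeInfo::into_builder` have the signatures used there.

```rust
use arrow_flight::sql::metadata::{XdbcTypeInfo, XdbcTypeInfoDataBuilder};
use arrow_flight::sql::{CommandGetXdbcTypeInfo, Nullable, Searchable, XdbcDataType};

fn main() {
    // Register one type, mirroring the FLOAT entry of XDBC_TYPE_INFO_DATA.
    let mut builder = XdbcTypeInfoDataBuilder::new();
    builder.append(XdbcTypeInfo {
        type_name: "FLOAT".to_string(),
        data_type: XdbcDataType::XdbcFloat,
        column_size: Some(24),
        literal_prefix: None,
        literal_suffix: None,
        create_params: None,
        nullable: Nullable::NullabilityNullable,
        case_sensitive: false,
        searchable: Searchable::Full,
        unsigned_attribute: Some(false),
        fixed_prec_scale: false,
        auto_increment: Some(false),
        local_type_name: Some("FLOAT".to_string()),
        minimum_scale: None,
        maximum_scale: None,
        sql_data_type: XdbcDataType::XdbcFloat,
        datetime_subcode: None,
        num_prec_radix: Some(2),
        interval_precision: None,
    });
    let info = builder.build().expect("valid XdbcTypeInfoData");

    // Filtering by data_type = 6 (FLOAT) keeps only the matching row, which is
    // the behaviour the end-to-end test above asserts against the full table.
    let cmd = CommandGetXdbcTypeInfo { data_type: Some(6) };
    let batch = cmd.into_builder(&info).build().expect("filtered record batch");
    assert_eq!(batch.num_rows(), 1);
}
```

Passing `data_type: None` is expected to return every registered row, matching the previous unfiltered behaviour.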
1df7a0d4fb2bc2ae38765e8c28092602824b5bf7
Andrew Lamb
2022-10-19 14:39:05
remove outdated observer sql repl mode (#5918)
* refactor: remove Observer mode from repl * chore: remove outdated SQL docs * fix: more update of sql docs
null
refactor: remove outdated observer sql repl mode (#5918) * refactor: remove Observer mode from repl * chore: remove outdated SQL docs * fix: more update of sql docs
diff --git a/docs/sql.md b/docs/sql.md index 278e75c4fe..977da2f6ec 100644 --- a/docs/sql.md +++ b/docs/sql.md @@ -32,7 +32,7 @@ Then run the `USE <your_namespace>` command ``` > use 810c5937734635d8_dbce66e3a6cbe757; You are now in remote mode, querying namespace 810c5937734635d8_dbce66e3a6cbe757 -810c5937734635d8_dbce66e3a6cbe757> +810c5937734635d8_dbce66e3a6cbe757> ``` Now, all queries will be run against the specified namespace (`810c5937734635d8_dbce66e3a6cbe757`) in this example @@ -67,31 +67,6 @@ Returned 1 row in 74.022768ms ``` -## Observer -In this mode queries are run *locally* against a cached unified view of the remote system tables - -``` -810c5937734635d8_dbce66e3a6cbe757> observer -; -Preparing local views of remote system tables -Loading system tables from 3 databases -... - Completed in 112.085784ms -You are now in Observer mode. - -SQL commands in this mode run against a cached unified view of -remote system tables in all remote databases. - -To see the unified tables available to you, try running -SHOW TABLES; - -To reload the most recent version of the database system tables, run -OBSERVER; - - -OBSERVER> -``` - # Query Cookbook This section contains some common and useful queries against IOx system tables @@ -108,9 +83,6 @@ my_db> show tables; | table_catalog | table_schema | table_name | table_type | +---------------+--------------------+-------------+------------+ | public | iox | query_count | BASE TABLE | -| public | system | chunks | BASE TABLE | -| public | system | columns | BASE TABLE | -| public | system | operations | BASE TABLE | | public | information_schema | tables | VIEW | | public | information_schema | columns | VIEW | +---------------+--------------------+-------------+------------+ @@ -149,96 +121,6 @@ Query execution complete in 39.046225ms ``` - -## System Tables - -Here are some interesting reports you can run when in `OBSERVER` mode: - -### Total storage size taken by each database - -```sql -SELECT - database_name, count(*) as num_chunks, - sum(memory_bytes)/1024/1024 as estimated_mb -FROM chunks -GROUP BY database_name -ORDER BY estimated_mb desc -LIMIT 20; -``` - -### Total estimated storage size by database and storage class -```sql -SELECT - database_name, storage, count(*) as num_chunks, - sum(memory_bytes)/1024/1024 as estimated_mb -FROM chunks -GROUP BY database_name, storage -ORDER BY estimated_mb desc -LIMIT 20; -``` - -### Total estimated storage size by database, table_name and storage class - -```sql -SELECT - database_name, table_name, storage, count(*) as num_chunks, - sum(memory_bytes)/1024/1024 as estimated_mb -FROM chunks -GROUP BY database_name, table_name, storage -ORDER BY estimated_mb desc -LIMIT 20; -``` - - -### Total row count by table - -```sql -SELECT database_name, table_name, sum(total_rows) as total_rows -FROM ( - SELECT database_name, table_name, max(row_count) as total_rows - FROM chunk_columns - GROUP BY database_name, partition_key, table_name -) -GROUP BY database_name, table_name -ORDER BY total_rows DESC -LIMIT 20; -``` - -### Total row count by partition and table - -```sql -SELECT database_name, partition_key, table_name, max(row_count) as total_rows -FROM chunk_columns -GROUP BY database_name, partition_key, table_name -ORDER BY total_rows DESC -LIMIT 20; -``` - -### Time range stored per table - -This query provides an estimate, by table, of how long of a time range -and the estimated number of rows per second it holds in IOx -(the `1,000,000,000` is the conversion from nanoseconds) - -```sql -select 
table_name, -1000000000.0 * total_rows / range as rows_per_sec, -range / 1000000000.0 as range_sec, -total_rows -from -(select table_name, - column_name, - sum(row_count) as total_rows, - max(cast(max_value as double)) - min(cast(min_value as double)) as range - from chunk_columns - where column_name = 'time' - group by table_name, column_name -) -where range > 0 -order by range_sec desc; -``` - - # SQL Reference Since IOx uses Apache Arrow's @@ -249,14 +131,7 @@ In this section, IOx specific SQL tables, commands, and extensions are documente ## System Tables -In addition to the SQL standard `information_schema`, IOx contains several *system tables* that provide access to IOx specific information. The information in each system table is scoped to that particular database. Cross database queries are not possible due to the design of IOx's security model. Another process, such as the `observer` mode in the IOx SQL client, must be used for queries on information that spans databases. - -### `system.chunks` -`system.chunks` contains information about each IOx storage chunk (which holds part of the data for a table). - -TODO: document each column, once they have stabilized. - -### `system.columns` -`system.columns` contains IOx specific schema information about each column in each table, such as which columns were loaded as tags, fields, and timestamps in the InfluxDB data model. +In addition to the SQL standard `information_schema`, IOx contains several *system tables* that provide access to IOx specific information. The information in each system table is scoped to that particular database. Cross database queries are not possible due to the design of IOx's security model. -TODO: document each column, once they have stabilized. +### `system.queries` +`system.queries` contains information about queries run against this IOx instance diff --git a/influxdb_iox/src/commands/sql.rs b/influxdb_iox/src/commands/sql.rs index 2a532b83df..adf550b0ec 100644 --- a/influxdb_iox/src/commands/sql.rs +++ b/influxdb_iox/src/commands/sql.rs @@ -5,7 +5,6 @@ use snafu::{ResultExt, Snafu}; use influxdb_iox_client::{connection::Connection, health}; -mod observer; mod repl; mod repl_command; diff --git a/influxdb_iox/src/commands/sql/observer.rs b/influxdb_iox/src/commands/sql/observer.rs deleted file mode 100644 index 6622e19bb8..0000000000 --- a/influxdb_iox/src/commands/sql/observer.rs +++ /dev/null @@ -1,296 +0,0 @@ -//! 
This module implements the "Observer" functionality of the SQL repl - -use arrow::{ - array::{Array, ArrayRef, StringArray}, - datatypes::{Field, Schema}, - record_batch::RecordBatch, -}; -use datafusion::{ - datasource::MemTable, - prelude::{SessionConfig, SessionContext}, -}; -use influxdb_iox_client::{connection::Connection, flight::generated_types::ReadInfo}; -use observability_deps::tracing::{debug, info}; -use snafu::{ResultExt, Snafu}; -use std::{collections::HashMap, sync::Arc, time::Instant}; - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("Generic error"))] - Generic, - - #[snafu(display("Error loading remote state: {}", source))] - LoadingDatabaseNames { - source: influxdb_iox_client::error::Error, - }, - - #[snafu(display("Error running remote query: {}", source))] - RunningRemoteQuery { - source: influxdb_iox_client::flight::Error, - }, - - #[snafu(display("Error running observer query: {}", source))] - Query { - source: datafusion::error::DataFusionError, - }, -} - -pub type Result<T, E = Error> = std::result::Result<T, E>; - -/// The Observer contains a local DataFusion execution engine that has -/// pre-loaded with consolidated system table views. -pub struct Observer { - /// DataFusion execution context for executing queries - context: SessionContext, -} - -impl Observer { - /// Attempt to create a new observer instance, loading from the remote server - pub async fn try_new(connection: Connection) -> Result<Self> { - let mut context = - SessionContext::with_config(SessionConfig::new().with_information_schema(true)); - - load_remote_system_tables(&mut context, connection).await?; - - Ok(Self { context }) - } - - /// Runs the specified sql query locally against the preloaded context - pub async fn run_query(&mut self, sql: &str) -> Result<Vec<RecordBatch>> { - self.context - .sql(sql) - .await - .context(QuerySnafu)? - .collect() - .await - .context(QuerySnafu) - } - - pub fn help(&self) -> String { - r#"You are now in Observer mode. - -SQL commands in this mode run against a cached unified view of -remote system tables in all remote databases. - -To see the unified tables available to you, try running -SHOW TABLES; - -To reload the most recent version of the database system tables, run -OBSERVER; - -"# - .to_string() - } -} - -/// Copies the data from the remote tables across all databases in a -/// remote server into a local copy that also has an extra -/// `database_name` column for the database -async fn load_remote_system_tables( - context: &mut SessionContext, - connection: Connection, -) -> Result<()> { - // all prefixed with "system." - let table_names = vec!["queries"]; - - let start = Instant::now(); - - let mut namespace_client = influxdb_iox_client::namespace::Client::new(connection.clone()); - - let db_names: Vec<_> = namespace_client - .get_namespaces() - .await - .context(LoadingDatabaseNamesSnafu)? 
- .into_iter() - .map(|ns| ns.name) - .collect(); - - println!("Loading system tables from {} databases", db_names.len()); - - let tasks = db_names - .into_iter() - .flat_map(|db_name| { - let table_names = table_names.clone(); - let connection = connection.clone(); - table_names.into_iter().map(move |table_name| { - let table_name = table_name.to_string(); - let db_name = db_name.to_string(); - let connection = connection.clone(); - let sql = format!("select * from system.{}", table_name); - tokio::task::spawn(async move { - let mut client = influxdb_iox_client::flight::Client::new(connection); - let mut query_results = client - .perform_query(ReadInfo { - namespace_name: db_name.clone(), - sql_query: sql, - }) - .await - .context(RunningRemoteQuerySnafu)?; - - let mut batches = vec![]; - - while let Some(data) = query_results - .next() - .await - .context(RunningRemoteQuerySnafu)? - { - batches.push(data); - } - - let t: Result<RemoteSystemTable> = Ok(RemoteSystemTable { - db_name, - table_name, - batches, - }); - print!("."); // give some indication of progress - use std::io::Write; - std::io::stdout().flush().unwrap(); - t - }) - }) - }) - .collect::<Vec<_>>(); - - // now, get the results and combine them - let results = futures::future::join_all(tasks).await; - - let mut builder = AggregatedTableBuilder::new(); - results.into_iter().for_each(|result| { - match result { - Ok(Ok(table)) => { - builder.append(table); - } - // This is not a fatal error so log it and keep going - Ok(Err(e)) => { - println!("WARNING: Error running query: {}", e); - } - // This is not a fatal error so log it and keep going - Err(e) => { - println!("WARNING: Error running task: {}", e); - } - } - }); - - println!(); - println!(" Completed in {:?}", Instant::now() - start); - - builder.build(context); - - Ok(()) -} - -#[derive(Debug)] -/// Contains the results from a system table query for a specific database -struct RemoteSystemTable { - db_name: String, - table_name: String, - batches: Vec<RecordBatch>, -} - -#[derive(Debug, Default)] -/// Aggregates several table responses into a unified view -struct AggregatedTableBuilder { - tables: HashMap<String, VirtualTableBuilder>, -} - -impl AggregatedTableBuilder { - fn new() -> Self { - Self::default() - } - - /// Appends a table response to the aggregated tables being built - fn append(&mut self, t: RemoteSystemTable) { - let RemoteSystemTable { - db_name, - table_name, - batches, - } = t; - - let num_rows: usize = batches.iter().map(|b| b.num_rows()).sum(); - - info!(%table_name, %db_name, num_batches=batches.len(), %num_rows, "Aggregating results"); - - let table_builder = self - .tables - .entry(table_name.clone()) - .or_insert_with(|| VirtualTableBuilder::new(table_name)); - - table_builder.append_batches(&db_name, batches); - } - - /// register a table provider for this system table - fn build(self, ctx: &mut SessionContext) { - let Self { tables } = self; - - for (table_name, table_builder) in tables { - debug!(%table_name, "registering system table"); - table_builder.build(ctx); - } - } -} - -/// Creates a "virtual" version of `select * from <table>` which has a -/// "database_name" column pre-pended to all actual record batches -#[derive(Debug)] -struct VirtualTableBuilder { - table_name: String, - batches: Vec<RecordBatch>, -} - -impl VirtualTableBuilder { - pub fn new(table_name: impl Into<String>) -> Self { - let table_name = table_name.into(); - Self { - table_name, - batches: Vec::new(), - } - } - - /// Append batches from `select * from <system 
table>` to the - /// results being created - fn append_batches(&mut self, db_name: &str, new_batches: Vec<RecordBatch>) { - self.batches.extend(new_batches.into_iter().map(|batch| { - use std::iter::once; - - let array = - StringArray::from_iter_values(std::iter::repeat(db_name).take(batch.num_rows())); - let data_type = array.data_type().clone(); - let array = Arc::new(array) as ArrayRef; - - let new_columns = once(array) - .chain(batch.columns().iter().cloned()) - .collect::<Vec<ArrayRef>>(); - - let new_fields = once(Field::new("database_name", data_type, false)) - .chain(batch.schema().fields().iter().cloned()) - .collect::<Vec<Field>>(); - let new_schema = Arc::new(Schema::new(new_fields)); - - RecordBatch::try_new(new_schema, new_columns).expect("Creating new record batch") - })) - } - - /// register a table provider for this system table - fn build(self, ctx: &mut SessionContext) { - let Self { - table_name, - batches, - } = self; - - let schema = if batches.is_empty() { - panic!("No batches for ChunksTableBuilder"); - } else { - batches[0].schema() - }; - - let partitions = batches - .into_iter() - .map(|batch| vec![batch]) - .collect::<Vec<_>>(); - - let memtable = MemTable::try_new(schema, partitions).expect("creating memtable"); - - ctx.register_table(table_name.as_str(), Arc::new(memtable)) - .ok(); - } -} diff --git a/influxdb_iox/src/commands/sql/repl.rs b/influxdb_iox/src/commands/sql/repl.rs index 129367b906..c1b4e6808c 100644 --- a/influxdb_iox/src/commands/sql/repl.rs +++ b/influxdb_iox/src/commands/sql/repl.rs @@ -43,9 +43,6 @@ pub enum Error { source: influxdb_iox_client::flight::Error, }, - #[snafu(display("Error running observer query: {}", source))] - RunningObserverQuery { source: super::observer::Error }, - #[snafu(display("Cannot create REPL: {}", source))] ReplCreation { source: ReadlineError }, } @@ -55,9 +52,6 @@ pub type Result<T, E = Error> = std::result::Result<T, E>; enum QueryEngine { /// Run queries against the namespace on the remote server Remote(String), - - /// Run queries against a local `Observer` instance - Observer(super::observer::Observer), } struct RustylineHelper { @@ -168,9 +162,6 @@ pub struct Repl { /// Current prompt prompt: String, - /// Connection to the server - connection: Connection, - /// Client for interacting with IOx namespace API namespace_client: influxdb_iox_client::namespace::Client, @@ -192,7 +183,7 @@ impl Repl { /// Create a new Repl instance, connected to the specified URL pub fn new(connection: Connection) -> Result<Self> { let namespace_client = influxdb_iox_client::namespace::Client::new(connection.clone()); - let flight_client = influxdb_iox_client::flight::Client::new(connection.clone()); + let flight_client = influxdb_iox_client::flight::Client::new(connection); let mut rl = Editor::new().context(ReplCreationSnafu)?; rl.set_helper(Some(RustylineHelper::default())); @@ -209,7 +200,6 @@ impl Repl { Ok(Self { rl, prompt, - connection, namespace_client, flight_client, query_engine: None, @@ -227,12 +217,6 @@ impl Repl { ReplCommand::Help => { self.print_help(); } - ReplCommand::Observer {} => { - self.use_observer() - .await - .map_err(|e| println!("{}", e)) - .ok(); - } ReplCommand::ShowNamespaces => { self.list_namespaces() .await @@ -317,13 +301,6 @@ impl Repl { scrape_query(&mut self.flight_client, db_name, &sql).await? } - Some(QueryEngine::Observer(observer)) => { - info!("Running sql on local observer"); - observer - .run_query(&sql) - .await - .context(RunningObserverQuerySnafu)? 
- } }; let end = Instant::now(); @@ -355,22 +332,11 @@ impl Repl { self.set_query_engine(QueryEngine::Remote(db_name)); } - async fn use_observer(&mut self) -> Result<()> { - println!("Preparing local views of remote system tables"); - let observer = super::observer::Observer::try_new(self.connection.clone()) - .await - .context(RunningObserverQuerySnafu)?; - println!("{}", observer.help()); - self.set_query_engine(QueryEngine::Observer(observer)); - Ok(()) - } - fn set_query_engine(&mut self, query_engine: QueryEngine) { self.prompt = match &query_engine { QueryEngine::Remote(db_name) => { format!("{}> ", db_name) } - QueryEngine::Observer(_) => "OBSERVER> ".to_string(), }; self.query_engine = Some(query_engine) } diff --git a/influxdb_iox/src/commands/sql/repl_command.rs b/influxdb_iox/src/commands/sql/repl_command.rs index 56f310ed7f..7139dc0045 100644 --- a/influxdb_iox/src/commands/sql/repl_command.rs +++ b/influxdb_iox/src/commands/sql/repl_command.rs @@ -5,7 +5,6 @@ use observability_deps::tracing::{debug, warn}; pub enum ReplCommand { Help, ShowNamespaces, - Observer, SetFormat { format: String }, UseNamespace { db_name: String }, SqlCommand { sql: String }, @@ -61,7 +60,6 @@ impl TryFrom<&str> for ReplCommand { warn!(%extra_content, "ignoring tokens after 'help'"); Ok(Self::Help) } - ["observer"] => Ok(Self::Observer), ["exit"] => Ok(Self::Exit), ["quit"] => Ok(Self::Exit), ["use", "namespace"] => { @@ -104,8 +102,6 @@ USE NAMESPACE <name>: Set the current remote namespace to name SET FORMAT <format>: Set the output format to Pretty, csv or json -OBSERVER: Locally query unified queryable views of remote system tables - [EXIT | QUIT]: Quit this session and exit the program # Examples: use remote namespace foo @@ -118,20 +114,6 @@ USE foo; SHOW TABLES; ;; Show available tables SHOW COLUMNS FROM my_table; ;; Show columns in the table -;; Show storage usage across partitions and tables -SELECT - partition_key, table_name, storage, - count(*) as chunk_count, - sum(memory_bytes)/(1024*1024) as size_mb -FROM - system.chunks -GROUP BY - partition_key, table_name, storage -ORDER BY - size_mb DESC -LIMIT 20 -; - "# } } @@ -169,21 +151,6 @@ mod tests { assert_eq!(" help me; ".try_into(), expected); } - #[test] - fn observer() { - let expected = Ok(ReplCommand::Observer); - assert_eq!("observer;".try_into(), expected); - assert_eq!("observer".try_into(), expected); - assert_eq!(" observer".try_into(), expected); - assert_eq!(" observer ".try_into(), expected); - assert_eq!(" OBSERVER ".try_into(), expected); - assert_eq!(" Observer; ".try_into(), expected); - assert_eq!(" observer ; ".try_into(), expected); - - let expected = sql_cmd(" observer me; "); - assert_eq!(" observer me; ".try_into(), expected); - } - #[test] fn show_namespaces() { let expected = Ok(ReplCommand::ShowNamespaces);
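With observer mode removed there is no longer a client-side unified view of remote system tables; what remains is the per-namespace `system.queries` table noted in the updated docs. The deleted loader fetched exactly that over Flight for every database before stitching results together, and the same single-namespace fetch still works with the regular client. A rough sketch under that assumption (the `ReadInfo` shape here predates the later `query_type` field; the connection and namespace are supplied by the caller):

```rust
use arrow::record_batch::RecordBatch;
use influxdb_iox_client::connection::Connection;
use influxdb_iox_client::flight::{generated_types::ReadInfo, Client};

/// Fetch `system.queries` for one namespace over Flight, roughly what the
/// removed observer loader did per database.
async fn fetch_system_queries(
    namespace: &str,
    connection: Connection,
) -> Result<Vec<RecordBatch>, influxdb_iox_client::flight::Error> {
    let mut client = Client::new(connection);
    let mut query_results = client
        .perform_query(ReadInfo {
            namespace_name: namespace.to_string(),
            sql_query: "select * from system.queries".to_string(),
        })
        .await?;

    // Drain the stream of record batches, as the deleted loader did.
    let mut batches = vec![];
    while let Some(batch) = query_results.next().await? {
        batches.push(batch);
    }
    Ok(batches)
}
```

The deleted `VirtualTableBuilder` then prepended a constant `database_name` column to each batch before registering a DataFusion `MemTable`; without it, any cross-namespace aggregation has to happen in whatever consumes these batches.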
3c0388fdea805a6794a52b1a1844ae712fb06bf3
Dom Dwyer
2023-06-14 15:55:33
"Merge pull request #7953 from influxdata/dom/partition-key-dedupe"
This reverts commit 5bce4477b703f1a02fd40427856645be78faeaf6, reversing changes made to 64fa17b3be325e014f2554e78302b89e7a49350b.
null
revert: "Merge pull request #7953 from influxdata/dom/partition-key-dedupe" This reverts commit 5bce4477b703f1a02fd40427856645be78faeaf6, reversing changes made to 64fa17b3be325e014f2554e78302b89e7a49350b.
diff --git a/mutable_batch/src/payload/partition.rs b/mutable_batch/src/payload/partition.rs index d30adea4c7..8652b01ee9 100644 --- a/mutable_batch/src/payload/partition.rs +++ b/mutable_batch/src/payload/partition.rs @@ -28,7 +28,7 @@ use self::strftime::StrftimeFormatter; /// An error generating a partition key for a row. #[allow(missing_copy_implementations)] -#[derive(Debug, Error, PartialEq, Eq, Clone)] +#[derive(Debug, Error, PartialEq, Eq)] pub enum PartitionKeyError { /// The partition template defines a [`Template::TimeFormat`] part, but the /// provided strftime formatter is invalid. @@ -75,7 +75,7 @@ pub fn partition_batch<'a>( #[derive(Debug)] #[allow(clippy::large_enum_variant)] enum Template<'a> { - TagValue(&'a Column, Option<i32>), + TagValue(&'a Column), TimeFormat(&'a [i64], StrftimeFormatter<'a>), /// This batch is missing a partitioning tag column. @@ -90,67 +90,26 @@ impl<'a> Template<'a> { idx: usize, ) -> Result<(), PartitionKeyError> { match self { - Template::TagValue(col, last_key) if col.valid.get(idx) => match &col.data { - ColumnData::Tag(col_data, dictionary, _) => { - let this_key = col_data[idx]; - - // Update the "is identical" tracking key for this new, - // potentially different key. - *last_key = Some(this_key); - - out.write_str( - encode_key_part(dictionary.lookup_id(this_key).unwrap()).as_ref(), - )? - } + Template::TagValue(col) if col.valid.get(idx) => match &col.data { + ColumnData::Tag(col_data, dictionary, _) => out.write_str(never_empty( + Cow::from(utf8_percent_encode( + dictionary.lookup_id(col_data[idx]).unwrap(), + &ENCODED_PARTITION_KEY_CHARS, + )) + .as_ref(), + ))?, _ => return Err(PartitionKeyError::TagValueNotTag(col.influx_type())), }, Template::TimeFormat(t, fmt) => fmt.render(t[idx], out)?, // Either a tag that has no value for this given row index, or the // batch does not contain this tag at all. - Template::TagValue(_, last_key) => { - // This row doesn't have a tag value, which should be carried - // forwards to be checked against the next row. - *last_key = None; + Template::TagValue(_) | Template::MissingTag => { out.write_str(PARTITION_KEY_VALUE_NULL_STR)? } - Template::MissingTag => out.write_str(PARTITION_KEY_VALUE_NULL_STR)?, } Ok(()) } - - /// Returns true if the partition key generated by `self` for `idx` will be - /// identical to the last generated key. - fn is_identical(&self, idx: usize) -> bool { - match self { - Template::TagValue(col, last_key) if col.valid.get(idx) => match &col.data { - ColumnData::Tag(col_data, _, _) => { - let this_key = col_data[idx]; - // Check if the dictionary key matches the last dictionary - // key, indicating the same value is going to be rendered. - last_key.map(|v| v == this_key).unwrap_or_default() - } - // This is an error, but for the purposes of identical checks, - // it is treated as not identical, causing the error to be - // raised when formatting is attempted. - _ => false, - }, - Template::TimeFormat(t, fmt) => { - // Check if the last value matches the current value, after - // optionally applying the precision reduction optimisation. - fmt.equals_last(t[idx]) - } - // The last row did not contain this key, and neither does this. - Template::TagValue(_, None) => true, - // The last row did contain a key, but this one does not (therefore - // it differs). - Template::TagValue(_, Some(_)) => false, - - // The batch does not contain this tag at all - it always matches - // with the previous row. 
- Template::MissingTag => true, - } - } } fn encode_key_part(s: &str) -> Cow<'_, str> { @@ -198,15 +157,11 @@ fn encode_key_part(s: &str) -> Cow<'_, str> { } } -/// Returns an iterator of partition keys for the given table batch. -/// -/// This function performs deduplication on returned keys; the returned iterator -/// yields [`Some`] containing the partition key string when a new key is -/// generated, and [`None`] when the generated key would equal the last key. +/// Returns an iterator of partition keys for the given table batch fn partition_keys<'a>( batch: &'a MutableBatch, template_parts: impl Iterator<Item = TemplatePart<'a>>, -) -> impl Iterator<Item = Option<Result<String, PartitionKeyError>>> + 'a { +) -> impl Iterator<Item = Result<String, PartitionKeyError>> + 'a { // Extract the timestamp data. let time = match batch.column(TIME_COLUMN_NAME).map(|v| &v.data) { Ok(ColumnData::I64(data, _)) => data.as_slice(), @@ -219,7 +174,7 @@ fn partition_keys<'a>( .map(|v| match v { TemplatePart::TagValue(col_name) => batch .column(col_name) - .map_or_else(|_| Template::MissingTag, |v| Template::TagValue(v, None)), + .map_or_else(|_| Template::MissingTag, Template::TagValue), TemplatePart::TimeFormat(fmt) => { Template::TimeFormat(time, StrftimeFormatter::new(fmt)) } @@ -235,74 +190,38 @@ fn partition_keys<'a>( // is temporarily allocated until the resulting string is shrunk down. let mut last_len = 5; - // The first row in a batch must always be evaluated to produce a key. - // - // Row 0 is guaranteed to exist, otherwise attempting to read the time - // column above would have caused a panic (no rows -> no time column). - let first = std::iter::once(Some(evaluate_template(&mut template, &mut last_len, 0))); - - // The subsequent rows in a batch may generate the same key, and therefore a - // dedupe check is used before allocating & populating the partition key. - let rest = (1..batch.row_count).map(move |idx| { - // Check if this partition key is going to be different from the - // last, short-circuiting the check if it is. - if template.iter().all(|t| t.is_identical(idx)) { - return None; - } + // Yield a partition key string for each row in `batch` + (0..batch.row_count).map(move |idx| { + let mut string = String::with_capacity(last_len); - Some(evaluate_template(&mut template, &mut last_len, idx)) - }); + // Evaluate each template part for this row + let template_len = template.len(); + for (col_idx, col) in template.iter_mut().enumerate() { + col.fmt_row(&mut string, idx)?; - first.chain(rest) -} - -/// Evaluate the partition template against the row indexed by `idx`. -/// -/// # Panics -/// -/// This method panics if `idx` exceeds the number of rows in the batch. -fn evaluate_template( - template: &mut [Template<'_>], - last_len: &mut usize, - idx: usize, -) -> Result<String, PartitionKeyError> { - let mut buf = String::with_capacity(*last_len); - let template_len = template.len(); - - // Evaluate each template part for this row - for (col_idx, col) in template.iter_mut().enumerate() { - // Evaluate the formatter for this template part against the row. - col.fmt_row(&mut buf, idx)?; - - // If this isn't the last element in the template, insert a field - // delimiter. - if col_idx + 1 != template_len { - buf.push(PARTITION_KEY_DELIMITER); + // If this isn't the last element in the template, insert a field + // delimiter. 
+ if col_idx + 1 != template_len { + string.push(PARTITION_KEY_DELIMITER); + } } - } - *last_len = buf.len(); - Ok(buf) + last_len = string.len(); + string.shrink_to_fit(); + Ok(string) + }) } -/// Takes an iterator of [`Option`] and merges identical consecutive elements -/// together. -/// -/// Any [`None`] yielded by `iterator` is added to the range for the previous -/// [`Some`]. -fn range_encode<I, T>(mut iterator: I) -> impl Iterator<Item = (T, Range<usize>)> +/// Takes an iterator and merges consecutive elements together +fn range_encode<I>(mut iterator: I) -> impl Iterator<Item = (I::Item, Range<usize>)> where - I: Iterator<Item = Option<T>>, - T: Eq, + I: Iterator, + I::Item: Eq, { let mut last: Option<I::Item> = None; let mut range: Range<usize> = 0..0; std::iter::from_fn(move || loop { match (iterator.next(), last.take()) { - (Some(None), Some(v)) => { - range.end += 1; - last = Some(v); - } (Some(cur), Some(next)) => match cur == next { true => { range.end += 1; @@ -313,14 +232,14 @@ where range.start = range.end; range.end += 1; last = Some(cur); - return Some((next.unwrap(), t)); + return Some((next, t)); } }, (Some(cur), None) => { range.end += 1; last = Some(cur); } - (None, Some(next)) => return Some((next.unwrap(), range.clone())), + (None, Some(next)) => return Some((next, range.clone())), (None, None) => return None, } }) @@ -347,30 +266,6 @@ mod tests { StdRng::seed_from_u64(seed) } - /// Generates a vector of partition key strings, or an error. - /// - /// This function normalises the de-duplicated output of - /// [`partition_keys()`], returning the last observed key when the dedupe - /// [`partition_keys()`] process returns [`None`]. - fn generate_denormalised_keys<'a, 'b: 'a>( - batch: &'b MutableBatch, - template_parts: impl Iterator<Item = TemplatePart<'a>>, - ) -> Result<Vec<String>, PartitionKeyError> { - let mut last_ret = None; - partition_keys(batch, template_parts) - .map(|v| match v { - Some(this) => { - last_ret = Some(this.clone()); - this - } - None => last_ret - .as_ref() - .expect("must have observed prior key") - .clone(), - }) - .collect::<Result<Vec<_>, _>>() - } - /// A fixture test asserting the default partition key format, derived from /// the default partition key template. 
#[test] @@ -387,40 +282,15 @@ mod tests { let template_parts = TablePartitionTemplateOverride::try_new(None, &Default::default()).unwrap(); let keys: Vec<_> = partition_keys(&batch, template_parts.parts()) - .map(|v| v.expect("non-identical consecutive keys")) .collect::<Result<Vec<_>, _>>() .unwrap(); assert_eq!(keys, vec!["1970-01-01".to_string()]) } - #[test] - #[should_panic(expected = r#"error reading time column: ColumnNotFound { column: "time" }"#)] - fn test_zero_sized_batch() { - let batch = MutableBatch::new(); - - let template_parts = test_table_partition_override(vec![ - TemplatePart::TimeFormat("%Y-%m-%d %H:%M:%S"), - TemplatePart::TagValue("region"), - TemplatePart::TagValue("bananas"), - ]); - - let keys: Vec<_> = partition_batch(&batch, &template_parts).collect::<Vec<_>>(); - assert_eq!(keys, vec![]) - } - #[test] fn test_range_encode() { - let collected: Vec<_> = - range_encode(vec![5, 5, 5, 7, 2, 2, 3].into_iter().map(Some)).collect(); - assert_eq!(collected, vec![(5, 0..3), (7, 3..4), (2, 4..6), (3, 6..7)]) - } - - #[test] - fn test_range_encode_sparse() { - let collected: Vec<_> = - range_encode(vec![Some(5), None, None, Some(7), Some(2), None, Some(3)].into_iter()) - .collect(); + let collected: Vec<_> = range_encode(vec![5, 5, 5, 7, 2, 2, 3].into_iter()).collect(); assert_eq!(collected, vec![(5, 0..3), (7, 3..4), (2, 4..6), (3, 6..7)]) } @@ -431,7 +301,7 @@ mod tests { .take(1000) .collect(); - let rle: Vec<_> = range_encode(original.iter().cloned().map(Some)).collect(); + let rle: Vec<_> = range_encode(original.iter().cloned()).collect(); let mut last_range = rle[0].1.clone(); for (_, range) in &rle[1..] { @@ -474,7 +344,6 @@ mod tests { writer.commit(); let keys: Vec<_> = partition_keys(&batch, template_parts.into_iter()) - .map(|v| v.expect("non-identical consecutive keys")) .collect::<Result<Vec<_>, _>>() .unwrap(); @@ -490,64 +359,6 @@ mod tests { ) } - #[test] - fn test_sparse_representation() { - let mut batch = MutableBatch::new(); - let mut writer = Writer::new(&mut batch, 6); - - writer - .write_time( - "time", - vec![ - 1, - 1, - 1, - 1685971961464736000, - 1685971961464736000, - 1685971961464736000, - ] - .into_iter(), - ) - .unwrap(); - - writer - .write_tag( - "region", - Some(&[0b00111111]), - vec![ - "platanos", "platanos", "platanos", "platanos", "platanos", "bananas", - ] - .into_iter(), - ) - .unwrap(); - - let template_parts = [ - TemplatePart::TimeFormat("%Y-%m-%d %H:%M:%S"), - TemplatePart::TagValue("region"), - TemplatePart::TagValue("bananas"), // column not present - ]; - - writer.commit(); - - let mut iter = partition_keys(&batch, template_parts.into_iter()); - - assert_eq!( - iter.next().unwrap(), - Some(Ok("1970-01-01 00:00:00|platanos|!".to_string())) - ); - assert_eq!(iter.next().unwrap(), None); - assert_eq!(iter.next().unwrap(), None); - assert_eq!( - iter.next().unwrap(), - Some(Ok("2023-06-05 13:32:41|platanos|!".to_string())) - ); - assert_eq!(iter.next().unwrap(), None); - assert_eq!( - iter.next().unwrap(), - Some(Ok("2023-06-05 13:32:41|bananas|!".to_string())) - ); - } - #[test] fn partitioning_on_fields_panics() { let mut batch = MutableBatch::new(); @@ -569,7 +380,7 @@ mod tests { writer.commit(); - let got: Result<Vec<_>, _> = generate_denormalised_keys(&batch, template_parts.into_iter()); + let got: Result<Vec<_>, _> = partition_keys(&batch, template_parts.into_iter()).collect(); assert_matches::assert_matches!(got, Err(PartitionKeyError::TagValueNotTag(_))); } @@ -623,12 +434,8 @@ mod tests { writer.commit(); - // Generate 
the full set of partition keys, inserting the - // last observed value when the next key is identical to - // normalise the values. - let keys = generate_denormalised_keys(&batch, template.parts()) - .unwrap(); - assert_eq!(keys, vec![$want_key.to_string()], "generated key differs"); + let keys: Vec<_> = partition_keys(&batch, template.parts()).collect::<Result<Vec<_>, _>>().unwrap(); + assert_eq!(keys, vec![$want_key.to_string()]); // Reverse the encoding. let reversed = build_column_values(&template, &keys[0]); @@ -975,9 +782,7 @@ mod tests { .collect::<Vec<_>>(); let template = test_table_partition_override(template); - let ret = partition_keys(&batch, template.parts()) - .map(|v| v.expect("non-identical consecutive keys")) - .collect::<Result<Vec<_>, _>>(); + let ret = partition_keys(&batch, template.parts()).collect::<Result<Vec<_>, _>>(); assert_matches!(ret, Err(PartitionKeyError::InvalidStrftime)); } @@ -1074,7 +879,8 @@ mod tests { } writer.commit(); - let keys: Vec<_> = generate_denormalised_keys(&batch, template.parts()) + let keys: Vec<_> = partition_keys(&batch, template.parts()) + .collect::<Result<Vec<_>, _>>() .unwrap(); assert_eq!(keys.len(), 1); @@ -1155,9 +961,7 @@ mod tests { .unwrap(); writer.commit(); - let ret = partition_keys(&batch, template.parts()) - .map(|v| v.expect("non-identical consecutive keys")) - .collect::<Result<Vec<_>, _>>(); + let ret = partition_keys(&batch, template.parts()).collect::<Result<Vec<_>, _>>(); // The is allowed to succeed or fail under this test (but not // panic), and the returned error/value must match certain diff --git a/mutable_batch/src/payload/partition/strftime.rs b/mutable_batch/src/payload/partition/strftime.rs index 73c788256e..149cf2c1ba 100644 --- a/mutable_batch/src/payload/partition/strftime.rs +++ b/mutable_batch/src/payload/partition/strftime.rs @@ -20,8 +20,8 @@ const YMD_SPEC: &str = "%Y-%m-%d"; struct RingBuffer<const N: usize, T> { buf: [Option<T>; N], - /// Index into to the last wrote value. - last_idx: usize, + /// Index into to the next free/to-be-reused slot. + next_ptr: usize, } impl<const N: usize, T> Default for RingBuffer<N, T> @@ -31,7 +31,7 @@ where fn default() -> Self { Self { buf: [(); N].map(|_| Default::default()), // default init for non-const type - last_idx: N - 1, + next_ptr: Default::default(), } } } @@ -48,11 +48,11 @@ where /// /// This is an O(1) operation. fn next_slot(&mut self) -> &mut T { - // Advance the next slot pointer - self.last_idx += 1; - self.last_idx %= N; + let v = self.buf[self.next_ptr].get_or_insert_with(Default::default); - let v = self.buf[self.last_idx].get_or_insert_with(Default::default); + // Advance the next slot pointer + self.next_ptr += 1; + self.next_ptr %= N; v } @@ -74,11 +74,6 @@ where } None } - - /// Return the last wrote value, if any. - fn last(&self) -> Option<&'_ T> { - self.buf[self.last_idx].as_ref() - } } /// A strftime-like formatter of epoch timestamps with nanosecond granularity. @@ -231,19 +226,6 @@ impl<'a> StrftimeFormatter<'a> { } timestamp - (timestamp % DAY_NANOSECONDS) } - - /// Returns true if the output of rendering `timestamp` will match the last - /// rendered timestamp, after optionally applying the precision reduction - /// optimisation. - pub(crate) fn equals_last(&self, timestamp: i64) -> bool { - // Optionally apply the default format reduction optimisation. 
- let timestamp = self.maybe_reduce(timestamp); - - self.values - .last() - .map(|(ts, _)| *ts == timestamp) - .unwrap_or_default() - } } #[cfg(test)] @@ -299,18 +281,7 @@ mod tests { fmt.values.buf.as_slice(), [Some((42, _)), Some((12345, _)), None, None, None] ); - assert_eq!(fmt.values.last_idx, 1); - } - - #[test] - fn test_ring_buffer_equals_last() { - let mut b = RingBuffer::<4, _>::default(); - - assert!(b.find(|v| *v == 42).is_none()); - - *b.next_slot() = 42; - - assert_eq!(b.last(), Some(&42)); + assert_eq!(fmt.values.next_ptr, 2); } const FORMATTER_SPEC_PARTS: &[&str] = &[
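The diff above removes the consecutive-key deduplication from `partition_keys` and leaves `range_encode` to merge equal consecutive elements into index ranges. Below is a minimal, standalone sketch of that run-length idea, assuming nothing from the crate itself: `run_length_ranges` is an illustrative name, not the crate's function, and the asserted output simply mirrors the `test_range_encode` case shown in the diff.

```rust
use std::ops::Range;

/// Collapse consecutive equal items into (value, index range) pairs - the
/// same run-length idea the diff's `range_encode` helper applies to the
/// per-row partition keys.
fn run_length_ranges<I>(iter: I) -> Vec<(I::Item, Range<usize>)>
where
    I: IntoIterator,
    I::Item: Eq,
{
    let mut out: Vec<(I::Item, Range<usize>)> = Vec::new();
    for (idx, item) in iter.into_iter().enumerate() {
        let same_as_last = matches!(out.last(), Some((last, _)) if *last == item);
        if same_as_last {
            // Same value as the previous element: widen the current range.
            out.last_mut().expect("checked above").1.end = idx + 1;
        } else {
            // New value: start a new (value, range) run at this index.
            out.push((item, idx..idx + 1));
        }
    }
    out
}

fn main() {
    let runs = run_length_ranges(vec![5, 5, 5, 7, 2, 2, 3]);
    assert_eq!(runs, vec![(5, 0..3), (7, 3..4), (2, 4..6), (3, 6..7)]);
    println!("{runs:?}");
}
```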
dfecf570e6fda5551666afc35c4d6f81ef23befc
Trevor Hilton
2024-07-23 14:17:09
support `!=`, `IN`, and `NOT IN` predicates in last cache queries (#25175)
Part of #25174 This PR adds support for three more predicate types when querying the last cache: !=, IN, and NOT IN. Previously only = was supported. Existing tests were extended to check that these predicate types work as expected, both in the last_cache module and in the influxdb3_server crate. The latter was important to ensure that the new predicate logic works in the context of actual query parsing/execution.
null
feat: support `!=`, `IN`, and `NOT IN` predicates in last cache queries (#25175) Part of #25174 This PR adds support for three more predicate types when querying the last cache: !=, IN, and NOT IN. Previously only = was supported. Existing tests were extended to check that these predicate types work as expected, both in the last_cache module and in the influxdb3_server crate. The latter was important to ensure that the new predicate logic works in the context of actual query parsing/execution.
diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index b48f909411..905057c185 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -666,31 +666,79 @@ mod tests { .await; assert_eq!(resp.status(), StatusCode::OK); + struct TestCase { + query: &'static str, + expected: &'static str, + } + + let test_cases = [ + TestCase { + query: "SELECT * FROM last_cache('cpu') ORDER BY host", + expected: "\ + +------+--------+---------------------+-------+\n\ + | host | region | time | usage |\n\ + +------+--------+---------------------+-------+\n\ + | a | us | 1970-01-01T00:16:40 | 11.0 |\n\ + | b | us | 1970-01-01T00:16:40 | 22.0 |\n\ + | c | us | 1970-01-01T00:16:40 | 33.0 |\n\ + | d | ca | 1970-01-01T00:16:40 | 44.0 |\n\ + | e | ca | 1970-01-01T00:16:40 | 55.0 |\n\ + | f | eu | 1970-01-01T00:16:40 | 66.0 |\n\ + +------+--------+---------------------+-------+", + }, + TestCase { + query: "SELECT * FROM last_cache('cpu') WHERE region = 'us' ORDER BY host", + expected: "\ + +------+--------+---------------------+-------+\n\ + | host | region | time | usage |\n\ + +------+--------+---------------------+-------+\n\ + | a | us | 1970-01-01T00:16:40 | 11.0 |\n\ + | b | us | 1970-01-01T00:16:40 | 22.0 |\n\ + | c | us | 1970-01-01T00:16:40 | 33.0 |\n\ + +------+--------+---------------------+-------+", + }, + TestCase { + query: "SELECT * FROM last_cache('cpu') WHERE region != 'us' ORDER BY host", + expected: "\ + +------+--------+---------------------+-------+\n\ + | host | region | time | usage |\n\ + +------+--------+---------------------+-------+\n\ + | d | ca | 1970-01-01T00:16:40 | 44.0 |\n\ + | e | ca | 1970-01-01T00:16:40 | 55.0 |\n\ + | f | eu | 1970-01-01T00:16:40 | 66.0 |\n\ + +------+--------+---------------------+-------+", + }, + TestCase { + query: "SELECT * FROM last_cache('cpu') WHERE host IN ('a', 'b') ORDER BY host", + expected: "\ + +------+--------+---------------------+-------+\n\ + | host | region | time | usage |\n\ + +------+--------+---------------------+-------+\n\ + | a | us | 1970-01-01T00:16:40 | 11.0 |\n\ + | b | us | 1970-01-01T00:16:40 | 22.0 |\n\ + +------+--------+---------------------+-------+", + }, + TestCase { + query: "SELECT * FROM last_cache('cpu') WHERE host NOT IN ('a', 'b') ORDER BY host", + expected: "\ + +------+--------+---------------------+-------+\n\ + | host | region | time | usage |\n\ + +------+--------+---------------------+-------+\n\ + | c | us | 1970-01-01T00:16:40 | 33.0 |\n\ + | d | ca | 1970-01-01T00:16:40 | 44.0 |\n\ + | e | ca | 1970-01-01T00:16:40 | 55.0 |\n\ + | f | eu | 1970-01-01T00:16:40 | 66.0 |\n\ + +------+--------+---------------------+-------+", + }, + ]; + + for t in test_cases { + let res = query(&url, db_name, t.query, "pretty", None).await; + let body = body::to_bytes(res.into_body()).await.unwrap(); + let body = String::from_utf8(body.as_bytes().to_vec()).unwrap(); + assert_eq!(t.expected, body, "query failed: {}", t.query); + } // Query from the last cache: - let res = query( - &url, - db_name, - format!("SELECT * FROM last_cache('{tbl_name}') ORDER BY host"), - "pretty", - None, - ) - .await; - let body = body::to_bytes(res.into_body()).await.unwrap(); - let body = String::from_utf8(body.as_bytes().to_vec()).unwrap(); - assert_eq!( - "\ - +------+--------+---------------------+-------+\n\ - | host | region | time | usage |\n\ - +------+--------+---------------------+-------+\n\ - | a | us | 1970-01-01T00:16:40 | 11.0 |\n\ - | b | us | 1970-01-01T00:16:40 | 22.0 |\n\ - | c | us | 
1970-01-01T00:16:40 | 33.0 |\n\ - | d | ca | 1970-01-01T00:16:40 | 44.0 |\n\ - | e | ca | 1970-01-01T00:16:40 | 55.0 |\n\ - | f | eu | 1970-01-01T00:16:40 | 66.0 |\n\ - +------+--------+---------------------+-------+", - body - ); shutdown.cancel(); } diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs index 6e802c4235..88a2e6eff1 100644 --- a/influxdb3_write/src/last_cache/mod.rs +++ b/influxdb3_write/src/last_cache/mod.rs @@ -17,7 +17,7 @@ use arrow::{ error::ArrowError, }; use datafusion::{ - logical_expr::{BinaryExpr, Expr, Operator}, + logical_expr::{expr::InList, BinaryExpr, Expr, Operator}, scalar::ScalarValue, }; use hashbrown::{HashMap, HashSet}; @@ -637,20 +637,20 @@ impl LastCache { return Ok(vec![]); } let mut new_caches = vec![]; - 'cache_loop: for c in caches { + for c in caches { let Some(cache_key) = c.state.as_key() else { - continue 'cache_loop; + continue; }; if let Some(pred) = predicate { - let Some(next_state) = cache_key.evaluate_predicate(pred) else { - continue 'cache_loop; - }; - let mut additional_columns = c.additional_columns.clone(); - additional_columns.push((&cache_key.column_name, &pred.value)); - new_caches.push(ExtendedLastCacheState { - state: next_state, - additional_columns, - }); + let next_states = cache_key.evaluate_predicate(pred); + new_caches.extend(next_states.into_iter().map(|(state, value)| { + let mut additional_columns = c.additional_columns.clone(); + additional_columns.push((&cache_key.column_name, value)); + ExtendedLastCacheState { + state, + additional_columns, + } + })); } else { new_caches.extend(cache_key.value_map.iter().map(|(v, state)| { let mut additional_columns = c.additional_columns.clone(); @@ -679,37 +679,72 @@ impl LastCache { exprs .iter() .filter_map(|expr| { - if let Expr::BinaryExpr(BinaryExpr { left, op, right }) = expr { - if *op == Operator::Eq { - if let Expr::Column(c) = left.as_ref() { - let key = c.name.to_string(); - if !self.key_columns.contains(&key) { + match expr { + Expr::BinaryExpr(BinaryExpr { left, op, right }) => { + let key = if let Expr::Column(c) = left.as_ref() { + if !self.key_columns.contains(c.name()) { return None; } - return match right.as_ref() { - Expr::Literal(ScalarValue::Utf8(Some(v))) => Some(Predicate { - key, - value: KeyValue::String(v.to_owned()), - }), - Expr::Literal(ScalarValue::Boolean(Some(v))) => Some(Predicate { - key, - value: KeyValue::Bool(*v), - }), + c.name.to_string() + } else { + return None; + }; + let value = match right.as_ref() { + Expr::Literal(ScalarValue::Utf8(Some(v))) => { + KeyValue::String(v.to_owned()) + } + Expr::Literal(ScalarValue::Boolean(Some(v))) => KeyValue::Bool(*v), + // TODO: handle integer types that can be casted up to i64/u64: + Expr::Literal(ScalarValue::Int64(Some(v))) => KeyValue::Int(*v), + Expr::Literal(ScalarValue::UInt64(Some(v))) => KeyValue::UInt(*v), + _ => return None, + }; + match op { + Operator::Eq => Some(Predicate::new_eq(key, value)), + Operator::NotEq => Some(Predicate::new_not_eq(key, value)), + _ => None, + } + } + Expr::InList(InList { + expr, + list, + negated, + }) => { + let key = if let Expr::Column(c) = expr.as_ref() { + if !self.key_columns.contains(c.name()) { + return None; + } + c.name.to_string() + } else { + return None; + }; + let values: Vec<KeyValue> = list + .iter() + .filter_map(|e| match e { + Expr::Literal(ScalarValue::Utf8(Some(v))) => { + Some(KeyValue::String(v.to_owned())) + } + Expr::Literal(ScalarValue::Boolean(Some(v))) => { + Some(KeyValue::Bool(*v)) + } // 
TODO: handle integer types that can be casted up to i64/u64: - Expr::Literal(ScalarValue::Int64(Some(v))) => Some(Predicate { - key, - value: KeyValue::Int(*v), - }), - Expr::Literal(ScalarValue::UInt64(Some(v))) => Some(Predicate { - key, - value: KeyValue::UInt(*v), - }), + Expr::Literal(ScalarValue::Int64(Some(v))) => { + Some(KeyValue::Int(*v)) + } + Expr::Literal(ScalarValue::UInt64(Some(v))) => { + Some(KeyValue::UInt(*v)) + } _ => None, - }; + }) + .collect(); + if *negated { + Some(Predicate::new_not_in(key, values)) + } else { + Some(Predicate::new_in(key, values)) } } + _ => None, } - None }) .collect() } @@ -798,17 +833,45 @@ pub(crate) struct Predicate { /// The left-hand-side of the predicate key: String, /// The right-hand-side of the predicate - value: KeyValue, + kind: PredicateKind, } -#[cfg(test)] impl Predicate { - fn new(key: impl Into<String>, value: KeyValue) -> Self { + fn new_eq(key: impl Into<String>, value: KeyValue) -> Self { + Self { + key: key.into(), + kind: PredicateKind::Eq(value), + } + } + + fn new_not_eq(key: impl Into<String>, value: KeyValue) -> Self { Self { key: key.into(), - value, + kind: PredicateKind::NotEq(value), } } + + fn new_in(key: impl Into<String>, values: Vec<KeyValue>) -> Self { + Self { + key: key.into(), + kind: PredicateKind::In(values), + } + } + + fn new_not_in(key: impl Into<String>, values: Vec<KeyValue>) -> Self { + Self { + key: key.into(), + kind: PredicateKind::NotIn(values), + } + } +} + +#[derive(Debug, Clone)] +pub(crate) enum PredicateKind { + Eq(KeyValue), + NotEq(KeyValue), + In(Vec<KeyValue>), + NotIn(Vec<KeyValue>), } /// Represents the hierarchical last cache structure @@ -884,14 +947,37 @@ impl LastCacheKey { /// /// This assumes that a predicate for this [`LastCacheKey`]'s column was provided, and will panic /// otherwise. 
- fn evaluate_predicate(&self, predicate: &Predicate) -> Option<&LastCacheState> { + fn evaluate_predicate<'a: 'b, 'b>( + &'a self, + predicate: &'b Predicate, + ) -> Vec<(&'a LastCacheState, &'b KeyValue)> { if predicate.key != self.column_name { panic!( "attempted to evaluate unexpected predicate with key {} for column named {}", predicate.key, self.column_name ); } - self.value_map.get(&predicate.value) + match &predicate.kind { + PredicateKind::Eq(val) => self + .value_map + .get(val) + .map(|s| vec![(s, val)]) + .unwrap_or_default(), + PredicateKind::NotEq(val) => self + .value_map + .iter() + .filter_map(|(v, s)| (v != val).then_some((s, v))) + .collect(), + PredicateKind::In(vals) => vals + .iter() + .filter_map(|v| self.value_map.get(v).map(|s| (s, v))) + .collect(), + PredicateKind::NotIn(vals) => self + .value_map + .iter() + .filter_map(|(v, s)| (!vals.contains(v)).then_some((s, v))) + .collect(), + } } /// Remove expired values from any cache nested within this [`LastCacheKey`] @@ -908,7 +994,7 @@ impl LastCacheKey { /// A value for a key column in a [`LastCache`] #[derive(Debug, Clone, Eq, PartialEq, Hash)] -enum KeyValue { +pub(crate) enum KeyValue { String(String), Int(i64), UInt(u64), @@ -1476,7 +1562,7 @@ mod tests { .await .unwrap(); - let predicates = &[Predicate::new("host", KeyValue::string("a"))]; + let predicates = &[Predicate::new_eq("host", KeyValue::string("a"))]; // Check what is in the last cache: let batch = wbuf @@ -1600,8 +1686,8 @@ mod tests { // Predicate including both key columns only produces value columns from the cache TestCase { predicates: &[ - Predicate::new("region", KeyValue::string("us")), - Predicate::new("host", KeyValue::string("c")), + Predicate::new_eq("region", KeyValue::string("us")), + Predicate::new_eq("host", KeyValue::string("c")), ], expected: &[ "+--------+------+-----------------------------+-------+", @@ -1614,7 +1700,7 @@ mod tests { // Predicate on only region key column will have host column outputted in addition to // the value columns: TestCase { - predicates: &[Predicate::new("region", KeyValue::string("us"))], + predicates: &[Predicate::new_eq("region", KeyValue::string("us"))], expected: &[ "+--------+------+-----------------------------+-------+", "| region | host | time | usage |", @@ -1627,7 +1713,7 @@ mod tests { }, // Similar to previous, with a different region predicate: TestCase { - predicates: &[Predicate::new("region", KeyValue::string("ca"))], + predicates: &[Predicate::new_eq("region", KeyValue::string("ca"))], expected: &[ "+--------+------+-----------------------------+-------+", "| region | host | time | usage |", @@ -1641,7 +1727,7 @@ mod tests { // Predicate on only host key column will have region column outputted in addition to // the value columns: TestCase { - predicates: &[Predicate::new("host", KeyValue::string("a"))], + predicates: &[Predicate::new_eq("host", KeyValue::string("a"))], expected: &[ "+--------+------+-----------------------------+-------+", "| region | host | time | usage |", @@ -1670,7 +1756,7 @@ mod tests { // Using a non-existent key column as a predicate has no effect: // TODO: should this be an error? 
TestCase { - predicates: &[Predicate::new("container_id", KeyValue::string("12345"))], + predicates: &[Predicate::new_eq("container_id", KeyValue::string("12345"))], expected: &[ "+--------+------+-----------------------------+-------+", "| region | host | time | usage |", @@ -1686,28 +1772,73 @@ mod tests { }, // Using a non existent key column value yields empty result set: TestCase { - predicates: &[Predicate::new("region", KeyValue::string("eu"))], + predicates: &[Predicate::new_eq("region", KeyValue::string("eu"))], expected: &["++", "++"], }, // Using an invalid combination of key column values yields an empty result set: TestCase { predicates: &[ - Predicate::new("region", KeyValue::string("ca")), - Predicate::new("host", KeyValue::string("a")), + Predicate::new_eq("region", KeyValue::string("ca")), + Predicate::new_eq("host", KeyValue::string("a")), ], expected: &["++", "++"], }, // Using a non-existent key column value (for host column) also yields empty result set: TestCase { - predicates: &[Predicate::new("host", KeyValue::string("g"))], + predicates: &[Predicate::new_eq("host", KeyValue::string("g"))], expected: &["++", "++"], }, // Using an incorrect type for a key column value in predicate also yields empty result // set. TODO: should this be an error? TestCase { - predicates: &[Predicate::new("host", KeyValue::Bool(true))], + predicates: &[Predicate::new_eq("host", KeyValue::Bool(true))], expected: &["++", "++"], }, + // Using a != predicate + TestCase { + predicates: &[Predicate::new_not_eq("region", KeyValue::string("us"))], + expected: &[ + "+--------+------+-----------------------------+-------+", + "| region | host | time | usage |", + "+--------+------+-----------------------------+-------+", + "| ca | d | 1970-01-01T00:00:00.000001Z | 40.0 |", + "| ca | e | 1970-01-01T00:00:00.000001Z | 20.0 |", + "| ca | f | 1970-01-01T00:00:00.000001Z | 30.0 |", + "+--------+------+-----------------------------+-------+", + ], + }, + // Using an IN predicate: + TestCase { + predicates: &[Predicate::new_in( + "host", + vec![KeyValue::string("a"), KeyValue::string("b")], + )], + expected: &[ + "+--------+------+-----------------------------+-------+", + "| region | host | time | usage |", + "+--------+------+-----------------------------+-------+", + "| us | a | 1970-01-01T00:00:00.000001Z | 100.0 |", + "| us | b | 1970-01-01T00:00:00.000001Z | 80.0 |", + "+--------+------+-----------------------------+-------+", + ], + }, + // Using a NOT IN predicate: + TestCase { + predicates: &[Predicate::new_not_in( + "host", + vec![KeyValue::string("a"), KeyValue::string("b")], + )], + expected: &[ + "+--------+------+-----------------------------+-------+", + "| region | host | time | usage |", + "+--------+------+-----------------------------+-------+", + "| ca | d | 1970-01-01T00:00:00.000001Z | 40.0 |", + "| ca | e | 1970-01-01T00:00:00.000001Z | 20.0 |", + "| ca | f | 1970-01-01T00:00:00.000001Z | 30.0 |", + "| us | c | 1970-01-01T00:00:00.000001Z | 60.0 |", + "+--------+------+-----------------------------+-------+", + ], + }, ]; for t in test_cases { @@ -1808,8 +1939,8 @@ mod tests { let test_cases = [ TestCase { predicates: &[ - Predicate::new("region", KeyValue::string("us")), - Predicate::new("host", KeyValue::string("a")), + Predicate::new_eq("region", KeyValue::string("us")), + Predicate::new_eq("host", KeyValue::string("a")), ], expected: &[ "+--------+------+--------------------------------+-------+", @@ -1823,7 +1954,7 @@ mod tests { ], }, TestCase { - predicates: 
&[Predicate::new("region", KeyValue::string("us"))], + predicates: &[Predicate::new_eq("region", KeyValue::string("us"))], expected: &[ "+--------+------+--------------------------------+-------+", "| region | host | time | usage |", @@ -1840,7 +1971,7 @@ mod tests { ], }, TestCase { - predicates: &[Predicate::new("host", KeyValue::string("a"))], + predicates: &[Predicate::new_eq("host", KeyValue::string("a"))], expected: &[ "+--------+------+--------------------------------+-------+", "| region | host | time | usage |", @@ -1853,7 +1984,7 @@ mod tests { ], }, TestCase { - predicates: &[Predicate::new("host", KeyValue::string("b"))], + predicates: &[Predicate::new_eq("host", KeyValue::string("b"))], expected: &[ "+--------+------+--------------------------------+-------+", "| region | host | time | usage |", @@ -1949,8 +2080,8 @@ mod tests { // Check the cache for values: let predicates = &[ - Predicate::new("region", KeyValue::string("us")), - Predicate::new("host", KeyValue::string("a")), + Predicate::new_eq("region", KeyValue::string("us")), + Predicate::new_eq("host", KeyValue::string("a")), ]; // Check what is in the last cache: @@ -2001,7 +2132,7 @@ mod tests { .unwrap(); // Check the cache for values: - let predicates = &[Predicate::new("host", KeyValue::string("a"))]; + let predicates = &[Predicate::new_eq("host", KeyValue::string("a"))]; // Check what is in the last cache: let batches = wbuf @@ -2105,7 +2236,7 @@ mod tests { }, // Predicates on tag key column work as expected: TestCase { - predicates: &[Predicate::new("component_id", KeyValue::string("333"))], + predicates: &[Predicate::new_eq("component_id", KeyValue::string("333"))], expected: &[ "+--------------+--------+--------+------+---------+-----------------------------+", "| component_id | active | type | loc | reading | time |", @@ -2116,7 +2247,7 @@ mod tests { }, // Predicate on a non-string field key: TestCase { - predicates: &[Predicate::new("active", KeyValue::Bool(false))], + predicates: &[Predicate::new_eq("active", KeyValue::Bool(false))], expected: &[ "+--------------+--------+-------------+---------+---------+-----------------------------+", "| component_id | active | type | loc | reading | time |", @@ -2128,7 +2259,7 @@ mod tests { }, // Predicate on a string field key: TestCase { - predicates: &[Predicate::new("type", KeyValue::string("camera"))], + predicates: &[Predicate::new_eq("type", KeyValue::string("camera"))], expected: &[ "+--------------+--------+--------+-----------+---------+-----------------------------+", "| component_id | active | type | loc | reading | time |", @@ -2219,7 +2350,7 @@ mod tests { }, // Predicate on state column, which is part of the series key: TestCase { - predicates: &[Predicate::new("state", KeyValue::string("ca"))], + predicates: &[Predicate::new_eq("state", KeyValue::string("ca"))], expected: &[ "+-------+--------+-------+-------+-----------------------------+", "| state | county | farm | speed | time |", @@ -2235,7 +2366,7 @@ mod tests { }, // Predicate on county column, which is part of the series key: TestCase { - predicates: &[Predicate::new("county", KeyValue::string("napa"))], + predicates: &[Predicate::new_eq("county", KeyValue::string("napa"))], expected: &[ "+-------+--------+-------+-------+-----------------------------+", "| state | county | farm | speed | time |", @@ -2247,7 +2378,7 @@ mod tests { }, // Predicate on farm column, which is part of the series key: TestCase { - predicates: &[Predicate::new("farm", KeyValue::string("30-01"))], + predicates: 
&[Predicate::new_eq("farm", KeyValue::string("30-01"))], expected: &[ "+-------+--------+-------+-------+-----------------------------+", "| state | county | farm | speed | time |", @@ -2259,9 +2390,9 @@ mod tests { // Predicate on all series key columns: TestCase { predicates: &[ - Predicate::new("state", KeyValue::string("ca")), - Predicate::new("county", KeyValue::string("nevada")), - Predicate::new("farm", KeyValue::string("40-01")), + Predicate::new_eq("state", KeyValue::string("ca")), + Predicate::new_eq("county", KeyValue::string("nevada")), + Predicate::new_eq("farm", KeyValue::string("40-01")), ], expected: &[ "+-------+--------+-------+-------+-----------------------------+", @@ -2351,7 +2482,7 @@ mod tests { }, // Predicate on state column, which is part of the series key: TestCase { - predicates: &[Predicate::new("state", KeyValue::string("ca"))], + predicates: &[Predicate::new_eq("state", KeyValue::string("ca"))], expected: &[ "+--------+-------+-------+-------+-----------------------------+", "| county | farm | state | speed | time |", @@ -2367,7 +2498,7 @@ mod tests { }, // Predicate on county column, which is part of the series key: TestCase { - predicates: &[Predicate::new("county", KeyValue::string("napa"))], + predicates: &[Predicate::new_eq("county", KeyValue::string("napa"))], expected: &[ "+--------+-------+-------+-------+-----------------------------+", "| county | farm | state | speed | time |", @@ -2379,7 +2510,7 @@ mod tests { }, // Predicate on farm column, which is part of the series key: TestCase { - predicates: &[Predicate::new("farm", KeyValue::string("30-01"))], + predicates: &[Predicate::new_eq("farm", KeyValue::string("30-01"))], expected: &[ "+--------+-------+-------+-------+-----------------------------+", "| county | farm | state | speed | time |", @@ -2391,9 +2522,9 @@ mod tests { // Predicate on all series key columns: TestCase { predicates: &[ - Predicate::new("state", KeyValue::string("ca")), - Predicate::new("county", KeyValue::string("nevada")), - Predicate::new("farm", KeyValue::string("40-01")), + Predicate::new_eq("state", KeyValue::string("ca")), + Predicate::new_eq("county", KeyValue::string("nevada")), + Predicate::new_eq("farm", KeyValue::string("40-01")), ], expected: &[ "+--------+-------+-------+-------+-----------------------------+", @@ -2533,7 +2664,7 @@ mod tests { let test_cases = [ // Cache that has values in the zone columns should produce them: TestCase { - predicates: &[Predicate::new("game_id", KeyValue::string("4"))], + predicates: &[Predicate::new_eq("game_id", KeyValue::string("4"))], expected: &[ "+---------+-----------+-----------------------------+------+------+", "| game_id | player | time | type | zone |", @@ -2544,7 +2675,7 @@ mod tests { }, // Cache that does not have a zone column will produce it with nulls: TestCase { - predicates: &[Predicate::new("game_id", KeyValue::string("1"))], + predicates: &[Predicate::new_eq("game_id", KeyValue::string("1"))], expected: &[ "+---------+-----------+-----------------------------+------+------+", "| game_id | player | time | type | zone |", @@ -2658,7 +2789,7 @@ mod tests { let test_cases = [ // Can query on specific key column values: TestCase { - predicates: &[Predicate::new("t1", KeyValue::string("a"))], + predicates: &[Predicate::new_eq("t1", KeyValue::string("a"))], expected: &[ "+----+-----+--------------------------------+-----+-----+-----+", "| t1 | f1 | time | f2 | f3 | f4 |", @@ -2668,7 +2799,7 @@ mod tests { ], }, TestCase { - predicates: &[Predicate::new("t1", 
KeyValue::string("b"))], + predicates: &[Predicate::new_eq("t1", KeyValue::string("b"))], expected: &[ "+----+------+--------------------------------+----+------+------+", "| t1 | f1 | time | f2 | f3 | f4 |", @@ -2678,7 +2809,7 @@ mod tests { ], }, TestCase { - predicates: &[Predicate::new("t1", KeyValue::string("c"))], + predicates: &[Predicate::new_eq("t1", KeyValue::string("c"))], expected: &[ "+----+-------+--------------------------------+-------+-------+----+", "| t1 | f1 | time | f2 | f3 | f4 |",
a227366432142ab74a7868650c8ced3b5e088c6e
Marco Neumann
2022-10-24 16:36:08
do not project chunks in `TestDatabase::chunks` (#5960)
Databases are NOT required to project chunks (in practice this is only done by the querier for ingester-based chunks). Instead `iox_query` should (and already does) add the right stream adapters to project chunks or to create NULL-columns. Removing the special handling from the test setup makes it easier to understand and also less likely that `iox_query` starts to rely on this behavior. Helps with #5897.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: do not project chunks in `TestDatabase::chunks` (#5960) Databases are NOT required to project chunks (in practice this is only done by the querier for ingester-based chunks). Instead `iox_query` should (and already does) add the right stream adapters to project chunks or to create NULL-columns. Removing the special handling from the test setup makes it easier to understand and also less likely that `iox_query` starts to rely on this behavior. Helps with #5897. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query/src/frontend/influxrpc.rs b/iox_query/src/frontend/influxrpc.rs index a373abb779..3692f42423 100644 --- a/iox_query/src/frontend/influxrpc.rs +++ b/iox_query/src/frontend/influxrpc.rs @@ -2074,7 +2074,7 @@ mod tests { executor.join().await; //////////////////////////// - // Test 2: no need_fields --> only PK + columns in predicate are return + // Test 2: no need_fields let need_fields = false; let test_db = Arc::new(TestDatabase::new(Arc::clone(&executor))); @@ -2090,14 +2090,15 @@ mod tests { assert_eq!(result[0].0, "h2o"); // table name assert_eq!(result[0].2.len(), 1); // returned chunks - // chunk schema includes only 3 columns of the table PK + cols in predicate + // chunk schema includes still includes everything (the test table implementation does NOT project chunks) let chunk = &result[0].2[0]; let chunk_schema = (*chunk.schema()).clone(); - assert_eq!(chunk_schema.len(), 3); + assert_eq!(chunk_schema.len(), 5); let chunk_schema = chunk_schema.sort_fields_by_name(); assert_eq!(chunk_schema.field(0).1.name(), "bar"); assert_eq!(chunk_schema.field(1).1.name(), "foo"); - assert_eq!(chunk_schema.field(2).1.name(), TIME_COLUMN_NAME); + assert_eq!(chunk_schema.field(2).1.name(), "i64_field"); + assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2"); executor.join().await; } @@ -2205,7 +2206,6 @@ mod tests { assert_eq!(result[0].2.len(), 1); // returned chunks // Since no data, we do not do pushdown in the test chunk. - // the no-data returned chunk will include all columns of the table let chunk = &result[0].2[0]; let chunk_schema = (*chunk.schema()).clone(); assert_eq!(chunk_schema.len(), 5); @@ -2255,15 +2255,16 @@ mod tests { assert_eq!(result[0].0, "h2o"); // table name assert_eq!(result[0].2.len(), 1); // returned chunks - // chunk schema includes 4 columns: 3 cols of PK plus i64_field_2 + // chunk schema includes everything (test table does NOT perform any projection) let chunk = &result[0].2[0]; let chunk_schema = (*chunk.schema()).clone(); - assert_eq!(chunk_schema.len(), 4); + assert_eq!(chunk_schema.len(), 5); let chunk_schema = chunk_schema.sort_fields_by_name(); assert_eq!(chunk_schema.field(0).1.name(), "bar"); assert_eq!(chunk_schema.field(1).1.name(), "foo"); - assert_eq!(chunk_schema.field(2).1.name(), "i64_field_2"); - assert_eq!(chunk_schema.field(3).1.name(), TIME_COLUMN_NAME); + assert_eq!(chunk_schema.field(2).1.name(), "i64_field"); + assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2"); + assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME); executor.join().await; ///////////// @@ -2287,15 +2288,16 @@ mod tests { assert_eq!(result[0].0, "h2o"); // table name assert_eq!(result[0].2.len(), 1); // returned chunks - // chunk schema includes 4 columns: 3 cols of PK plus i64_field_1 + // chunk schema includes everything (test table does NOT perform any projection) let chunk = &result[0].2[0]; let chunk_schema = (*chunk.schema()).clone(); - assert_eq!(chunk_schema.len(), 4); + assert_eq!(chunk_schema.len(), 5); let chunk_schema = chunk_schema.sort_fields_by_name(); assert_eq!(chunk_schema.field(0).1.name(), "bar"); assert_eq!(chunk_schema.field(1).1.name(), "foo"); assert_eq!(chunk_schema.field(2).1.name(), "i64_field"); - assert_eq!(chunk_schema.field(3).1.name(), TIME_COLUMN_NAME); + assert_eq!(chunk_schema.field(3).1.name(), "i64_field_2"); + assert_eq!(chunk_schema.field(4).1.name(), TIME_COLUMN_NAME); executor.join().await; } diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index 7863e9750f..9672cbac37 100644 --- 
a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -149,10 +149,14 @@ pub type QueryText = Box<dyn std::fmt::Display + Send + Sync>; #[async_trait] pub trait QueryDatabase: QueryDatabaseMeta + Debug + Send + Sync { /// Returns a set of chunks within the partition with data that may match - /// the provided predicate. If possible, chunks which have no rows that can + /// the provided predicate. + /// + /// If possible, chunks which have no rows that can /// possibly match the predicate may be omitted. + /// /// If projection is None, returned chunks will include all columns of its original data. Otherwise, - /// returned chunks will includs PK columns (tags and time) and columns specified in the projection. + /// returned chunks will include PK columns (tags and time) and columns specified in the projection. Projecting + /// chunks here is optional and a mere optimization. The query subsystem does NOT rely on it. async fn chunks( &self, table_name: &str, diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index e7a0503f1c..9a6d59919f 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -108,54 +108,27 @@ impl QueryDatabase for TestDatabase { &self, table_name: &str, predicate: &Predicate, - projection: &Option<Vec<usize>>, + _projection: &Option<Vec<usize>>, _ctx: IOxSessionContext, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { // save last predicate *self.chunks_predicate.lock() = predicate.clone(); let partitions = self.partitions.lock().clone(); - let chunks = partitions + Ok(partitions .values() .flat_map(|x| x.values()) - .filter(|x| x.table_name == table_name) + // filter by table + .filter(|c| c.table_name == table_name) + // only keep chunks if their statistics overlap + .filter(|c| { + !matches!( + predicate.apply_to_table_summary(&c.table_summary, c.schema.as_arrow()), + PredicateMatch::Zero + ) + }) .map(|x| Arc::clone(x) as Arc<dyn QueryChunk>) - .collect::<Vec<_>>(); - - // Return chunks with fewer columns if a projection is specified - let mut new_chunks = Vec::with_capacity(chunks.len()); - for c in chunks { - let schema = c.schema(); - let cols = schema.select_given_and_pk_columns(projection); - let cols = cols.iter().map(|c| c.as_str()).collect::<Vec<_>>(); - let selection = Selection::Some(&cols); - - let read_result = - c.read_filter(IOxSessionContext::with_testing(), predicate, selection); - if read_result.is_err() { - return Err(read_result.err().unwrap()); - } - let mut stream = read_result.unwrap(); - - let mut new_chunk = TestChunk::new(c.table_name()); - while let Some(b) = stream.next().await { - let b = b.expect("Error in stream"); - new_chunk.table_data.push(Arc::new(b)); - } - - let new_chunk = if !new_chunk.table_data.is_empty() { - let new_schema = Schema::try_from(new_chunk.table_data[0].schema()).unwrap(); - let new_chunk = new_chunk.add_schema_to_table(new_schema, true, None); - Arc::new(new_chunk) as _ - } else { - // No data, return the original empty chunk with the original schema - c - }; - - new_chunks.push(new_chunk); - } - - Ok(new_chunks) + .collect::<Vec<_>>()) } fn record_query(
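The commit above leans on the fact that the query layer, not the chunk provider, projects record batches (and NULL-fills missing columns) via stream adapters. As a rough illustration of the per-batch projection step only — this assumes the `arrow` crate and invents a `project_columns` helper; it is not the actual IOx adapter, and NULL-column creation is omitted:

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array, StringArray};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;

/// Build a new batch containing only the named columns - the kind of
/// per-batch work a projection adapter performs so chunk sources don't have to.
fn project_columns(batch: &RecordBatch, names: &[&str]) -> Result<RecordBatch, ArrowError> {
    let schema = batch.schema();
    let mut fields = Vec::with_capacity(names.len());
    let mut columns: Vec<ArrayRef> = Vec::with_capacity(names.len());
    for &name in names {
        let idx = schema.index_of(name)?;
        fields.push(schema.field(idx).clone());
        columns.push(Arc::clone(batch.column(idx)));
    }
    RecordBatch::try_new(Arc::new(Schema::new(fields)), columns)
}

fn main() -> Result<(), ArrowError> {
    let schema = Arc::new(Schema::new(vec![
        Field::new("foo", DataType::Utf8, true),
        Field::new("bar", DataType::Utf8, true),
        Field::new("i64_field", DataType::Int64, true),
    ]));
    let batch = RecordBatch::try_new(
        schema,
        vec![
            Arc::new(StringArray::from(vec!["a", "b"])) as ArrayRef,
            Arc::new(StringArray::from(vec!["x", "y"])) as ArrayRef,
            Arc::new(Int64Array::from(vec![1_i64, 2])) as ArrayRef,
        ],
    )?;

    // Keep only the columns the query asked for; the original batch is untouched.
    let projected = project_columns(&batch, &["foo", "i64_field"])?;
    assert_eq!(projected.num_columns(), 2);
    Ok(())
}
```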
ef158a664bcab7cf3028957b92290b609ea43c07
Dom Dwyer
2023-07-27 11:12:11
ref-clone indicators for Schema
Cloning a Schema looks expensive, but it's not!
null
docs: ref-clone indicators for Schema Cloning a Schema looks expensive, but it's not!
diff --git a/ingester/src/buffer_tree/partition/buffer.rs b/ingester/src/buffer_tree/partition/buffer.rs index 3ce9182916..a6dbd157d6 100644 --- a/ingester/src/buffer_tree/partition/buffer.rs +++ b/ingester/src/buffer_tree/partition/buffer.rs @@ -89,6 +89,7 @@ impl DataBuffer { } } + /// Returns the [`Schema`] for the buffered data. pub(crate) fn schema(&self) -> Option<Schema> { match self.0.get() { FsmState::Buffering(v) => v.schema(), diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs index d5cd0e6bf6..b1128066da 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/persisting.rs @@ -66,7 +66,7 @@ impl Queryable for Persisting { } fn schema(&self) -> Option<schema::Schema> { - Some(self.schema.clone()) + Some(self.schema.clone()) // Ref clone } } diff --git a/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs b/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs index db1fea6d1b..7c654c65ac 100644 --- a/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs +++ b/ingester/src/buffer_tree/partition/buffer/state_machine/snapshot.rs @@ -59,7 +59,7 @@ impl Queryable for Snapshot { } fn schema(&self) -> Option<schema::Schema> { - Some(self.schema.clone()) + Some(self.schema.clone()) // Ref clone } }
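The `// Ref clone` comments added above make the point that cloning a `Schema` is cheap: the underlying data is shared, so a clone only bumps a reference count. A standalone sketch of that pattern with a stand-in type (not the actual `schema` crate):

```rust
use std::sync::Arc;

/// A stand-in for a schema-like type whose `Clone` is a reference-count bump
/// rather than a copy of the field data.
#[derive(Clone)]
struct CheapSchema {
    // Shared, immutable field list; cloning the struct clones only the Arc.
    inner: Arc<Vec<String>>,
}

fn main() {
    let schema = CheapSchema {
        inner: Arc::new(vec!["host".to_string(), "time".to_string()]),
    };

    // Cloning copies a pointer and increments the counter; the field list
    // itself is not duplicated.
    let copy = schema.clone();
    assert!(Arc::ptr_eq(&schema.inner, &copy.inner));
    assert_eq!(Arc::strong_count(&schema.inner), 2);
}
```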
43bb149ddce6f556e6b7a8294de5de76b6b751a4
Dom Dwyer
2023-08-24 14:15:56
move crate docs to README
Move the docs to a README to be linked to.
null
docs(gossip): move crate docs to README Move the docs to a README to be linked to.
diff --git a/gossip/README.md b/gossip/README.md new file mode 100644 index 0000000000..d0dfbd44db --- /dev/null +++ b/gossip/README.md @@ -0,0 +1,118 @@ +A simple gossip & broadcast primitive for best-effort metadata distribution +between IOx nodes. + +# Peers + +Peers are uniquely identified by their self-reported "identity" UUID. A unique +UUID is generated for each gossip instance, ensuring the identity changes across +restarts of the underlying node. + +An identity is associated with an immutable socket address used for peer +communication. + +# Topics + +The gossip system implements a topic / interest based, send-side filter to +prevent node A sending frames to node B that it doesn't care about - this helps +reduce traffic and CPU / processing on both nodes. + +During peer exchange, a node advertises the set of peers it is (and other peers +are) interested in, and messages for a given topic are dispatched only to +interested nodes. + +Topics are immutable for the lifetime of a gossip instance, and control frames +are exempt from topic filtering and are always exchanged between all peers. + +# Transport + +Prefer small payloads where possible, and expect loss of some messages - this +primitive provides *best effort* delivery. + +This implementation sends unicast UDP frames between peers, with support for +both control frames & user payloads. The maximum UDP message size is 65,507 +bytes ([`MAX_USER_PAYLOAD_BYTES`] for application-level payloads), but a packet +this large is fragmented into smaller (at most MTU-sized) packets and is at +greater risk of being dropped due to a lost fragment. + +# Security + +Messages exchanged between peers are unauthenticated and connectionless - it's +trivial to forge a message appearing to come from a different peer, or include +malicious payloads. + +The security model of this implementation expects the peers to be running in a +trusted environment, secure from malicious users. + +# Peer Exchange + +When a gossip instance is initialised, it advertises itself to the set of +user-provided "seed" peers - other gossip instances with fixed, known addresses. +The peer then bootstraps the peer list from these seed peers. + +Peers are discovered through PONG messages from peers, which contain the list of +peers the sender has successfully communicated with. + +On receipt of a PONG frame, a node will send PING frames to all newly discovered +peers without adding the peer to its local peer list. Once the discovered peer +responds with a PONG, the peer is added to the peer list. This acts as a +liveness check, ensuring a node only adds peers it can communicate with to its +peer list. + +```text + ┌──────────┐ + │ Seed │ + └──────────┘ + ▲ │ + │ │ + (1) │ │ (2) + PING │ │ PONG + │ │ (contains Peer A) + │ ▼ + ┌──────────┐ + │ Local │ + └──────────┘ + ▲ │ + │ │ + (4) │ │ (3) + PONG │ │ PING + │ │ + │ ▼ + ┌──────────┐ + │ Peer A │ + └──────────┘ +``` + +The above illustrates this process when the "local" node joins: + + 1. Send PING messages to all configured seeds + 2. Receive a PONG response containing the list of all known peers + 3. Send PING frames to all discovered peers - do not add to peer list + 4. Receive PONG frames from discovered peers - add to peer list + +The peer addresses sent during PEX rounds contain the advertised peer identity +and the socket address the PONG sender discovered. + +# Dead Peer Removal + +All peers are periodically sent a PING frame, and a per-peer counter is +incremented. 
If a message of any sort is received (including the PONG response +to the soliciting PING), the peer's counter is reset to 0. + +Once a peer's counter reaches [`MAX_PING_UNACKED`], indicating a number of PINGs +have been sent without receiving any response, the peer is removed from the +node's peer list. + +Dead peers age out of the cluster once all nodes perform the above routine. If a +peer dies, it is still sent in PONG messages as part of PEX until it is removed +from the sender's peer list, but the receiver of the PONG will not add it to the +node's peer list unless it successfully commutates, ensuring dead peers are not +propagated. + +Ageing out dead peers is strictly an optimisation (and not for correctness). A +dead peer consumes a tiny amount of RAM, but also will have frames dispatched to +it - over time, as the number of dead peers accumulates, this would cause the +number of UDP frames sent per broadcast to increase, needlessly increasing +gossip traffic. + +This process is heavily biased towards reliability/deliverability and is too +slow for use as a general peer health check. \ No newline at end of file diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index 1b26e9ba3d..8c64a86799 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -1,123 +1,4 @@ -//! A simple gossip & broadcast primitive for best-effort metadata distribution -//! between IOx nodes. -//! -//! # Peers -//! -//! Peers are uniquely identified by their self-reported "identity" UUID. A -//! unique UUID is generated for each gossip instance, ensuring the identity -//! changes across restarts of the underlying node. -//! -//! An identity is associated with an immutable socket address used for peer -//! communication. -//! -//! # Topics -//! -//! The gossip system implements a topic / interest based, send-side filter to -//! prevent node A sending frames to node B that it doesn't care about - this -//! helps reduce traffic and CPU / processing on both nodes. -//! -//! During peer exchange, a node advertises the set of peers it is (and other -//! peers are) interested in, and messages for a given topic are dispatched only -//! to interested nodes. -//! -//! Topics are immutable for the lifetime of a gossip instance, and control -//! frames are exempt from topic filtering and are always exchanged between all -//! peers. -//! -//! # Transport -//! -//! Prefer small payloads where possible, and expect loss of some messages - -//! this primitive provides *best effort* delivery. -//! -//! This implementation sends unicast UDP frames between peers, with support for -//! both control frames & user payloads. The maximum UDP message size is 65,507 -//! bytes ([`MAX_USER_PAYLOAD_BYTES`] for application-level payloads), but a -//! packet this large is fragmented into smaller (at most MTU-sized) packets and -//! is at greater risk of being dropped due to a lost fragment. -//! -//! # Security -//! -//! Messages exchanged between peers are unauthenticated and connectionless - -//! it's trivial to forge a message appearing to come from a different peer, or -//! include malicious payloads. -//! -//! The security model of this implementation expects the peers to be running in -//! a trusted environment, secure from malicious users. -//! -//! # Peer Exchange -//! -//! When a gossip instance is initialised, it advertises itself to the set of -//! user-provided "seed" peers - other gossip instances with fixed, known -//! addresses. The peer then bootstraps the peer list from these seed peers. -//! -//! 
Peers are discovered through PONG messages from peers, which contain the -//! list of peers the sender has successfully communicated with. -//! -//! On receipt of a PONG frame, a node will send PING frames to all newly -//! discovered peers without adding the peer to its local peer list. Once the -//! discovered peer responds with a PONG, the peer is added to the peer list. -//! This acts as a liveness check, ensuring a node only adds peers it can -//! communicate with to its peer list. -//! -//! ```text -//! ┌──────────┐ -//! │ Seed │ -//! └──────────┘ -//! ▲ │ -//! │ │ -//! (1) │ │ (2) -//! PING │ │ PONG -//! │ │ (contains Peer A) -//! │ ▼ -//! ┌──────────┐ -//! │ Local │ -//! └──────────┘ -//! ▲ │ -//! │ │ -//! (4) │ │ (3) -//! PONG │ │ PING -//! │ │ -//! │ ▼ -//! ┌──────────┐ -//! │ Peer A │ -//! └──────────┘ -//! ``` -//! -//! The above illustrates this process when the "local" node joins: -//! -//! 1. Send PING messages to all configured seeds -//! 2. Receive a PONG response containing the list of all known peers -//! 3. Send PING frames to all discovered peers - do not add to peer list -//! 4. Receive PONG frames from discovered peers - add to peer list -//! -//! The peer addresses sent during PEX rounds contain the advertised peer -//! identity and the socket address the PONG sender discovered. -//! -//! # Dead Peer Removal -//! -//! All peers are periodically sent a PING frame, and a per-peer counter is -//! incremented. If a message of any sort is received (including the PONG -//! response to the soliciting PING), the peer's counter is reset to 0. -//! -//! Once a peer's counter reaches [`MAX_PING_UNACKED`], indicating a number of -//! PINGs have been sent without receiving any response, the peer is removed -//! from the node's peer list. -//! -//! Dead peers age out of the cluster once all nodes perform the above routine. -//! If a peer dies, it is still sent in PONG messages as part of PEX until it is -//! removed from the sender's peer list, but the receiver of the PONG will not -//! add it to the node's peer list unless it successfully commutates, ensuring -//! dead peers are not propagated. -//! -//! Ageing out dead peers is strictly an optimisation (and not for correctness). -//! A dead peer consumes a tiny amount of RAM, but also will have frames -//! dispatched to it - over time, as the number of dead peers accumulates, this -//! would cause the number of UDP frames sent per broadcast to increase, -//! needlessly increasing gossip traffic. -//! -//! This process is heavily biased towards reliability/deliverability and is too -//! slow for use as a general peer health check. - +#![doc = include_str!("../README.md")] #![deny(rustdoc::broken_intra_doc_links, rust_2018_idioms)] #![warn( clippy::clone_on_ref_ptr,
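The README added above spells out the dead-peer removal rule: bump a per-peer counter on every PING round, reset it on any received frame, and drop the peer once the counter reaches `MAX_PING_UNACKED`. A standalone sketch of that bookkeeping follows; the constant's value and the `PeerList` type here are illustrative, not the gossip crate's internals.

```rust
use std::collections::HashMap;

/// PING rounds a peer may miss before being dropped. The README calls this
/// MAX_PING_UNACKED; the value 10 is a placeholder, not the crate's constant.
const MAX_PING_UNACKED: usize = 10;

/// Per-peer counters of PINGs sent without hearing anything back.
#[derive(Default)]
struct PeerList {
    unacked: HashMap<String, usize>,
}

impl PeerList {
    /// Periodic PING round: bump every counter and drop peers that have been
    /// silent for MAX_PING_UNACKED rounds, returning the removed identities.
    fn ping_round(&mut self) -> Vec<String> {
        let mut dead = Vec::new();
        self.unacked.retain(|identity, count| {
            *count += 1;
            if *count >= MAX_PING_UNACKED {
                dead.push(identity.clone());
                false // remove from the peer list
            } else {
                true
            }
        });
        dead
    }

    /// Any received frame (including the PONG answering our PING) resets the
    /// sender's counter to zero.
    fn observe_frame(&mut self, identity: &str) {
        self.unacked.insert(identity.to_string(), 0);
    }
}

fn main() {
    let mut peers = PeerList::default();
    peers.observe_frame("peer-a");
    peers.observe_frame("peer-b");

    for _ in 0..MAX_PING_UNACKED {
        // peer-b keeps answering, peer-a has gone silent.
        peers.observe_frame("peer-b");
        let dead = peers.ping_round();
        if !dead.is_empty() {
            println!("removing dead peers: {dead:?}");
        }
    }

    assert!(!peers.unacked.contains_key("peer-a"));
    assert!(peers.unacked.contains_key("peer-b"));
}
```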
eb603bfb51094afc6d8fb8608342ecee036cb018
Dom Dwyer
2022-11-03 11:13:07
disable large variant lint
The DataFusionToExprError::UnsupportedOperants variant contains twice as many Expr as the second-biggest variant. This size difference is sufficient to cause a lint warning.
null
refactor: disable large variant lint The DataFusionToExprError::UnsupportedOperants variant contains twice as many Expr as the second-biggest variant. This size difference is sufficient to cause a lint warning.
diff --git a/predicate/src/delete_expr.rs b/predicate/src/delete_expr.rs index 848ae4c353..084ecd6186 100644 --- a/predicate/src/delete_expr.rs +++ b/predicate/src/delete_expr.rs @@ -20,6 +20,7 @@ pub(crate) fn expr_to_df(expr: DeleteExpr) -> Expr { } #[derive(Debug, Snafu)] +#[allow(clippy::large_enum_variant)] pub enum DataFusionToExprError { #[snafu(display("unsupported expression: {:?}", expr))] UnsupportedExpression { expr: Expr },
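The commit above opts to silence `clippy::large_enum_variant` rather than shrink the oversized variant. A standalone sketch of why the lint fires, plus the boxing alternative clippy usually suggests (stand-in types, not the real `Expr` or `DataFusionToExprError`):

```rust
/// One variant is far larger than the rest, so every value of the enum pays
/// for the biggest variant and `clippy::large_enum_variant` fires. The
/// `#[allow]` mirrors the approach taken in the diff above.
#[allow(dead_code, clippy::large_enum_variant)]
enum BigError {
    Small(u8),
    // This variant alone dictates the size of every `BigError` value.
    Huge([u64; 32]),
}

/// The usual alternative: box the large payload so the enum itself stays
/// small and the data lives on the heap.
#[allow(dead_code)]
enum BoxedError {
    Small(u8),
    Huge(Box<[u64; 32]>),
}

fn main() {
    println!("BigError:   {} bytes", std::mem::size_of::<BigError>());
    println!("BoxedError: {} bytes", std::mem::size_of::<BoxedError>());
}
```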
3ba0458653dfa884ddb74c5c7ee2921dee046537
Andrew Lamb
2022-11-02 10:20:26
Add object_store handler to querier so `remote get-table` works (#6014)
* feat: Add object_store handler to querier * test: end to end test for get-table from querier * fix: doc links
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: Add object_store handler to querier so `remote get-table` works (#6014) * feat: Add object_store handler to querier * test: end to end test for get-table from querier * fix: doc links Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 7cde0188ad..c3853ee0c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3938,6 +3938,7 @@ dependencies = [ "schema", "service_common", "service_grpc_catalog", + "service_grpc_object_store", "service_grpc_schema", "sharder", "snafu", diff --git a/influxdb_iox/tests/end_to_end_cases/remote.rs b/influxdb_iox/tests/end_to_end_cases/remote.rs index 7fbd5da7b8..fcff86e101 100644 --- a/influxdb_iox/tests/end_to_end_cases/remote.rs +++ b/influxdb_iox/tests/end_to_end_cases/remote.rs @@ -177,6 +177,44 @@ async fn remote_store_get_table() { } .boxed() })), + Step::Custom(Box::new(move |state: &mut StepTestState| { + async move { + // Test that we can download files from the querier (not just the router) + // to ensure it has the correct grpc services + let querier_addr = state.cluster().querier().querier_grpc_base().to_string(); + let namespace = state.cluster().namespace().to_string(); + + // Ensure files are actually written to the filesystem + let dir = tempfile::tempdir().expect("could not get temporary directory"); + + Command::cargo_bin("influxdb_iox") + .unwrap() + .current_dir(&dir) + .arg("-h") + .arg(&querier_addr) + .arg("remote") + .arg("store") + .arg("get-table") + .arg(&namespace) + .arg(&table_name) + .assert() + .success(); + + let table_dir = dir.as_ref().join(&table_name); + + // There should be a directory created that, by default, is named the same as + // the table + assert!(table_dir.is_dir()); + let entries: Vec<_> = table_dir.read_dir().unwrap().flatten().collect(); + // The two Parquet files for this table should be present + assert_eq!( + entries.len(), + 2, + "Expected 2 files in the directory, got: {entries:?}" + ); + } + .boxed() + })), ], ) .run() diff --git a/ioxd_querier/src/lib.rs b/ioxd_querier/src/lib.rs index 20786e490b..9a67e7b18c 100644 --- a/ioxd_querier/src/lib.rs +++ b/ioxd_querier/src/lib.rs @@ -95,6 +95,7 @@ impl<C: QuerierHandler + std::fmt::Debug + 'static> ServerType for QuerierServer ); add_service!(builder, self.server.handler().schema_service()); add_service!(builder, self.server.handler().catalog_service()); + add_service!(builder, self.server.handler().object_store_service()); serve_builder!(builder); @@ -204,7 +205,11 @@ pub async fn create_querier_server_type( ) .await?, ); - let querier_handler = Arc::new(QuerierHandlerImpl::new(args.catalog, Arc::clone(&database))); + let querier_handler = Arc::new(QuerierHandlerImpl::new( + args.catalog, + Arc::clone(&database), + Arc::clone(&args.object_store), + )); let querier = QuerierServer::new(args.metric_registry, querier_handler); Ok(Arc::new(QuerierServerType::new( diff --git a/querier/Cargo.toml b/querier/Cargo.toml index 89efbfac1f..87e20ad1b0 100644 --- a/querier/Cargo.toml +++ b/querier/Cargo.toml @@ -32,6 +32,7 @@ rand = "0.8.3" service_common = { path = "../service_common" } service_grpc_catalog = { path = "../service_grpc_catalog"} service_grpc_schema = { path = "../service_grpc_schema" } +service_grpc_object_store = { path = "../service_grpc_object_store" } schema = { path = "../schema" } sharder = { path = "../sharder" } snafu = "0.7" diff --git a/querier/src/handler.rs b/querier/src/handler.rs index 8f6c7679a9..9ad84565af 100644 --- a/querier/src/handler.rs +++ b/querier/src/handler.rs @@ -9,10 +9,13 @@ use futures::{ use influxdb_iox_client::{ catalog::generated_types::catalog_service_server::CatalogServiceServer, schema::generated_types::schema_service_server::SchemaServiceServer, + 
store::generated_types::object_store_service_server::ObjectStoreServiceServer, }; use iox_catalog::interface::Catalog; +use object_store::ObjectStore; use observability_deps::tracing::warn; use service_grpc_catalog::CatalogService; +use service_grpc_object_store::ObjectStoreService; use service_grpc_schema::SchemaService; use std::sync::Arc; use thiserror::Error; @@ -28,16 +31,15 @@ pub enum Error {} /// The [`QuerierHandler`] does nothing at this point #[async_trait] pub trait QuerierHandler: Send + Sync { - /// Acquire a [`SchemaService`] gRPC service implementation. - /// - /// [`SchemaService`]: generated_types::influxdata::iox::schema::v1::schema_service_server::SchemaService. + /// Acquire a [`SchemaServiceServer`] gRPC service implementation. fn schema_service(&self) -> SchemaServiceServer<SchemaService>; - /// Acquire a [`CatalogService`] gRPC service implementation. - /// - /// [`CatalogService`]: generated_types::influxdata::iox::catalog::v1::catalog_service_server::CatalogService. + /// Acquire a [`CatalogServiceServer`] gRPC service implementation. fn catalog_service(&self) -> CatalogServiceServer<CatalogService>; + /// Acquire an [`ObjectStoreServiceServer`] gRPC service implementation. + fn object_store_service(&self) -> ObjectStoreServiceServer<ObjectStoreService>; + /// Wait until the handler finished to shutdown. /// /// Use [`shutdown`](Self::shutdown) to trigger a shutdown. @@ -65,6 +67,9 @@ pub struct QuerierHandlerImpl { /// Database that handles query operation database: Arc<QuerierDatabase>, + /// The object store + object_store: Arc<dyn ObjectStore>, + /// Future that resolves when the background worker exits join_handles: Vec<(String, SharedJoinHandle)>, @@ -78,7 +83,11 @@ pub struct QuerierHandlerImpl { impl QuerierHandlerImpl { /// Initialize the Querier - pub fn new(catalog: Arc<dyn Catalog>, database: Arc<QuerierDatabase>) -> Self { + pub fn new( + catalog: Arc<dyn Catalog>, + database: Arc<QuerierDatabase>, + object_store: Arc<dyn ObjectStore>, + ) -> Self { let shutdown = CancellationToken::new(); let poison_cabinet = Arc::new(PoisonCabinet::new()); @@ -86,6 +95,7 @@ impl QuerierHandlerImpl { Self { catalog, database, + object_store, join_handles, shutdown, poison_cabinet, @@ -103,6 +113,13 @@ impl QuerierHandler for QuerierHandlerImpl { CatalogServiceServer::new(CatalogService::new(Arc::clone(&self.catalog))) } + fn object_store_service(&self) -> ObjectStoreServiceServer<ObjectStoreService> { + ObjectStoreServiceServer::new(ObjectStoreService::new( + Arc::clone(&self.catalog), + Arc::clone(&self.object_store), + )) + } + async fn join(&self) { // Need to poll handlers unordered to detect early exists of any worker in the list. 
let mut unordered: FuturesUnordered<_> = self @@ -176,14 +193,15 @@ mod tests { async fn new() -> Self { let metric_registry = Arc::new(metric::Registry::new()); let catalog = Arc::new(MemCatalog::new(Arc::clone(&metric_registry))) as _; - let object_store = Arc::new(InMemory::new()); + let object_store = Arc::new(InMemory::new()) as _; + let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); let exec = Arc::new(Executor::new(1)); let catalog_cache = Arc::new(CatalogCache::new_testing( Arc::clone(&catalog), time_provider, Arc::clone(&metric_registry), - Arc::clone(&object_store) as _, + Arc::clone(&object_store), &Handle::current(), )); // QuerierDatabase::new returns an error if there are no shards in the catalog @@ -211,7 +229,7 @@ mod tests { .await .unwrap(), ); - let querier = QuerierHandlerImpl::new(catalog, database); + let querier = QuerierHandlerImpl::new(catalog, database, object_store); Self { querier } }
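A minimal Rust sketch of the pattern this commit introduces, using simplified stand-in types rather than the real IOx and tonic ones (Catalog, ObjectStore, ObjectStoreService, QuerierHandler, MemCatalog and InMemory below are placeholders): the querier handler now holds an object store handle next to the catalog and can hand out an object-store service built from both, which the server wiring then registers alongside the schema and catalog services.

use std::sync::Arc;

// Stand-ins for the real `iox_catalog::interface::Catalog` and
// `object_store::ObjectStore` traits.
trait Catalog: Send + Sync {}
trait ObjectStore: Send + Sync {}

struct MemCatalog;
impl Catalog for MemCatalog {}

struct InMemory;
impl ObjectStore for InMemory {}

// Placeholder for the gRPC service type; the real one wraps a tonic server.
struct ObjectStoreService {
    catalog: Arc<dyn Catalog>,
    object_store: Arc<dyn ObjectStore>,
}

impl ObjectStoreService {
    fn new(catalog: Arc<dyn Catalog>, object_store: Arc<dyn ObjectStore>) -> Self {
        Self { catalog, object_store }
    }
}

struct QuerierHandler {
    catalog: Arc<dyn Catalog>,
    object_store: Arc<dyn ObjectStore>,
}

impl QuerierHandler {
    // Mirrors QuerierHandlerImpl::object_store_service(): clone the shared
    // handles into the service that is handed to the gRPC builder.
    fn object_store_service(&self) -> ObjectStoreService {
        ObjectStoreService::new(Arc::clone(&self.catalog), Arc::clone(&self.object_store))
    }
}

fn main() {
    let handler = QuerierHandler {
        catalog: Arc::new(MemCatalog),
        object_store: Arc::new(InMemory),
    };
    // In ioxd_querier this is what gets registered via
    // add_service!(builder, self.server.handler().object_store_service()).
    let _service = handler.object_store_service();
}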
6d147ec0083ea2e6e8f9884c84e9691f27aefa56
Dom Dwyer
2023-02-23 11:07:00
warn! -> error! and spelling
Fix a typo, use "error" level instead of "warn".
null
refactor: warn! -> error! and spelling Fix a typo, use "error" level instead of "warn".
diff --git a/wal/src/writer_thread.rs b/wal/src/writer_thread.rs index f340d5bbb8..10a7e2291d 100644 --- a/wal/src/writer_thread.rs +++ b/wal/src/writer_thread.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, thread::JoinHandle}; use generated_types::influxdata::iox::wal::v1 as proto; -use observability_deps::tracing::{debug, warn}; +use observability_deps::tracing::{debug, error}; use parking_lot::Mutex; use prost::Message; use tokio::sync::mpsc; @@ -134,7 +134,7 @@ impl WriterIoThread { match segments.open_segment.write(&proto_data) { Ok(summary) => WriteResult::Ok(summary), Err(e) => { - warn!(erorr=%e, "failed to write WAL batch"); + error!(error=%e, "failed to write WAL batch"); WriteResult::Err(e.to_string()) } }
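A small, runnable sketch of the corrected logging call, assuming the tracing and tracing-subscriber crates are used directly (the project routes through observability_deps::tracing); write_batch here is a made-up stand-in for the open segment's write call:

use std::io;
use tracing::error;

// Hypothetical fallible write standing in for the WAL segment write.
fn write_batch(data: &[u8]) -> Result<(), io::Error> {
    if data.is_empty() {
        return Err(io::Error::new(io::ErrorKind::InvalidInput, "empty batch"));
    }
    Ok(())
}

fn main() {
    // Install a subscriber so the event is actually emitted somewhere visible.
    tracing_subscriber::fmt::init();

    if let Err(e) = write_batch(&[]) {
        // `error = %e` records the Display form of the error as a structured
        // field at error level, matching the fixed call site (the old code
        // used warn! and misspelled the field name as `erorr`).
        error!(error = %e, "failed to write WAL batch");
    }
}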
bcdafa5f257e8697398dce72360012c9da9f4193
Dom Dwyer
2023-09-05 13:49:57
remove Arc wrapper from ShardedCache
This Arc wrapper is unnecessary.
null
refactor: remove Arc wrapper from ShardedCache This Arc wrapper is unnecessary.
diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs index 21e1c52a84..4bf9c7aeab 100644 --- a/ioxd_router/src/lib.rs +++ b/ioxd_router/src/lib.rs @@ -261,9 +261,7 @@ pub async fn create_router_server_type( // validator, and namespace auto-creator that reports cache hit/miss/update // metrics. let ns_cache = Arc::new(InstrumentedCache::new( - Arc::new(ShardedCache::new( - std::iter::repeat_with(MemoryNamespaceCache::default).take(10), - )), + ShardedCache::new(std::iter::repeat_with(MemoryNamespaceCache::default).take(10)), &metrics, )); diff --git a/router/benches/schema_validator.rs b/router/benches/schema_validator.rs index d6bf97d8f3..49bbe49fd1 100644 --- a/router/benches/schema_validator.rs +++ b/router/benches/schema_validator.rs @@ -43,9 +43,7 @@ fn bench(group: &mut BenchmarkGroup<WallTime>, tables: usize, columns_per_table: let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics))); let ns_cache = Arc::new(ReadThroughCache::new( - Arc::new(ShardedCache::new( - iter::repeat_with(MemoryNamespaceCache::default).take(10), - )), + ShardedCache::new(iter::repeat_with(MemoryNamespaceCache::default).take(10)), Arc::clone(&catalog), )); let validator = SchemaValidator::new(catalog, Arc::clone(&ns_cache), &metrics); diff --git a/router/src/namespace_cache/sharded_cache.rs b/router/src/namespace_cache/sharded_cache.rs index 9130a640a6..d08db6a1ee 100644 --- a/router/src/namespace_cache/sharded_cache.rs +++ b/router/src/namespace_cache/sharded_cache.rs @@ -23,7 +23,7 @@ impl<T> ShardedCache<T> { } #[async_trait] -impl<T> NamespaceCache for Arc<ShardedCache<T>> +impl<T> NamespaceCache for ShardedCache<T> where T: NamespaceCache, { @@ -86,9 +86,8 @@ mod tests { // The number of shards to hash into. const SHARDS: usize = 10; - let cache = Arc::new(ShardedCache::new( - iter::repeat_with(MemoryNamespaceCache::default).take(SHARDS), - )); + let cache = + ShardedCache::new(iter::repeat_with(MemoryNamespaceCache::default).take(SHARDS)); // Build a set of namespace -> unique integer to validate the shard // mapping later.
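A compact Rust sketch of the refactor's underlying pattern, with made-up minimal types (NamespaceCache, ShardedCache and InstrumentedCache here are stripped-down placeholders, not the router's real generics): implementing the trait for the concrete ShardedCache instead of for Arc<ShardedCache> lets construction sites drop the inner Arc::new and wrap only the outer layer when shared ownership is needed.

use std::sync::Arc;

trait NamespaceCache {
    fn get(&self, key: &str) -> Option<String>;
}

struct ShardedCache;

// Implementing the trait on the concrete type (rather than only on
// Arc<ShardedCache>) means callers can pass it by value; anything that still
// needs shared ownership wraps the *outer* layer in an Arc.
impl NamespaceCache for ShardedCache {
    fn get(&self, _key: &str) -> Option<String> {
        None
    }
}

struct InstrumentedCache<T> {
    inner: T,
}

impl<T: NamespaceCache> InstrumentedCache<T> {
    fn new(inner: T) -> Self {
        Self { inner }
    }
}

impl<T: NamespaceCache> NamespaceCache for InstrumentedCache<T> {
    fn get(&self, key: &str) -> Option<String> {
        // A real implementation would record hit/miss metrics here.
        self.inner.get(key)
    }
}

fn main() {
    // Before this commit the router built the equivalent of
    // Arc::new(InstrumentedCache::new(Arc::new(ShardedCache))); the inner
    // Arc::new is now gone, as in ioxd_router/src/lib.rs.
    let cache = Arc::new(InstrumentedCache::new(ShardedCache));
    assert_eq!(cache.get("ns"), None);
}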
969319dfd37e99b9e1fc977924b2d929e539b884
Stuart Carnie
2023-02-14 09:21:11
Allow all valid characters following a keyword (#6959)
* fix: Allow all valid characters following a keyword Closes #6382 * chore: Identified additional test cases
null
fix: Allow all valid characters following a keyword (#6959) * fix: Allow all valid characters following a keyword Closes #6382 * chore: Identified additional test cases
diff --git a/influxdb_influxql_parser/src/keywords.rs b/influxdb_influxql_parser/src/keywords.rs index 75fafabe49..6820dd923d 100644 --- a/influxdb_influxql_parser/src/keywords.rs +++ b/influxdb_influxql_parser/src/keywords.rs @@ -3,34 +3,25 @@ //! [keywords]: https://docs.influxdata.com/influxdb/v1.8/query_language/spec/#keywords use crate::internal::ParseResult; -use nom::branch::alt; -use nom::bytes::complete::{tag, tag_no_case}; +use nom::bytes::complete::tag_no_case; use nom::character::complete::alpha1; -use nom::combinator::{eof, fail, peek, verify}; +use nom::combinator::{fail, verify}; use nom::sequence::terminated; +use nom::FindToken; use once_cell::sync::Lazy; use std::collections::HashSet; use std::hash::{Hash, Hasher}; -/// Peeks at the input for acceptable characters separating a keyword. +/// Verifies the next character of `i` is valid following a keyword. /// -/// Will return a failure if one of the expected characters is not found. -fn keyword_follow_char(i: &str) -> ParseResult<&str, &str> { - peek(alt(( - tag(" "), - tag("\n"), - tag(";"), - tag("("), - tag(")"), - tag("\t"), - tag(","), - tag("="), - tag("!"), // possible != - tag("/"), // possible comment - tag("-"), // possible comment - eof, - fail, // Return a failure if we reach the end of this alternation - )))(i) +/// Keywords may be followed by whitespace, statement terminator (;), parens, +/// or conditional and arithmetic operators or EOF +fn keyword_follow_char(i: &str) -> ParseResult<&str, ()> { + if i.is_empty() || b" \n\t;(),=!><+-/*|&^%".find_token(i.bytes().next().unwrap()) { + Ok((i, ())) + } else { + fail(i) + } } /// Token represents a string with case-insensitive ordering and equality. @@ -162,6 +153,7 @@ pub fn keyword<'a>(keyword: &'static str) -> impl FnMut(&'a str) -> ParseResult< #[cfg(test)] mod test { use super::*; + use crate::assert_error; use assert_matches::assert_matches; #[test] @@ -278,13 +270,36 @@ mod test { // Will fail because keyword `OR` in `ORDER` is not recognized, as is not terminated by a valid character let err = or_keyword("ORDER").unwrap_err(); assert_matches!(err, nom::Err::Error(crate::internal::Error::Nom(_, kind)) if kind == nom::error::ErrorKind::Fail); + } - // test valid follow-on characters + #[test] + fn test_keyword_followed_by_valid_char() { let mut tag_keyword = keyword("TAG"); - let (rem, got) = tag_keyword("tag!").unwrap(); - assert_eq!(rem, "!"); + // followed by EOF + let (rem, got) = tag_keyword("tag").unwrap(); + assert_eq!(rem, ""); + assert_eq!(got, "tag"); + + // + // Test some of the expected characters + // + + let (rem, got) = tag_keyword("tag!=foo").unwrap(); + assert_eq!(rem, "!=foo"); assert_eq!(got, "tag"); + + let (rem, got) = tag_keyword("tag>foo").unwrap(); + assert_eq!(rem, ">foo"); + assert_eq!(got, "tag"); + + let (rem, got) = tag_keyword("tag&1 = foo").unwrap(); + assert_eq!(rem, "&1 = foo"); + assert_eq!(got, "tag"); + + // Fallible + + assert_error!(tag_keyword("tag$"), Fail); } #[test] diff --git a/iox_query/src/plan/influxql/rewriter.rs b/iox_query/src/plan/influxql/rewriter.rs index f3c12be092..ef0e146fa2 100644 --- a/iox_query/src/plan/influxql/rewriter.rs +++ b/iox_query/src/plan/influxql/rewriter.rs @@ -746,6 +746,20 @@ mod test { "SELECT SUM(field_f64::float) AS SUM_field_f64, SUM(field_i64::integer) AS SUM_field_i64, SUM(shared_field0::float) AS SUM_shared_field0 FROM temp_01" ); + let stmt = parse_select("SELECT * FROM merge_00, merge_01"); + let stmt = rewrite_statement(&namespace, &stmt).unwrap(); + assert_eq!( + 
stmt.to_string(), + "SELECT col0::float AS col0, col0::tag AS col0_1, col1::float AS col1, col1::tag AS col1_1, col2::string AS col2, col3::string AS col3 FROM merge_00, merge_01" + ); + + let stmt = parse_select("SELECT /col0/ FROM merge_00, merge_01"); + let stmt = rewrite_statement(&namespace, &stmt).unwrap(); + assert_eq!( + stmt.to_string(), + "SELECT col0::float AS col0, col0::tag AS col0_1 FROM merge_00, merge_01" + ); + // Fallible cases let stmt = parse_select("SELECT *::field + *::tag FROM cpu"); diff --git a/iox_query/src/plan/influxql/test_utils.rs b/iox_query/src/plan/influxql/test_utils.rs index cde772d06b..6919f713a6 100644 --- a/iox_query/src/plan/influxql/test_utils.rs +++ b/iox_query/src/plan/influxql/test_utils.rs @@ -114,6 +114,29 @@ pub(crate) mod database { .with_string_field_column_with_stats("shared_field0", None, None) .with_one_row_of_data(), ), + // Schemas for testing clashing column names when merging across measurements + Arc::new( + TestChunk::new("merge_00") + .with_id(next_chunk_id()) + .with_quiet() + .with_time_column() + .with_tag_column("col0") + .with_f64_field_column("col1") + .with_bool_field_column("col2") + .with_string_field_column_with_stats("col3", None, None) + .with_one_row_of_data(), + ), + Arc::new( + TestChunk::new("merge_01") + .with_id(next_chunk_id()) + .with_quiet() + .with_time_column() + .with_tag_column("col1") + .with_f64_field_column("col0") + .with_bool_field_column("col3") + .with_string_field_column_with_stats("col2", None, None) + .with_one_row_of_data(), + ), ] } }
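A dependency-free sketch of the idea behind the fix (plain std instead of nom's FindToken, so the names and error handling differ from the real keyword_follow_char): after a keyword, accept end of input or any byte from a single allowed set, which now includes the comparison, arithmetic and bitwise operator characters that the old tag-based alternation rejected.

// Allowed terminator bytes, mirroring b" \n\t;(),=!><+-/*|&^%" in keywords.rs.
const FOLLOW: &[u8] = b" \n\t;(),=!><+-/*|&^%";

fn keyword_follow_char(rest: &str) -> bool {
    match rest.bytes().next() {
        None => true, // EOF is a valid keyword terminator
        Some(b) => FOLLOW.contains(&b),
    }
}

fn main() {
    assert!(keyword_follow_char(""));         // "TAG" at end of input
    assert!(keyword_follow_char("!=foo"));    // "tag!=foo"
    assert!(keyword_follow_char(">foo"));     // "tag>foo"
    assert!(keyword_follow_char("&1 = foo")); // "tag&1 = foo"
    assert!(!keyword_follow_char("$"));       // "tag$" still fails
}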
e19ce984078b91d4c05b45ac34eeb933e87b5c82
Andrew Lamb
2023-03-01 12:25:01
Update datafusion + arrow/arrow-flight/parquet to 34.0.0 (#7084)
* chore: Update datafusion + arrow/arrow-flight/parquet to 34.0.0 * chore: Run cargo hakari tasks * chore: Update plans * chore: Update querier expected output * chore: Update querier tests to use insta * fix: sort output too ---------
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: Update datafusion + arrow/arrow-flight/parquet to 34.0.0 (#7084) * chore: Update datafusion + arrow/arrow-flight/parquet to 34.0.0 * chore: Run cargo hakari tasks * chore: Update plans * chore: Update querier expected output * chore: Update querier tests to use insta * fix: sort output too --------- Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index a410cd1f93..e383b52036 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,9 +100,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "arrow" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3724c874f1517cf898cd1c3ad18ab5071edf893c48e73139ab1e16cf0f2affe" +checksum = "f410d3907b6b3647b9e7bca4551274b2e3d716aa940afb67b7287257401da921" dependencies = [ "ahash 0.8.3", "arrow-arith", @@ -123,9 +123,9 @@ dependencies = [ [[package]] name = "arrow-arith" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e958823b8383ca14d0a2e973de478dd7674cd9f72837f8c41c132a0fda6a4e5e" +checksum = "f87391cf46473c9bc53dab68cb8872c3a81d4dfd1703f1c8aa397dba9880a043" dependencies = [ "arrow-array", "arrow-buffer", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "arrow-array" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db670eab50e76654065b5aed930f4367101fcddcb2223802007d1e0b4d5a2579" +checksum = "d35d5475e65c57cffba06d0022e3006b677515f99b54af33a7cd54f6cdd4a5b5" dependencies = [ "ahash 0.8.3", "arrow-buffer", @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0e01c931882448c0407bd32311a624b9f099739e94e786af68adc97016b5f2" +checksum = "68b4ec72eda7c0207727df96cf200f539749d736b21f3e782ece113e18c1a0a7" dependencies = [ "half 2.2.1", "num", @@ -164,9 +164,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf35d78836c93f80d9362f3ccb47ff5e2c5ecfc270ff42cdf1ef80334961d44" +checksum = "0a7285272c9897321dfdba59de29f5b05aeafd3cdedf104a941256d155f6d304" dependencies = [ "arrow-array", "arrow-buffer", @@ -180,9 +180,9 @@ dependencies = [ [[package]] name = "arrow-csv" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6aa7c2531d89d01fed8c469a9b1bf97132a0bdf70b4724fe4bbb4537a50880" +checksum = "981ee4e7f6a120da04e00d0b39182e1eeacccb59c8da74511de753c56b7fddf7" dependencies = [ "arrow-array", "arrow-buffer", @@ -199,9 +199,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea50db4d1e1e4c2da2bfdea7b6d2722eef64267d5ab680d815f7ae42428057f5" +checksum = "27cc673ee6989ea6e4b4e8c7d461f7e06026a096c8f0b1a7288885ff71ae1e56" dependencies = [ "arrow-buffer", "arrow-schema", @@ -211,9 +211,9 @@ dependencies = [ [[package]] name = "arrow-flight" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ad4c883d509d89f05b2891ad889729f17ab2191b5fd22b0cf3660a28cc40af5" +checksum = "bd16945f8f3be0f6170b8ced60d414e56239d91a16a3f8800bc1504bc58b2592" dependencies = [ "arrow-array", "arrow-buffer", @@ -234,9 +234,9 @@ dependencies = [ [[package]] name = "arrow-ipc" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4042fe6585155d1ec28a8e4937ec901a3ca7a19a22b9f6cd3f551b935cd84f5" +checksum = "e37b8b69d9e59116b6b538e8514e0ec63a30f08b617ce800d31cb44e3ef64c1a" dependencies = [ 
"arrow-array", "arrow-buffer", @@ -248,9 +248,9 @@ dependencies = [ [[package]] name = "arrow-json" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c907c4ab4f26970a3719dc06e78e8054a01d0c96da3664d23b941e201b33d2b" +checksum = "80c3fa0bed7cfebf6d18e46b733f9cb8a1cb43ce8e6539055ca3e1e48a426266" dependencies = [ "arrow-array", "arrow-buffer", @@ -267,9 +267,9 @@ dependencies = [ [[package]] name = "arrow-ord" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e131b447242a32129efc7932f58ed8931b42f35d8701c1a08f9f524da13b1d3c" +checksum = "d247dce7bed6a8d6a3c6debfa707a3a2f694383f0c692a39d736a593eae5ef94" dependencies = [ "arrow-array", "arrow-buffer", @@ -281,9 +281,9 @@ dependencies = [ [[package]] name = "arrow-row" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b591ef70d76f4ac28dd7666093295fece0e5f9298f49af51ea49c001e1635bb6" +checksum = "8d609c0181f963cea5c70fddf9a388595b5be441f3aa1d1cdbf728ca834bbd3a" dependencies = [ "ahash 0.8.3", "arrow-array", @@ -296,15 +296,15 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb327717d87eb94be5eff3b0cb8987f54059d343ee5235abf7f143c85f54cfc8" +checksum = "64951898473bfb8e22293e83a44f02874d2257514d49cd95f9aa4afcff183fbc" [[package]] name = "arrow-select" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d3c389d1cea86793934f31594f914c8547d82e91e3411d4833ad0aac3266a7" +checksum = "2a513d89c2e1ac22b28380900036cf1f3992c6443efc5e079de631dcf83c6888" dependencies = [ "arrow-array", "arrow-buffer", @@ -315,9 +315,9 @@ dependencies = [ [[package]] name = "arrow-string" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee67790496dd310ddbf5096870324431e89aa76453e010020ac29b1184d356" +checksum = "5288979b2705dae1114c864d73150629add9153b9b8f1d7ee3963db94c372ba5" dependencies = [ "arrow-array", "arrow-buffer", @@ -400,6 +400,8 @@ dependencies = [ "pin-project-lite", "tokio", "xz2", + "zstd 0.11.2+zstd.1.5.2", + "zstd-safe 5.0.2+zstd.1.5.2", ] [[package]] @@ -1411,8 +1413,8 @@ dependencies = [ [[package]] name = "datafusion" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "ahash 0.8.3", "arrow", @@ -1453,12 +1455,13 @@ dependencies = [ "url", "uuid", "xz2", + "zstd 0.11.2+zstd.1.5.2", ] [[package]] name = "datafusion-common" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "arrow", "chrono", @@ -1470,8 +1473,8 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "18.0.0" -source = 
"git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "ahash 0.8.3", "arrow", @@ -1482,8 +1485,8 @@ dependencies = [ [[package]] name = "datafusion-optimizer" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "arrow", "async-trait", @@ -1492,14 +1495,15 @@ dependencies = [ "datafusion-expr", "datafusion-physical-expr", "hashbrown 0.13.2", + "itertools", "log", "regex-syntax", ] [[package]] name = "datafusion-physical-expr" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "ahash 0.8.3", "arrow", @@ -1528,8 +1532,8 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "arrow", "chrono", @@ -1545,8 +1549,8 @@ dependencies = [ [[package]] name = "datafusion-row" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "arrow", "datafusion-common", @@ -1556,8 +1560,8 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "18.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=f6e49ac7a027abb95d8b7fa755502dfa7d53c21c#f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" +version = "19.0.0" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12#ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" dependencies = [ "arrow-schema", "datafusion-common", @@ -3963,9 +3967,9 @@ dependencies = [ [[package]] name = "parquet" -version = "33.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1b076829801167d889795cd1957989055543430fa1469cb1f6e32b789bfc764" +checksum = "7ac135ecf63ebb5f53dda0921b0b76d6048b3ef631a5f4760b9e8f863ff00cfa" dependencies = [ "ahash 0.8.3", "arrow-array", @@ -3991,7 +3995,7 @@ dependencies = [ "thrift", "tokio", "twox-hash", - "zstd", + "zstd 0.12.3+zstd.1.5.2", ] [[package]] @@ -4021,7 +4025,7 @@ dependencies = [ "tokio", "uuid", "workspace-hack", - "zstd", + "zstd 0.12.3+zstd.1.5.2", ] [[package]] @@ -4426,9 +4430,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.6" +version = "0.11.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" +checksum = "a24be1d23b4552a012093e1b93697b73d644ae9590e3253d878d0e77d411b614" dependencies = [ "bytes", "heck", @@ -4493,6 +4497,7 @@ dependencies = [ "futures", "generated_types", "influxdb_iox_client", + "insta", "iox_catalog", "iox_query", "iox_tests", @@ -4833,7 +4838,7 @@ dependencies = [ "thiserror", "tokio", "tracing", - "zstd", + "zstd 0.12.3+zstd.1.5.2", ] [[package]] @@ -6742,8 +6747,8 @@ dependencies = [ "winapi", "windows-sys 0.42.0", "windows-sys 0.45.0", - "zstd", - "zstd-safe", + "zstd 0.12.3+zstd.1.5.2", + "zstd-safe 6.0.4+zstd.1.5.4", "zstd-sys", ] @@ -6820,13 +6825,32 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe 5.0.2+zstd.1.5.2", +] + [[package]] name = "zstd" version = "0.12.3+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" dependencies = [ - "zstd-safe", + "zstd-safe 6.0.4+zstd.1.5.4", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f39e6f819c..0b62296dce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,12 +115,12 @@ edition = "2021" license = "MIT OR Apache-2.0" [workspace.dependencies] -arrow = { version = "33.0.0" } -arrow-flight = { version = "33.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="f6e49ac7a027abb95d8b7fa755502dfa7d53c21c", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" } +arrow = { version = "34.0.0" } +arrow-flight = { version = "34.0.0" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" } hashbrown = { version = "0.13.2" } -parquet = { version = "33.0.0" } +parquet = { version = "34.0.0" } # This profile optimizes for runtime performance and small binary size at the expense of longer # build times. It's most suitable for final release builds. diff --git a/arrow_util/src/test_util.rs b/arrow_util/src/test_util.rs index 8f3a433db1..ba47946618 100644 --- a/arrow_util/src/test_util.rs +++ b/arrow_util/src/test_util.rs @@ -23,9 +23,7 @@ macro_rules! assert_batches_eq { let expected_lines: Vec<String> = $EXPECTED_LINES.into_iter().map(|s| s.to_string()).collect(); - let formatted = arrow_util::display::pretty_format_batches($CHUNKS).unwrap(); - - let actual_lines = formatted.trim().split('\n').collect::<Vec<_>>(); + let actual_lines = arrow_util::test_util::batches_to_lines($CHUNKS); assert_eq!( expected_lines, actual_lines, @@ -48,24 +46,10 @@ macro_rules! assert_batches_eq { #[macro_export] macro_rules! 
assert_batches_sorted_eq { ($EXPECTED_LINES: expr, $CHUNKS: expr) => { - let mut expected_lines: Vec<String> = $EXPECTED_LINES.iter().map(|&s| s.into()).collect(); - - // sort except for header + footer - let num_lines = expected_lines.len(); - if num_lines > 3 { - expected_lines.as_mut_slice()[2..num_lines - 1].sort_unstable() - } - - let formatted = arrow_util::display::pretty_format_batches($CHUNKS).unwrap(); - // fix for windows: \r\n --> - - let mut actual_lines: Vec<&str> = formatted.trim().lines().collect(); + let expected_lines: Vec<String> = $EXPECTED_LINES.iter().map(|&s| s.into()).collect(); + let expected_lines = arrow_util::test_util::sort_lines(expected_lines); - // sort except for header + footer - let num_lines = actual_lines.len(); - if num_lines > 3 { - actual_lines.as_mut_slice()[2..num_lines - 1].sort_unstable() - } + let actual_lines = arrow_util::test_util::batches_to_sorted_lines($CHUNKS); assert_eq!( expected_lines, actual_lines, @@ -75,6 +59,47 @@ macro_rules! assert_batches_sorted_eq { }; } +/// Converts the [`RecordBatch`]es into a pretty printed output suitable for +/// comparing in tests +/// +/// Example: +/// +/// ```text +/// "+-----+------+------+--------------------------------+", +/// "| foo | host | load | time |", +/// "+-----+------+------+--------------------------------+", +/// "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |", +/// "| | a | 14.0 | 1970-01-01T00:00:00.000010001Z |", +/// "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |", +/// "| | b | 5.0 | 1970-01-01T00:00:00.000000011Z |", +/// "| | z | 0.0 | 1970-01-01T00:00:00Z |", +/// "+-----+------+------+--------------------------------+", +/// ``` +pub fn batches_to_lines(batches: &[RecordBatch]) -> Vec<String> { + crate::display::pretty_format_batches(batches) + .unwrap() + .trim() + .lines() + .map(|s| s.to_string()) + .collect() +} + +/// Converts the [`RecordBatch`]es into a pretty printed output suitable for +/// comparing in tests where sorting does not matter. 
+pub fn batches_to_sorted_lines(batches: &[RecordBatch]) -> Vec<String> { + sort_lines(batches_to_lines(batches)) +} + +/// Sorts the lines (assumed to be the output of `batches_to_lines` for stable comparison) +pub fn sort_lines(mut lines: Vec<String>) -> Vec<String> { + // sort except for header + footer + let num_lines = lines.len(); + if num_lines > 3 { + lines.as_mut_slice()[2..num_lines - 1].sort_unstable() + } + lines +} + // sort a record batch by all columns (to provide a stable output order for test // comparison) pub fn sort_record_batch(batch: RecordBatch) -> RecordBatch { diff --git a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected index f99a078835..76057501ba 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet.sql.expected @@ -12,15 +12,13 @@ | plan_type | plan | ---------- | logical_plan | Sort: table.tag ASC NULLS LAST | -| | Projection: table.bar, table.foo, table.tag, table.time | -| | TableScan: table projection=[bar, foo, tag, time] | +| | TableScan: table projection=[bar, foo, tag, time] | | physical_plan | SortExec: expr=[tag@2 ASC NULLS LAST] | -| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | | | | ---------- -- SQL: SELECT * FROM "table" WHERE tag='A'; @@ -34,17 +32,15 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: tag@2 = A | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= 
tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| logical_plan | Filter: table.tag = Dictionary(Int32, Utf8("A")) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | | | | ---------- -- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2; @@ -58,17 +54,15 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: foo@1 = 1 AND bar@0 = 2 | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| logical_plan | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: foo@1 = 1 AND bar@0 = 2 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | | | | ---------- -- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag; @@ -84,20 +78,18 @@ | plan_type | plan | ---------- | logical_plan | Sort: table.tag ASC NULLS LAST | -| | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.time = TimestampNanosecond(0, None) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] | +| | Filter: table.time = TimestampNanosecond(0, None) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = 
TimestampNanosecond(0, None)] | | physical_plan | SortExec: expr=[tag@2 ASC NULLS LAST] | | | CoalescePartitionsExec | -| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: time@3 = 0 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@3 = 0 | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | | | | ---------- -- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00'); @@ -111,16 +103,14 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] 
| +| logical_plan | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected index 4757303f01..a1f58130ad 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/dedup_and_predicates_parquet_ingester.sql.expected @@ -12,16 +12,14 @@ | plan_type | plan | ---------- | logical_plan | Sort: table.tag ASC NULLS LAST | -| | Projection: table.bar, table.foo, table.tag, table.time | -| | TableScan: table projection=[bar, foo, tag, time] | +| | TableScan: table projection=[bar, foo, tag, time] | | physical_plan | SortExec: expr=[tag@2 ASC NULLS LAST] | -| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: expr=[tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: expr=[tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | | | | ---------- -- SQL: SELECT * FROM "table" WHERE tag='A'; @@ -35,18 +33,16 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: 
target_batch_size=8192 | -| | FilterExec: tag@2 = A | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: expr=[tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | +| logical_plan | Filter: table.tag = Dictionary(Int32, Utf8("A")) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A"))] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: expr=[tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | | | | ---------- -- SQL: SELECT * FROM "table" WHERE foo=1 AND bar=2; @@ -60,18 +56,16 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: foo@1 = 1 AND bar@0 = 2 | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: expr=[tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | +| logical_plan | Filter: table.foo = Float64(1) AND table.bar = Float64(2) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.foo = Float64(1), table.bar = Float64(2)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: foo@1 = 1 AND bar@0 = 2 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: expr=[tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | | | | ---------- -- SQL: SELECT * FROM "table" WHERE time=to_timestamp('1970-01-01T00:00:00.000000000+00:00') ORDER BY tag; @@ -87,21 +81,19 @@ | plan_type | plan | ---------- | logical_plan | Sort: table.tag ASC NULLS LAST | -| | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.time = TimestampNanosecond(0, None) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, 
None)] | +| | Filter: table.time = TimestampNanosecond(0, None) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.time = TimestampNanosecond(0, None)] | | physical_plan | SortExec: expr=[tag@2 ASC NULLS LAST] | | | CoalescePartitionsExec | -| | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: time@3 = 0 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: expr=[tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@3 = 0 | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time = TimestampNanosecond(0, None), pruning_predicate=time_min@0 <= 0 AND 0 <= time_max@1, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: expr=[tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | | | | ---------- -- SQL: SELECT * FROM "table" WHERE tag='A' AND foo=1 AND time=to_timestamp('1970-01-01T00:00:00.000000000+00:00'); @@ -115,17 +107,15 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: table.bar, table.foo, table.tag, table.time | -| | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | -| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | -| physical_plan | ProjectionExec: expr=[bar@0 as bar, foo@1 as foo, tag@2 as tag, time@3 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | -| | DeduplicateExec: [tag@2 ASC,time@3 ASC] | -| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | -| | SortExec: expr=[tag@2 ASC,time@3 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | +| logical_plan | Filter: table.tag = Dictionary(Int32, Utf8("A")) AND table.foo = Float64(1) AND table.time = TimestampNanosecond(0, None) | +| | TableScan: table projection=[bar, foo, tag, time], partial_filters=[table.tag = Dictionary(Int32, Utf8("A")), table.foo = Float64(1), table.time = TimestampNanosecond(0, None)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: tag@2 = A AND foo@1 = 1 AND time@3 = 0 | +| | 
DeduplicateExec: [tag@2 ASC,time@3 ASC] | +| | SortPreservingMergeExec: [tag@2 ASC,time@3 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=tag = Dictionary(Int32, Utf8("A")) AND time = TimestampNanosecond(0, None), pruning_predicate=tag_min@0 <= A AND A <= tag_max@1 AND time_min@2 <= 0 AND 0 <= time_max@3, output_ordering=[tag@2 ASC, time@3 ASC], projection=[bar, foo, tag, time] | +| | SortExec: expr=[tag@2 ASC,time@3 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=2 | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected index 8b0370f508..e4aa50d6ce 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected @@ -104,16 +104,15 @@ | plan_type | plan | ---------- | Plan with Metrics | CoalescePartitionsExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | -| | ProjectionExec: expr=[area@0 as area, city@1 as city, max_temp@2 as max_temp, min_temp@3 as min_temp, state@4 as state, time@5 as time], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | -| | CoalesceBatchesExec: target_batch_size=8192, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | -| | FilterExec: state@4 = MA, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=3, metrics=[fetch_time=1.234ms, repart_time=1.234ms, send_time=1.234ms] | -| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | -| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, num_dupes=2, output_rows=5, spill_count=0, spilled_bytes=0] | -| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] | -| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=0, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning_total=1.234ms, time_elapsed_scanning_until_data=1.234ms] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=632, 
elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=3, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning_total=1.234ms, time_elapsed_scanning_until_data=1.234ms] | -| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=1219, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=5, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning_total=1.234ms, time_elapsed_scanning_until_data=1.234ms] | +| | CoalesceBatchesExec: target_batch_size=8192, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | +| | FilterExec: state@4 = MA, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=3, metrics=[fetch_time=1.234ms, repart_time=1.234ms, send_time=1.234ms] | +| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=10, spill_count=0, spilled_bytes=0] | +| | DeduplicateExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, num_dupes=2, output_rows=5, spill_count=0, spilled_bytes=0] | +| | SortPreservingMergeExec: [state@4 ASC,city@1 ASC,time@5 ASC], metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] | +| | UnionExec, metrics=[elapsed_compute=1.234ms, mem_used=0, output_rows=7, spill_count=0, spilled_bytes=0] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=474, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=4, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=0, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning_total=1.234ms, time_elapsed_scanning_until_data=1.234ms] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, output_ordering=[state@4 ASC, city@1 ASC, time@5 ASC], projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=632, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=3, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=3, 
row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning_total=1.234ms, time_elapsed_scanning_until_data=1.234ms] | +| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, predicate=state = Dictionary(Int32, Utf8("MA")), pruning_predicate=state_min@0 <= MA AND MA <= state_max@1, projection=[area, city, max_temp, min_temp, state, time], metrics=[bytes_scanned=1219, elapsed_compute=1.234ms, mem_used=0, num_predicate_creation_errors=0, output_rows=5, page_index_eval_time=1.234ms, page_index_rows_filtered=0, predicate_evaluation_errors=0, pushdown_eval_time=1.234ms, pushdown_rows_filtered=5, row_groups_pruned=0, spill_count=0, spilled_bytes=0, time_elapsed_opening=1.234ms, time_elapsed_processing=1.234ms, time_elapsed_scanning_total=1.234ms, time_elapsed_scanning_until_data=1.234ms] | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected index 253415dca1..c006608c60 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet_many.sql.expected @@ -10,28 +10,26 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: COUNT(UInt8(1)), SUM(m.f) | -| | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)), SUM(m.f)]] | -| | TableScan: m projection=[f] | -| physical_plan | ProjectionExec: expr=[COUNT(UInt8(1))@0 as COUNT(UInt8(1)), SUM(m.f)@1 as SUM(m.f)] | -| | AggregateExec: mode=Final, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] | -| | CoalescePartitionsExec | -| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] | -| | UnionExec | -| | ProjectionExec: expr=[f@0 as f] | -| | DeduplicateExec: [tag@1 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [tag@1 ASC,time@2 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000004.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000005.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000006.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000007.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, 
partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000008.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000009.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | -| | ParquetExec: limit=None, partitions={4 groups: [[1/1/1/1/00000000-0000-0000-0000-00000000000a.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000b.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/1/00000000-0000-0000-0000-00000000000d.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000e.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000f.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000010.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000011.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000012.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000013.parquet]]}, projection=[f] | +| logical_plan | Aggregate: groupBy=[[]], aggr=[[COUNT(UInt8(1)), SUM(m.f)]] | +| | TableScan: m projection=[f] | +| physical_plan | AggregateExec: mode=Final, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] | +| | CoalescePartitionsExec | +| | AggregateExec: mode=Partial, gby=[], aggr=[COUNT(UInt8(1)), SUM(m.f)] | +| | UnionExec | +| | ProjectionExec: expr=[f@0 as f] | +| | DeduplicateExec: [tag@1 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [tag@1 ASC,time@2 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000004.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000005.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000006.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000007.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000008.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000009.parquet]]}, output_ordering=[tag@1 ASC, time@2 ASC], projection=[f, tag, time] | +| | ParquetExec: limit=None, partitions={4 groups: [[1/1/1/1/00000000-0000-0000-0000-00000000000a.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000b.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000c.parquet], [1/1/1/1/00000000-0000-0000-0000-00000000000d.parquet, 1/1/1/1/00000000-0000-0000-0000-00000000000e.parquet, 
1/1/1/1/00000000-0000-0000-0000-00000000000f.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000010.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000011.parquet], [1/1/1/1/00000000-0000-0000-0000-000000000012.parquet, 1/1/1/1/00000000-0000-0000-0000-000000000013.parquet]]}, projection=[f] | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected index e4655f8718..33b5e68ef5 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected @@ -17,10 +17,8 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | TableScan: restaurant projection=[count, system, time, town] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, projection=[count, system, time, town] | +| logical_plan | TableScan: restaurant projection=[count, system, time, town] | +| physical_plan | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200; @@ -40,13 +38,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.count > UInt64(200) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: count@0 > 200 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.count > UInt64(200) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: count@0 > 200 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200), pruning_predicate=count_max@0 > 200, projection=[count, system, time, town] | | | | ---------- -- SQL: EXPLAIN SELECT * from restaurant where count > 200.0; @@ -54,13 +50,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: CAST(restaurant.count AS Float64) > Float64(200) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: CAST(count@0 AS Float64) > 200 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, 
town] | +| logical_plan | Filter: CAST(restaurant.count AS Float64) > Float64(200) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.count AS Float64) > Float64(200)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: CAST(count@0 AS Float64) > 200 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=CAST(count AS Float64) > Float64(200), projection=[count, system, time, town] | | | | ---------- -- SQL: EXPLAIN SELECT * from restaurant where system > 4.0; @@ -68,13 +62,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(4) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: system@1 > 4 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.system > Float64(4) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: system@1 > 4 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4), pruning_predicate=system_max@0 > 4, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury'; @@ -93,13 +85,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: count@0 > 200 AND town@3 != tewsbury | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury"))] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: count@0 > 200 AND town@3 != tewsbury | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")), 
pruning_predicate=count_max@0 > 200 AND (town_min@1 != tewsbury OR tewsbury != town_max@2), projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence'); @@ -117,13 +107,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND (system@1 = 5 OR town@3 = lawrence) | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))), pruning_predicate=count_max@0 > 200 AND (town_min@1 != tewsbury OR tewsbury != town_max@2) AND (system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2), projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and town != 'tewsbury' and (system =5 or town = 'lawrence') and count < 40000; @@ -140,13 +128,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = 
Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND system@1 = 5 OR town@3 = lawrence AND count@0 < 40000 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2 AND count_min@5 < 40000, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.count > UInt64(200) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND (restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence"))) AND restaurant.count < UInt64(40000) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system = Float64(5) OR restaurant.town = Dictionary(Int32, Utf8("lawrence")), restaurant.count < UInt64(40000)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: count@0 > 200 AND town@3 != tewsbury AND (system@1 = 5 OR town@3 = lawrence) AND count@0 < 40000 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND town != Dictionary(Int32, Utf8("tewsbury")) AND (system = Float64(5) OR town = Dictionary(Int32, Utf8("lawrence"))) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND (town_min@1 != tewsbury OR tewsbury != town_max@2) AND (system_min@3 <= 5 AND 5 <= system_max@4 OR town_min@1 <= lawrence AND lawrence <= town_max@2) AND count_min@5 < 40000, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where count > 200 and count < 40000; @@ -165,13 +151,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: count@0 > 200 AND count@0 < 40000 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.count > UInt64(200) AND restaurant.count < UInt64(40000) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.count > UInt64(200), restaurant.count < UInt64(40000)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: count@0 > 200 AND count@0 < 40000 | +| | ParquetExec: limit=None, partitions={1 group: 
[[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=count > UInt64(200) AND count < UInt64(40000), pruning_predicate=count_max@0 > 200 AND count_min@1 < 40000, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 4.0 and system < 7.0; @@ -191,13 +175,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: system@1 > 4 AND system@1 < 7 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.system > Float64(4) AND restaurant.system < Float64(7) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(4), restaurant.system < Float64(7)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: system@1 > 4 AND system@1 < 7 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(4) AND system < Float64(7), pruning_predicate=system_max@0 > 4 AND system_min@1 < 7, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 and system < 7.0; @@ -214,13 +196,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: system@1 > 5 AND system@1 < 7 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.system > Float64(5) AND restaurant.system < Float64(7) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.system < Float64(7)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: system@1 > 5 AND system@1 < 7 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND system < Float64(7), pruning_predicate=system_max@0 > 5 AND system_min@1 < 7, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 and town != 'tewsbury' and 7.0 > system; @@ -236,13 +216,11 @@ ---------- | plan_type | plan | 
---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7, projection=[count, system, time, town] | +| logical_plan | Filter: restaurant.system > Float64(5) AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > restaurant.system | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), restaurant.town != Dictionary(Int32, Utf8("tewsbury")), Float64(7) > restaurant.system] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: system@1 > 5 AND town@3 != tewsbury AND 7 > system@1 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND town != Dictionary(Int32, Utf8("tewsbury")) AND Float64(7) > system, pruning_predicate=system_max@0 > 5 AND (town_min@1 != tewsbury OR tewsbury != town_max@2) AND system_min@3 < 7, projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 and 'tewsbury' != town and system < 7.0 and (count = 632 or town = 'reading'); @@ -257,13 +235,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND count@0 = 632 OR town@3 = reading | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND town_min@1 != tewsbury OR tewsbury != town_max@2 AND system_min@3 < 7 AND count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2, projection=[count, system, 
time, town] | +| logical_plan | Filter: restaurant.system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != restaurant.town AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[restaurant.system > Float64(5), Dictionary(Int32, Utf8("tewsbury")) != restaurant.town, restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: system@1 > 5 AND tewsbury != town@3 AND system@1 < 7 AND (count@0 = 632 OR town@3 = reading) | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=system > Float64(5) AND Dictionary(Int32, Utf8("tewsbury")) != town AND system < Float64(7) AND (count = UInt64(632) OR town = Dictionary(Int32, Utf8("reading"))), pruning_predicate=system_max@0 > 5 AND (town_min@1 != tewsbury OR tewsbury != town_max@2) AND system_min@3 < 7 AND (count_min@4 <= 632 AND 632 <= count_max@5 OR town_min@1 <= reading AND reading <= town_max@2), projection=[count, system, time, town] | | | | ---------- -- SQL: SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00'); @@ -275,13 +251,11 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: restaurant.count, restaurant.system, restaurant.time, restaurant.town | -| | Filter: Float64(5) < restaurant.system AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) AND restaurant.time > TimestampNanosecond(130, None) | -| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[Float64(5) < restaurant.system, restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading")), restaurant.time > TimestampNanosecond(130, None)] | -| physical_plan | ProjectionExec: expr=[count@0 as count, system@1 as system, time@2 as time, town@3 as town] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: 5 < system@1 AND town@3 != tewsbury AND system@1 < 7 AND count@0 = 632 OR town@3 = reading AND time@2 > 130 | -| | EmptyExec: produce_one_row=false | +| logical_plan | Filter: Float64(5) < restaurant.system AND restaurant.town != Dictionary(Int32, Utf8("tewsbury")) AND restaurant.system < Float64(7) AND (restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading"))) AND restaurant.time > TimestampNanosecond(130, None) | +| | TableScan: restaurant projection=[count, system, time, town], partial_filters=[Float64(5) < restaurant.system, restaurant.town != Dictionary(Int32, Utf8("tewsbury")), restaurant.system < Float64(7), restaurant.count = UInt64(632) OR restaurant.town = Dictionary(Int32, Utf8("reading")), restaurant.time > TimestampNanosecond(130, None)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: 5 < system@1 AND town@3 != tewsbury AND system@1 < 7 AND (count@0 = 632 OR town@3 = reading) AND time@2 > 130 | +| | EmptyExec: produce_one_row=false | | | | ---------- -- SQL: SELECT * from restaurant where system > 5.0 
and 'tewsbury' != town and system < 7.0 and town = 'reading'; @@ -317,7 +291,7 @@ | | TableScan: restaurant projection=[count, system, time, town], partial_filters=[CAST(restaurant.town AS Utf8) AS restaurant.town LIKE Utf8("%foo%") OR CAST(restaurant.town AS Utf8) AS restaurant.town LIKE Utf8("%bar%") OR CAST(restaurant.town AS Utf8) AS restaurant.town LIKE Utf8("%baz%") AS influx_regex_match(restaurant.town,Utf8("foo|bar|baz")), CAST(restaurant.town AS Utf8) AS restaurant.town NOT LIKE Utf8("%one%") AND CAST(restaurant.town AS Utf8) AS restaurant.town NOT LIKE Utf8("%two%") AS influx_regex_not_match(restaurant.town,Utf8("one|two")), CAST(restaurant.town AS Utf8) LIKE Utf8("%foo%") OR CAST(restaurant.town AS Utf8) LIKE Utf8("%bar%") OR CAST(restaurant.town AS Utf8) LIKE Utf8("%baz%"), CAST(restaurant.town AS Utf8) NOT LIKE Utf8("%one%"), CAST(restaurant.town AS Utf8) NOT LIKE Utf8("%two%")] | | physical_plan | ProjectionExec: expr=[count@1 as count, system@2 as system, time@3 as time, town@4 as town] | | | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %foo% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %bar% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %baz% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %one% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %two% | +| | FilterExec: (CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %foo% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %bar% OR CAST(restaurant.town AS Utf8)restaurant.town@0 LIKE %baz%) AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %one% AND CAST(restaurant.town AS Utf8)restaurant.town@0 NOT LIKE %two% | | | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | | | ProjectionExec: expr=[CAST(town@3 AS Utf8) as CAST(restaurant.town AS Utf8)restaurant.town, count@0 as count, system@1 as system, time@2 as time, town@3 as town] | | | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=(CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%foo%") OR CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%bar%") OR CAST(town AS Utf8) AS restaurant.town LIKE Utf8("%baz%")) AND CAST(town AS Utf8) AS restaurant.town NOT LIKE Utf8("%one%") AND CAST(town AS Utf8) AS restaurant.town NOT LIKE Utf8("%two%") AND (CAST(town AS Utf8) LIKE Utf8("%foo%") OR CAST(town AS Utf8) LIKE Utf8("%bar%") OR CAST(town AS Utf8) LIKE Utf8("%baz%")) AND CAST(town AS Utf8) NOT LIKE Utf8("%one%") AND CAST(town AS Utf8) NOT LIKE Utf8("%two%"), projection=[count, system, time, town] | diff --git a/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected index 9e30861e3a..1ad4b3da8e 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/retention.sql.expected @@ -14,19 +14,17 @@ | plan_type | plan | ---------- | logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.load ASC NULLS LAST, cpu.time ASC NULLS LAST | -| | Projection: cpu.host, cpu.load, cpu.time | -| | TableScan: cpu projection=[host, load, time] | +| | TableScan: cpu projection=[host, load, time] | | physical_plan | SortExec: expr=[host@0 ASC NULLS LAST,load@1 ASC NULLS LAST,time@2 ASC NULLS LAST] | -| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] | -| | DeduplicateExec: [host@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [host@0 
ASC,time@2 ASC] | -| | UnionExec | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: <REDACTED> -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: <REDACTED> -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | DeduplicateExec: [host@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | +| | UnionExec | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: <REDACTED> +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: <REDACTED> +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | | | | ---------- -- SQL: SELECT * FROM cpu WHERE host != 'b' ORDER BY host,time; @@ -43,23 +41,21 @@ | plan_type | plan | ---------- | logical_plan | Sort: cpu.host ASC NULLS LAST, cpu.time ASC NULLS LAST | -| | Projection: cpu.host, cpu.load, cpu.time | -| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) | -| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] | +| | Filter: cpu.host != Dictionary(Int32, Utf8("b")) | +| | TableScan: cpu projection=[host, load, time], partial_filters=[cpu.host != Dictionary(Int32, Utf8("b"))] | | physical_plan | SortExec: expr=[host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] | | | CoalescePartitionsExec | -| | ProjectionExec: expr=[host@0 as host, load@1 as load, time@2 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: <REDACTED> -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | -| | DeduplicateExec: [host@0 ASC,time@2 ASC] | -| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | -| | UnionExec | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: <REDACTED> -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: <REDACTED> -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: <REDACTED> +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | +| | DeduplicateExec: [host@0 ASC,time@2 ASC] | +| | SortPreservingMergeExec: [host@0 ASC,time@2 ASC] | +| | UnionExec | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: <REDACTED> +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != 
host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | +| | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: <REDACTED> +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=host != Dictionary(Int32, Utf8("b")), pruning_predicate=host_min@0 != b OR b != host_max@1, output_ordering=[host@0 ASC, time@2 ASC], projection=[host, load, time] | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected index cdf0da39ac..0a4ff875e1 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected @@ -17,20 +17,18 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | TableScan: h2o projection=[city, other_temp, state, temp, time] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | UnionExec | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortExec: expr=[city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, projection=[city, other_temp, state, temp, time] | +| logical_plan | TableScan: h2o projection=[city, other_temp, state, temp, time] | +| physical_plan | UnionExec | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortExec: expr=[city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, 
projection=[city, other_temp, state, temp, time] | | | | ---------- -- SQL: select temp, other_temp, time from h2o; @@ -74,22 +72,20 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | Filter: h2o.time >= TimestampNanosecond(250, None) | -| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | CoalesceBatchesExec: target_batch_size=8192 | -| | FilterExec: time@4 >= 250 | -| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=3 | -| | UnionExec | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortExec: expr=[city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] | +| logical_plan | Filter: h2o.time >= TimestampNanosecond(250, None) | +| | TableScan: h2o projection=[city, other_temp, state, temp, time], partial_filters=[h2o.time >= TimestampNanosecond(250, None)] | +| physical_plan | CoalesceBatchesExec: target_batch_size=8192 | +| | FilterExec: time@4 >= 250 | +| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=3 | +| | UnionExec | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortExec: expr=[city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000002.parquet]]}, predicate=time >= TimestampNanosecond(250, None), pruning_predicate=time_max@0 >= 250, projection=[city, other_temp, state, temp, time] | | | | ---------- \ No 
newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/two_chunks.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/two_chunks.sql.expected index 3b60e3ef75..9bf74b44e3 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/two_chunks.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/two_chunks.sql.expected @@ -13,15 +13,13 @@ ---------- | plan_type | plan | ---------- -| logical_plan | Projection: h2o.city, h2o.other_temp, h2o.state, h2o.temp, h2o.time | -| | TableScan: h2o projection=[city, other_temp, state, temp, time] | -| physical_plan | ProjectionExec: expr=[city@0 as city, other_temp@1 as other_temp, state@2 as state, temp@3 as temp, time@4 as time] | -| | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | -| | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | -| | SortExec: expr=[city@0 ASC,state@2 ASC,time@4 ASC] | -| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 | +| logical_plan | TableScan: h2o projection=[city, other_temp, state, temp, time] | +| physical_plan | DeduplicateExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | SortPreservingMergeExec: [city@0 ASC,state@2 ASC,time@4 ASC] | +| | UnionExec | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC, state@2 ASC, time@4 ASC], projection=[city, other_temp, state, temp, time] | +| | SortExec: expr=[city@0 ASC,state@2 ASC,time@4 ASC] | +| | RecordBatchesExec: batches_groups=1 batches=1 total_rows=1 | | | | ---------- -- SQL: select temp, other_temp, time from h2o; diff --git a/querier/Cargo.toml b/querier/Cargo.toml index 0c3b0c0826..5bdb8d69c1 100644 --- a/querier/Cargo.toml +++ b/querier/Cargo.toml @@ -52,6 +52,7 @@ workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] arrow_util = { path = "../arrow_util" } assert_matches = "1.5" +insta = { version = "1.28.0", features = ["yaml"] } iox_tests = { path = "../iox_tests" } mutable_batch_lp = { path = "../mutable_batch_lp" } object_store_metrics = { path = "../object_store_metrics" } diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs index 3f8b340cbb..36a11f0f5c 100644 --- a/querier/src/namespace/query_access.rs +++ b/querier/src/namespace/query_access.rs @@ -199,7 +199,7 @@ mod tests { use super::*; use crate::namespace::test_util::{clear_parquet_cache, querier_namespace}; use arrow::record_batch::RecordBatch; - use arrow_util::assert_batches_sorted_eq; + use arrow_util::test_util::batches_to_sorted_lines; use data_types::ColumnType; use datafusion::common::DataFusionError; use iox_query::frontend::sql::SqlQueryPlanner; @@ -328,22 +328,24 @@ mod tests { let traces = Arc::new(RingBufferTraceCollector::new(100)); let span_ctx = SpanContext::new(Arc::clone(&traces) as _); - assert_query_with_span_ctx( - &querier_namespace, - "SELECT * FROM cpu WHERE host != 'z' ORDER BY host,time", - &[ - "+-----+------+------+--------------------------------+", - "| foo | host | load | time |", - "+-----+------+------+--------------------------------+", - "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |", - "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |", - "| | a | 4.0 | 1970-01-01T00:00:00.000010001Z |", - "| | b | 5.0 | 
1970-01-01T00:00:00.000000011Z |", - "+-----+------+------+--------------------------------+", - ], - Some(span_ctx), - ) - .await; + insta::assert_yaml_snapshot!( + format_query_with_span_ctx( + &querier_namespace, + "SELECT * FROM cpu WHERE host != 'z' ORDER BY host,time", + Some(span_ctx), + ).await, + @r###" + --- + - +-----+------+------+--------------------------------+ + - "| foo | host | load | time |" + - +-----+------+------+--------------------------------+ + - "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |" + - "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |" + - "| | a | 4.0 | 1970-01-01T00:00:00.000010001Z |" + - "| | b | 5.0 | 1970-01-01T00:00:00.000000011Z |" + - +-----+------+------+--------------------------------+ + "### + ); // check span let span = traces @@ -464,71 +466,67 @@ mod tests { &Observation::U64Counter(0), ); - assert_query( - &querier_namespace, - "SELECT * FROM mem ORDER BY host,time", - &[ - "+------+------+--------------------------------+", - "| host | perc | time |", - "+------+------+--------------------------------+", - "| c | 50.0 | 1970-01-01T00:00:00.000000011Z |", - "| c | 51.0 | 1970-01-01T00:00:00.000000012Z |", - "| d | 53.0 | 1970-01-01T00:00:00.000000014Z |", - "+------+------+--------------------------------+", - ], - ) - .await; + insta::assert_yaml_snapshot!( + format_query( + &querier_namespace, + "SELECT * FROM mem ORDER BY host,time" + ).await, + @r###" + --- + - +------+------+--------------------------------+ + - "| host | perc | time |" + - +------+------+--------------------------------+ + - "| c | 50.0 | 1970-01-01T00:00:00.000000011Z |" + - "| c | 51.0 | 1970-01-01T00:00:00.000000012Z |" + - "| d | 53.0 | 1970-01-01T00:00:00.000000014Z |" + - +------+------+--------------------------------+ + "### + ); // --------------------------------------------------------- // EXPLAIN // 5 chunks but one was flaged for deleted -> 4 chunks left // all chunks are persisted and do not overlap -> they will be scanned in one IOxReadFilterNode node - assert_explain( - &querier_namespace, - "EXPLAIN SELECT * FROM cpu", - &[ - "+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - "| plan_type | plan |", - "+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |", - "| | TableScan: cpu projection=[foo, host, load, time] |", - "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |", - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/2/2/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |", - "| | |", - 
"+---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - ], - ) - .await; + insta::assert_yaml_snapshot!( + format_explain(&querier_namespace, "EXPLAIN SELECT * FROM cpu").await, + @r###" + --- + - +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + - "| plan_type | plan |" + - +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + - "| logical_plan | TableScan: cpu projection=[foo, host, load, time] |" + - "| physical_plan | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/2/2/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |" + - "| | |" + - +---------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + "### + ); // 3 chunks but 1 (with time = 1) got pruned by the tombstone --> 2 chunks left // The 2 participated chunks in the plan do not overlap -> no deduplication, no sort. 
Final sort is for order by // FilterExec is for the tombstone - assert_explain( - &querier_namespace, - "EXPLAIN SELECT * FROM mem ORDER BY host,time", - &[ - "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------+", - "| plan_type | plan |", - "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------+", - "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |", - "| | Projection: mem.host, mem.perc, mem.time |", - "| | TableScan: mem projection=[host, perc, time] |", - "| physical_plan | SortExec: expr=[host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |", - "| | CoalescePartitionsExec |", - "| | ProjectionExec: expr=[host@0 as host, perc@1 as perc, time@2 as time] |", - "| | UnionExec |", - "| | CoalesceBatchesExec: target_batch_size=8192 |", - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", - "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |", - "| | CoalesceBatchesExec: target_batch_size=8192 |", - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |", - "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |", - "| | |", - "+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------+", - ], - ) - .await; + insta::assert_yaml_snapshot!( + format_explain(&querier_namespace, "EXPLAIN SELECT * FROM mem ORDER BY host,time").await, + @r###" + --- + - +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + - "| plan_type | plan |" + - +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + - "| logical_plan | Sort: mem.host ASC NULLS LAST, mem.time ASC NULLS LAST |" + - "| | TableScan: mem projection=[host, perc, time] |" + - "| physical_plan | SortExec: expr=[host@0 ASC NULLS LAST,time@2 ASC NULLS LAST] |" + - "| | CoalescePartitionsExec |" + - "| | UnionExec |" + - "| | CoalesceBatchesExec: target_batch_size=8192 |" + - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |" + - "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |" + - "| | CoalesceBatchesExec: target_batch_size=8192 |" + - "| | FilterExec: time@2 < 1 OR time@2 > 13 OR NOT host@0 = CAST(d AS Dictionary(Int32, Utf8)) |" + - "| | ParquetExec: limit=None, partitions={1 group: [[1/2/1/4/<uuid>.parquet]]}, projection=[host, perc, time] |" + - "| | |" + - +---------------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + "### + ); // ----------- // Add an overlapped chunk @@ -544,87 +542,72 @@ mod tests { // Since we made a new parquet file, we need to tell querier about it clear_parquet_cache(&querier_namespace, table_cpu.table.id); - assert_query( - &querier_namespace, - "SELECT * FROM cpu", // no need `order by` because data is sorted before comparing in assert_query - 
&[ - "+-----+------+------+--------------------------------+", - "| foo | host | load | time |", - "+-----+------+------+--------------------------------+", - "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |", - "| | a | 14.0 | 1970-01-01T00:00:00.000010001Z |", - "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |", - "| | b | 5.0 | 1970-01-01T00:00:00.000000011Z |", - "| | z | 0.0 | 1970-01-01T00:00:00Z |", - "+-----+------+------+--------------------------------+", - ], - ) - .await; + insta::assert_yaml_snapshot!( + format_query(&querier_namespace, + "SELECT * FROM cpu", // no need `order by` because data is sorted before comparing in assert_query + ).await, + @r###" + --- + - +-----+------+------+--------------------------------+ + - "| foo | host | load | time |" + - +-----+------+------+--------------------------------+ + - "| | a | 1.0 | 1970-01-01T00:00:00.000000011Z |" + - "| | a | 14.0 | 1970-01-01T00:00:00.000010001Z |" + - "| | a | 3.0 | 1970-01-01T00:00:00.000000033Z |" + - "| | b | 5.0 | 1970-01-01T00:00:00.000000011Z |" + - "| | z | 0.0 | 1970-01-01T00:00:00Z |" + - +-----+------+------+--------------------------------+ + "### + ); // 5 chunks: // . 2 chunks overlap with each other and must be deduplicated but no sort needed because they are sorted on the same sort key // . 3 chunks do not overlap and have no duplicated --> will be scanned in one IOxReadFilterNode node - assert_explain( - &querier_namespace, - "EXPLAIN SELECT * FROM cpu", - &[ - "+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - "| plan_type | plan |", - "+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - "| logical_plan | Projection: cpu.foo, cpu.host, cpu.load, cpu.time |", - "| | TableScan: cpu projection=[foo, host, load, time] |", - "| physical_plan | ProjectionExec: expr=[foo@0 as foo, host@1 as host, load@2 as load, time@3 as time] |", - "| | UnionExec |", - "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |", - "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |", - "| | UnionExec |", - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |", - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |", - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |", - "| | |", - "+---------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", - ], - ) - .await; + insta::assert_yaml_snapshot!( + format_explain(&querier_namespace, "EXPLAIN SELECT * FROM cpu").await, + 
@r###" + --- + - +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + - "| plan_type | plan |" + - +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + - "| logical_plan | TableScan: cpu projection=[foo, host, load, time] |" + - "| physical_plan | UnionExec |" + - "| | DeduplicateExec: [host@1 ASC,time@3 ASC] |" + - "| | SortPreservingMergeExec: [host@1 ASC,time@3 ASC] |" + - "| | UnionExec |" + - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |" + - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/2/2/<uuid>.parquet]]}, output_ordering=[host@1 ASC, time@3 ASC], projection=[foo, host, load, time] |" + - "| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/1/<uuid>.parquet, 1/1/1/3/<uuid>.parquet]]}, projection=[foo, host, load, time] |" + - "| | |" + - +---------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + "### + ); } - async fn assert_query( - querier_namespace: &Arc<QuerierNamespace>, - sql: &str, - expected_lines: &[&str], - ) { - assert_query_with_span_ctx(querier_namespace, sql, expected_lines, None).await + async fn format_query(querier_namespace: &Arc<QuerierNamespace>, sql: &str) -> Vec<String> { + format_query_with_span_ctx(querier_namespace, sql, None).await } - async fn assert_query_with_span_ctx( + async fn format_query_with_span_ctx( querier_namespace: &Arc<QuerierNamespace>, sql: &str, - expected_lines: &[&str], span_ctx: Option<SpanContext>, - ) { + ) -> Vec<String> { let results = run(querier_namespace, sql, span_ctx).await; - assert_batches_sorted_eq!(expected_lines, &results); + batches_to_sorted_lines(&results) } - async fn assert_explain( - querier_namespace: &Arc<QuerierNamespace>, - sql: &str, - expected_lines: &[&str], - ) { + async fn format_explain(querier_namespace: &Arc<QuerierNamespace>, sql: &str) -> Vec<String> { let results = run(querier_namespace, sql, None).await; let formatted = arrow_util::display::pretty_format_batches(&results).unwrap(); let regex = Regex::new("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}") .expect("UUID regex"); - let actual_lines = formatted + formatted .trim() .split('\n') .map(|s| regex.replace_all(s, "<uuid>").to_string()) - .collect::<Vec<_>>(); - assert_eq!( - expected_lines, actual_lines, - "\n\nexpected:\n\n{expected_lines:#?}\nactual:\n\n{actual_lines:#?}\n\n" - ); + .collect::<Vec<_>>() } async fn run( diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 6ba2720e4e..99173024d0 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -17,10 +17,10 @@ license.workspace = true ### BEGIN HAKARI SECTION 
[dependencies] ahash = { version = "0.8", default-features = false, features = ["runtime-rng"] } -arrow = { version = "33", features = ["dyn_cmp_dict", "prettyprint"] } -arrow-flight = { version = "33", features = ["flight-sql-experimental"] } -arrow-ord = { version = "33", default-features = false, features = ["dyn_cmp_dict"] } -arrow-string = { version = "33", default-features = false, features = ["dyn_cmp_dict"] } +arrow = { version = "34", features = ["dyn_cmp_dict", "prettyprint"] } +arrow-flight = { version = "34", features = ["flight-sql-experimental"] } +arrow-ord = { version = "34", default-features = false, features = ["dyn_cmp_dict"] } +arrow-string = { version = "34", default-features = false, features = ["dyn_cmp_dict"] } base64-594e8ee84c453af0 = { package = "base64", version = "0.13" } base64-647d43efb71741da = { package = "base64", version = "0.21" } bitflags = { version = "1" } @@ -29,7 +29,7 @@ bytes = { version = "1" } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "serde"] } crossbeam-utils = { version = "0.8" } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "f6e49ac7a027abb95d8b7fa755502dfa7d53c21c" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "ea3b965dd4861f966a5ec1c4a65f748f0e5dcc12" } digest = { version = "0.10", features = ["mac", "std"] } either = { version = "1" } fixedbitset = { version = "0.4" } @@ -58,7 +58,7 @@ num-traits = { version = "0.2", features = ["i128", "libm"] } object_store = { version = "0.5", default-features = false, features = ["aws", "azure", "gcp"] } once_cell = { version = "1", features = ["parking_lot"] } parking_lot = { version = "0.12", features = ["arc_lock"] } -parquet = { version = "33", features = ["async", "experimental"] } +parquet = { version = "34", features = ["async", "experimental"] } phf_shared = { version = "0.11" } predicates = { version = "2" } prost = { version = "0.11" }
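The diff in this record replaces hand-maintained `expected_lines` arrays with `insta` inline YAML snapshots produced from the new `format_query` / `format_explain` helpers. The following is a rough, self-contained sketch of that inline-snapshot pattern, assuming only the `insta` crate with YAML snapshot support; the `lines` value and module name are illustrative and not taken from the querier tests.

```rust
// Sketch of the inline-snapshot style used by the migrated tests, assuming
// the `insta` crate is available with YAML snapshot support.
#[cfg(test)]
mod inline_snapshot_sketch {
    #[test]
    fn snapshot_of_formatted_lines() {
        // Hypothetical output: a Vec<String> of pretty-printed table rows,
        // the same shape the querier tests snapshot after UUID scrubbing.
        let lines: Vec<String> = vec!["| plan_type | plan |".to_string()];

        // The body between `@r###"` and `"###` is stored in the source file
        // itself; if the output drifts, `cargo insta review` (or running with
        // INSTA_UPDATE=always) rewrites it in place instead of requiring the
        // expected lines to be edited by hand.
        insta::assert_yaml_snapshot!(lines, @r###"
        ---
        - "| plan_type | plan |"
        "###);
    }
}
```

Compared with the removed `assert_explain` / `assert_query` helpers, the expected output now lives next to the query and can be regenerated mechanically, which is why the diff drops the manually maintained expected-line arrays.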
ca8b59e3940e5590e37ae8e787371159a7e8e4df
Nga Tran
2023-03-09 14:17:54
very large input compacting files (#7166)
* test: very large input compacting files * chore: fix comments * chore: add description for each test * chore: commit to trigger CI tests to run again ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
test: very large input compacting files (#7166) * test: very large input compacting files * chore: fix comments * chore: add description for each test * chore: commit to trigger CI tests to run again --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/compactor2/src/components/split_or_compact/split_compact.rs b/compactor2/src/components/split_or_compact/split_compact.rs index af3c713e8a..0ad69e85a2 100644 --- a/compactor2/src/components/split_or_compact/split_compact.rs +++ b/compactor2/src/components/split_or_compact/split_compact.rs @@ -96,7 +96,7 @@ mod tests { } #[test] - fn test_compact_too_small_to_compact() { + fn test_compact_too_large_to_compact() { let files = create_overlapped_l1_l2_files_2(MAX_SIZE as i64); insta::assert_yaml_snapshot!( format_files("initial", &files), @@ -117,6 +117,7 @@ mod tests { let (files_to_compact_or_split, files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Final); // nothing to compact or split + // after https://github.com/influxdata/idpe/issues/17246, this list won't be empty assert!(files_to_compact_or_split.is_empty()); assert_eq!(files_to_keep.len(), 4); } diff --git a/compactor2/tests/layouts/large_files.rs b/compactor2/tests/layouts/large_files.rs new file mode 100644 index 0000000000..afc3506b8c --- /dev/null +++ b/compactor2/tests/layouts/large_files.rs @@ -0,0 +1,867 @@ +//! layout tests for scenarios with large input files +//! +//! See [crate::layout] module for detailed documentation + +use data_types::CompactionLevel; + +use crate::layouts::{layout_setup_builder, parquet_builder, run_layout_scenario, ONE_MB}; + +const MAX_COMPACT_SIZE: usize = 300 * ONE_MB as usize; +const MAX_DESIRED_FILE_SIZE: u64 = 100 * ONE_MB; + +// This file should be upgraded after https://github.com/influxdata/idpe/issues/17246 +// One l1 file that is larger than max desired file size +#[tokio::test] +async fn one_larger_max_file_size() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(1) + .with_max_time(1000) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + // file > max_desired_file_size_bytes + .with_file_size_bytes(MAX_DESIRED_FILE_SIZE + 1), + ) + .await; + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 100mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1, all files 100mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + "### + ); +} + +// One l0 file that is larger than max desired file size +#[tokio::test] +async fn one_l0_larger_max_file_size() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(1) + .with_max_time(1000) + .with_compaction_level(CompactionLevel::Initial) + // file size > max_desired_file_size_bytes + .with_file_size_bytes(MAX_DESIRED_FILE_SIZE + 1), + ) + .await; + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L0, all files 100mb " + - "L0.1[1,1000] |-------------------------------------L0.1-------------------------------------|" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L0, all files 100mb " + - "L0.1[1,1000] |-------------------------------------L0.1-------------------------------------|" + "### + ); +} + +// This file should be upgraded after https://github.com/influxdata/idpe/issues/17246 +// One l1 file that is larger than max compact size +#[tokio::test] +async fn one_larger_max_compact_size() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(1) + .with_max_time(1000) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + // file > max_desired_file_size_bytes + .with_file_size_bytes((MAX_COMPACT_SIZE + 1) as u64), + ) + .await; + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 300mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "WARNING: file L1.1[1,1000] 300mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1, all files 300mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "WARNING: file L1.1[1,1000] 300mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// This file should be upgraded after https://github.com/influxdata/idpe/issues/17246 +// One l0 file that is larger than max compact size +#[tokio::test] +async fn one_l0_larger_max_compact_size() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(1) + .with_max_time(1000) + .with_compaction_level(CompactionLevel::Initial) + // file > max_desired_file_size_bytes + .with_file_size_bytes((MAX_COMPACT_SIZE + 1) as u64), + ) + .await; + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L0, all files 300mb " + - "L0.1[1,1000] |-------------------------------------L0.1-------------------------------------|" + - "WARNING: file L0.1[1,1000] 300mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L0, all files 300mb " + - "L0.1[1,1000] |-------------------------------------L0.1-------------------------------------|" + - "WARNING: file L0.1[1,1000] 300mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// This is working as expected and should stay after https://github.com/influxdata/idpe/issues/17246 +// Two files that are under max compact size +#[tokio::test] +async fn two_large_files_total_under_max_compact_size() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // each file size over max_desired_file_size_bytes but total size under max_compact_size + let size = MAX_DESIRED_FILE_SIZE + 10; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i) + .with_max_time(1000) + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(size), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 100mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "L2, all files 100mb " + - "L2.2[2,1000] |------------------------------------L2.2-------------------------------------| " + - "**** Simulation run 0, type=split(split_times=[501]). 
2 Input Files, 200mb total:" + - "L1, all files 100mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "L2, all files 100mb " + - "L2.2[2,1000] |------------------------------------L2.2-------------------------------------| " + - "**** 2 Output Files (parquet_file_id not yet assigned), 199.8mb total:" + - "L2 " + - "L2.?[1,501] 100.1mb |-----------------L2.?-----------------| " + - "L2.?[502,1000] 99.7mb |----------------L2.?-----------------| " + - "Committing partition 1:" + - " Soft Deleting 2 files: L1.1, L2.2" + - " Creating 2 files at level CompactionLevel::L2" + - "**** Final Output Files " + - "L2 " + - "L2.3[1,501] 100.1mb |-----------------L2.3-----------------| " + - "L2.4[502,1000] 99.7mb |----------------L2.4-----------------| " + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Two similar size and time range files with total size larger than max compact size +#[tokio::test] +async fn two_large_files_total_over_max_compact_size() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + let size = MAX_COMPACT_SIZE / 2 + 10; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i) + .with_max_time(1000) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(size as u64), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 150mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "L2, all files 150mb " + - "L2.2[2,1000] |------------------------------------L2.2-------------------------------------| " + - "WARNING: file L1.1[1,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[2,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1, all files 150mb " + - "L1.1[1,1000] |-------------------------------------L1.1-------------------------------------|" + - "L2, all files 150mb " + - "L2.2[2,1000] |------------------------------------L2.2-------------------------------------| " + - "WARNING: file L1.1[1,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[2,1000] 150mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Two similar size files with total size larger than max compact size with small overlap range +// The time range of target level file is much smaller and at the end range of the start level file +#[tokio::test] +async fn two_large_files_total_over_max_compact_size_small_overlap_range() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + let size = MAX_COMPACT_SIZE / 2 + 10; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time((i - 1) * 800) + .with_max_time(1000) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(size as u64), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 150mb " + - "L1.1[0,1000] |-------------------------------------L1.1-------------------------------------|" + - "L2, all files 150mb " + - "L2.2[800,1000] |-----L2.2-----|" + - "WARNING: file L1.1[0,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[800,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1, all files 150mb " + - "L1.1[0,1000] |-------------------------------------L1.1-------------------------------------|" + - "L2, all files 150mb " + - "L2.2[800,1000] |-----L2.2-----|" + - "WARNING: file L1.1[0,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[800,1000] 150mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Two similar size files with total size larger than max compact size with small overlap range +// The overlapped range is at the end range of start_level file and start of target level file +// Two files have similar length of time range +#[tokio::test] +async fn two_large_files_total_over_max_compact_size_small_overlap_range_2() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + let size = MAX_COMPACT_SIZE / 2 + 10; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * 800) + .with_max_time((i + 1) * 1000) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(size as u64), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 150mb " + - "L1.1[800,2000] |------------------L1.1-------------------| " + - "L2, all files 150mb " + - "L2.2[1600,3000] |----------------------L2.2----------------------| " + - "WARNING: file L1.1[800,2000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[1600,3000] 150mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1, all files 150mb " + - "L1.1[800,2000] |------------------L1.1-------------------| " + - "L2, all files 150mb " + - "L2.2[1600,3000] |----------------------L2.2----------------------| " + - "WARNING: file L1.1[800,2000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[1600,3000] 150mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Two similar size files with total size larger than max compact size with small overlap range +// The overlapped range is at the end range of start_level file and start of target level file +// Time range of the start level file is much smaller than the one of target level file +#[tokio::test] +async fn two_large_files_total_over_max_compact_size_small_overlap_range_3() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + let size = MAX_COMPACT_SIZE / 2 + 10; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time((i - 1) * 200) + .with_max_time((i - 1) * 1000 + 300) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(size as u64), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1, all files 150mb " + - "L1.1[0,300] |------L1.1------| " + - "L2, all files 150mb " + - "L2.2[200,1300] |------------------------------L2.2-------------------------------| " + - "WARNING: file L1.1[0,300] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[200,1300] 150mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1, all files 150mb " + - "L1.1[0,300] |------L1.1------| " + - "L2, all files 150mb " + - "L2.2[200,1300] |------------------------------L2.2-------------------------------| " + - "WARNING: file L1.1[0,300] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.2[200,1300] 150mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Two similar size files with total size larger than max compact size and similar time range +// Start level is 0 +#[tokio::test] +async fn two_large_files_total_over_max_compact_size_start_l0() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + let size = MAX_COMPACT_SIZE / 2 + 10; + + for i in 0..=1 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i) + .with_max_time(1000) + // L0.1 or L1.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(size as u64), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L0, all files 150mb " + - "L0.1[0,1000] |-------------------------------------L0.1-------------------------------------|" + - "L1, all files 150mb " + - "L1.2[1,1000] |------------------------------------L1.2-------------------------------------| " + - "WARNING: file L0.1[0,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L1.2[1,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L0, all files 150mb " + - "L0.1[0,1000] |-------------------------------------L0.1-------------------------------------|" + - "L1, all files 150mb " + - "L1.2[1,1000] |------------------------------------L1.2-------------------------------------| " + - "WARNING: file L0.1[0,1000] 150mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L1.2[1,1000] 150mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Real-life case with three good size L1s and one very large L2 +#[tokio::test] +async fn target_too_large_1() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // Real-life case 1 + // . three L1s with total > max_desired_file_size_bytes to trigger compaction + // . 
one very large overlapped L2 + + // size of l1s & l2 + let l1_sizes = vec![53 * ONE_MB, 45 * ONE_MB, 5 * ONE_MB]; + let l2_size = 253 * ONE_MB; + + // L2 overlapped with the first L1 + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(1) + .with_max_time(1000) + .with_compaction_level(CompactionLevel::Final) + .with_file_size_bytes(l2_size), + ) + .await; + + // L1s + for i in 0..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * 1000 + 1) + .with_max_time(i * 1000 + 1000) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + .with_file_size_bytes(l1_sizes[i as usize]), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1 " + - "L1.2[1,1000] 53mb |----------L1.2----------| " + - "L1.3[1001,2000] 45mb |----------L1.3----------| " + - "L1.4[2001,3000] 5mb |----------L1.4----------| " + - "L2 " + - "L2.1[1,1000] 253mb |----------L2.1----------| " + - "WARNING: file L2.1[1,1000] 253mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1 " + - "L1.2[1,1000] 53mb |----------L1.2----------| " + - "L1.3[1001,2000] 45mb |----------L1.3----------| " + - "L1.4[2001,3000] 5mb |----------L1.4----------| " + - "L2 " + - "L2.1[1,1000] 253mb |----------L2.1----------| " + - "WARNING: file L2.1[1,1000] 253mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// Real-life case with two good size L1s and one very large L2 +#[tokio::test] +async fn target_too_large_2() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // Real-life case 2 + // . two L1s with total > max_desired_file_size_bytes to trigger compaction + // . one very large overlapped L2 + + // size of l1s & l2 + let l1_sizes = vec![69 * ONE_MB, 50 * ONE_MB]; + let l2_size = 232 * ONE_MB; + + // L2 overlapped with both L1s + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(1) + .with_max_time(3000) + .with_compaction_level(CompactionLevel::Final) + .with_file_size_bytes(l2_size), + ) + .await; + + // L1s + for i in 0..=1 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * 1000 + 1) + .with_max_time(i * 1000 + 1000) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + .with_file_size_bytes(l1_sizes[i as usize]), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1 " + - "L1.2[1,1000] 69mb |----------L1.2----------| " + - "L1.3[1001,2000] 50mb |----------L1.3----------| " + - "L2 " + - "L2.1[1,3000] 232mb |-------------------------------------L2.1-------------------------------------|" + - "WARNING: file L2.1[1,3000] 232mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1 " + - "L1.2[1,1000] 69mb |----------L1.2----------| " + - "L1.3[1001,2000] 50mb |----------L1.3----------| " + - "L2 " + - "L2.1[1,3000] 232mb |-------------------------------------L2.1-------------------------------------|" + - "WARNING: file L2.1[1,3000] 232mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// One very large start level file with one good size overlapped target level file +// Two have similar time range +#[tokio::test] +async fn start_too_large_similar_time_range() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // . one L1 >> max_desired_file_size_bytes to trigger compaction + // . one good size overlapped L2 + // . total size = L1 & L2 > max_compact_size + + // size of l1 & l2 respectively + let sizes = vec![250 * ONE_MB, 52 * ONE_MB]; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i) + .with_max_time(1000) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(sizes[(i - 1) as usize]), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1 " + - "L1.1[1,1000] 250mb |-------------------------------------L1.1-------------------------------------|" + - "L2 " + - "L2.2[2,1000] 52mb |------------------------------------L2.2-------------------------------------| " + - "WARNING: file L1.1[1,1000] 250mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1 " + - "L1.1[1,1000] 250mb |-------------------------------------L1.1-------------------------------------|" + - "L2 " + - "L2.2[2,1000] 52mb |------------------------------------L2.2-------------------------------------| " + - "WARNING: file L1.1[1,1000] 250mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// One very large start level file with one good size overlapped target level file +// Overlapped range is small +// The overlapped range is at the end of both start_level file and target level file +#[tokio::test] +async fn start_too_large_small_time_range() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // . one L1 >> max_desired_file_size_bytes to trigger compaction + // . one good size overlapped L2 + // . 
total size = L1 & L2 > max_compact_size + + // size of l1 & l2 respectively + let sizes = vec![250 * ONE_MB, 52 * ONE_MB]; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time((i - 1) * 800) + .with_max_time(1000) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(sizes[(i - 1) as usize]), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1 " + - "L1.1[0,1000] 250mb |-------------------------------------L1.1-------------------------------------|" + - "L2 " + - "L2.2[800,1000] 52mb |-----L2.2-----|" + - "WARNING: file L1.1[0,1000] 250mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1 " + - "L1.1[0,1000] 250mb |-------------------------------------L1.1-------------------------------------|" + - "L2 " + - "L2.2[800,1000] 52mb |-----L2.2-----|" + - "WARNING: file L1.1[0,1000] 250mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// One very large start level file with one good size overlapped target level file +// Overlapped range is small +// The overlapped range is at the end of start_level file and start of target level file +#[tokio::test] +async fn start_too_large_small_time_range_2() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // . one L1 >> max_desired_file_size_bytes to trigger compaction + // . one good size overlapped L2 + // . total size = L1 & L2 > max_compact_size + + // size of l1 & l2 respectively + let sizes = vec![250 * ONE_MB, 52 * ONE_MB]; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * 800) + .with_max_time((i + 1) * 1000) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(sizes[(i - 1) as usize]), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1 " + - "L1.1[800,2000] 250mb|------------------L1.1-------------------| " + - "L2 " + - "L2.2[1600,3000] 52mb |----------------------L2.2----------------------| " + - "WARNING: file L1.1[800,2000] 250mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. 
The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1 " + - "L1.1[800,2000] 250mb|------------------L1.1-------------------| " + - "L2 " + - "L2.2[1600,3000] 52mb |----------------------L2.2----------------------| " + - "WARNING: file L1.1[800,2000] 250mb exceeds soft limit 100mb by more than 50%" + "### + ); +} + +// These files should be split and then compacted after https://github.com/influxdata/idpe/issues/17246 +// One very large start level file with one good size overlapped target level file +// Overlapped range is small +// The overlapped range is at the end of start_level file and start of target level file +// Time range of start level is much larger than target level +#[tokio::test] +async fn start_too_large_small_time_range_3() { + test_helpers::maybe_start_logging(); + + let setup = layout_setup_builder() + .await + .with_max_compact_size(MAX_COMPACT_SIZE) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .build() + .await; + + // . one L1 >> max_desired_file_size_bytes to trigger compaction + // . one good size overlapped L2 + // . total size = L1 & L2 > max_compact_size + + // size of l1 & l2 respectively + let sizes = vec![250 * ONE_MB, 52 * ONE_MB]; + + for i in 1..=2 { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time((i - 1) * 200) + .with_max_time((i - 1) * 1000 + 300) + // L1.1 or L2.2 + .with_compaction_level(CompactionLevel::try_from(i as i32).unwrap()) + .with_file_size_bytes(sizes[(i - 1) as usize]), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L1 " + - "L1.1[0,300] 250mb |------L1.1------| " + - "L2 " + - "L2.2[200,1300] 52mb |------------------------------L2.2-------------------------------| " + - "WARNING: file L1.1[0,300] 250mb exceeds soft limit 100mb by more than 50%" + - "SKIPPED COMPACTION for PartitionId(1): partition 1 has overlapped files that exceed max compact size limit 314572800. The may happen if a large amount of data has the same timestamp" + - "**** Final Output Files " + - "L1 " + - "L1.1[0,300] 250mb |------L1.1------| " + - "L2 " + - "L2.2[200,1300] 52mb |------------------------------L2.2-------------------------------| " + - "WARNING: file L1.1[0,300] 250mb exceeds soft limit 100mb by more than 50%" + "### + ); +} diff --git a/compactor2/tests/layouts/mod.rs b/compactor2/tests/layouts/mod.rs index 0aa6894fed..0ec9c52071 100644 --- a/compactor2/tests/layouts/mod.rs +++ b/compactor2/tests/layouts/mod.rs @@ -50,6 +50,7 @@ //! ``` mod core; mod knobs; +mod large_files; mod large_overlaps; mod many_files; mod single_timestamp;
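The layout scenarios added in this record are all driven by two knobs, `MAX_COMPACT_SIZE = 300 * ONE_MB` and `MAX_DESIRED_FILE_SIZE = 100 * ONE_MB`. Below is a minimal sketch of the arithmetic behind the "SKIPPED COMPACTION" message, assuming `ONE_MB` is one mebibyte (1024 * 1024 bytes), which is what the quoted 314572800-byte limit implies.

```rust
// Minimal check of the size arithmetic behind the skip message in the
// snapshots above. ONE_MB is assumed to be 1024 * 1024 bytes.
fn main() {
    const ONE_MB: u64 = 1024 * 1024;
    const MAX_COMPACT_SIZE: u64 = 300 * ONE_MB;
    const MAX_DESIRED_FILE_SIZE: u64 = 100 * ONE_MB;

    // The limit printed in the skip message.
    assert_eq!(MAX_COMPACT_SIZE, 314_572_800);

    // Two overlapping ~150mb files, as in
    // two_large_files_total_over_max_compact_size: together they exceed the
    // compact limit, so the partition is skipped rather than compacted.
    let l1 = MAX_COMPACT_SIZE / 2 + 10;
    let l2 = MAX_COMPACT_SIZE / 2 + 10;
    assert!(l1 + l2 > MAX_COMPACT_SIZE);

    // Each file on its own also breaches the 100mb soft limit that triggers
    // the per-file WARNING lines.
    assert!(l1 > MAX_DESIRED_FILE_SIZE);

    println!("max compact size = {MAX_COMPACT_SIZE} bytes");
}
```

The same bound explains why the two 100mb files in `two_large_files_total_under_max_compact_size` do get compacted: their 200mb total still fits under the 300mb limit, so that scenario splits and commits output files instead of being skipped.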
44e266d00036005a0ece1d44d01bd2f0397ff903
Joe-Blount
2023-07-31 08:15:49
compaction looping fixes (#8363)
* fix: selectively merge L1 to L2 when L0s still exist * fix: avoid grouping files that undo previous splits * chore: add test case for new fixes * chore: insta test churn * chore: lint cleanup
null
fix: compaction looping fixes (#8363) * fix: selectively merge L1 to L2 when L0s still exist * fix: avoid grouping files that undo previous splits * chore: add test case for new fixes * chore: insta test churn * chore: lint cleanup
diff --git a/compactor/src/components/round_info_source/mod.rs b/compactor/src/components/round_info_source/mod.rs index 1d2419df3a..a1e5b96548 100644 --- a/compactor/src/components/round_info_source/mod.rs +++ b/compactor/src/components/round_info_source/mod.rs @@ -172,7 +172,11 @@ impl RoundInfoSource for LevelBasedRoundInfo { _partition_info: &PartitionInfo, files: &[ParquetFile], ) -> Result<RoundInfo, DynError> { - let start_level = get_start_level(files); + let start_level = get_start_level( + files, + self.max_num_files_per_plan, + self.max_total_file_size_per_plan, + ); if self.too_many_small_files_to_compact(files, start_level) { return Ok(RoundInfo::ManySmallFiles { @@ -187,23 +191,53 @@ impl RoundInfoSource for LevelBasedRoundInfo { } } -fn get_start_level(files: &[ParquetFile]) -> CompactionLevel { +// get_start_level decides what level to start compaction from. Often this is the lowest level +// we have ParquetFiles in, but occasionally we decide to compact L1->L2 when L0s still exist. +// +// If we ignore the invariants (where intra-level overlaps are allowed), this would be a math problem +// to optimize write amplification. +// +// However, allowing intra-level overlaps in L0 but not L1/L2 adds extra challenge to compacting L0s to L1. +// This is especially true when there are large quantitites of overlapping L0s and L1s, potentially resulting +// in many split/compact cycles to resolve the overlaps. +// +// Since L1 & L2 only have inter-level overlaps, they can be compacted with just a few splits to align the L1s +// with the L2s. The relative ease of moving data from L1 to L2 provides additional motivation to compact the +// L1s to L2s when a backlog of L0s exist. The easily solvable L1->L2 compaction can give us a clean slate in +// L1, greatly simplifying the remaining L0->L1 compactions. +fn get_start_level(files: &[ParquetFile], max_files: usize, max_bytes: usize) -> CompactionLevel { // panic if the files are empty assert!(!files.is_empty()); - // Start with initial level - // If there are files in this level, itis the start level - // Otherwise repeat until reaching the final level. - let mut level = CompactionLevel::Initial; - while level != CompactionLevel::Final { - if files.iter().any(|f| f.compaction_level == level) { - return level; - } + let mut l0_cnt: usize = 0; + let mut l0_bytes: usize = 0; + let mut l1_bytes: usize = 0; - level = level.next(); + for f in files { + match f.compaction_level { + CompactionLevel::Initial => { + l0_cnt += 1; + l0_bytes += f.file_size_bytes as usize; + } + CompactionLevel::FileNonOverlapped => { + l1_bytes += f.file_size_bytes as usize; + } + _ => {} + } } - level + if l1_bytes > 3 * max_bytes && (l0_cnt > max_files || l0_bytes > max_bytes) { + // L1 is big enough to pose an overlap challenge compacting from L0, and there is quite a bit more coming from L0. + // The criteria for this early L1->L2 compaction significanly impacts write amplification. The above values optimize + // existing test cases, but may be changed as additional test cases are added. 
+ CompactionLevel::FileNonOverlapped + } else if l0_bytes > 0 { + CompactionLevel::Initial + } else if l1_bytes > 0 { + CompactionLevel::FileNonOverlapped + } else { + CompactionLevel::Final + } } fn get_num_overlapped_files( diff --git a/compactor/src/components/split_or_compact/start_level_files_to_split.rs b/compactor/src/components/split_or_compact/start_level_files_to_split.rs index 4a6eaba61e..e7dd794c22 100644 --- a/compactor/src/components/split_or_compact/start_level_files_to_split.rs +++ b/compactor/src/components/split_or_compact/start_level_files_to_split.rs @@ -301,7 +301,26 @@ pub fn merge_small_l0_chains( for chain in &chains { let this_chain_bytes = chain.iter().map(|f| f.file_size_bytes as usize).sum(); - if prior_chain_bytes > 0 && prior_chain_bytes + this_chain_bytes <= max_compact_size { + // matching max_lo_created_at times indicates that the files were deliberately split. We shouldn't merge + // chains with matching max_lo_created_at times, because that would encourage undoing the previous split, + // which minimally increases write amplification, and may cause unproductive split/compact loops. + let mut matches = 0; + if prior_chain_bytes > 0 { + for f in chain.iter() { + for f2 in &merged_chains[prior_chain_idx as usize] { + if f.max_l0_created_at == f2.max_l0_created_at { + matches += 1; + break; + } + } + } + } + + // Merge it if: there a prior chain to merge with, and merging wouldn't make it too big, or undo a previous split + if prior_chain_bytes > 0 + && prior_chain_bytes + this_chain_bytes <= max_compact_size + && matches == 0 + { // this chain can be added to the prior chain. merged_chains[prior_chain_idx as usize].append(&mut chain.clone()); prior_chain_bytes += this_chain_bytes; diff --git a/compactor/tests/integration.rs b/compactor/tests/integration.rs index fbf60bffd2..31ee42ec11 100644 --- a/compactor/tests/integration.rs +++ b/compactor/tests/integration.rs @@ -68,8 +68,8 @@ async fn test_num_files_over_limit() { assert_levels( &files, vec![ - (8, CompactionLevel::FileNonOverlapped), (9, CompactionLevel::FileNonOverlapped), + (10, CompactionLevel::FileNonOverlapped), ], ); } diff --git a/compactor/tests/layouts/backfill.rs b/compactor/tests/layouts/backfill.rs index 708d5d95b1..33302dfc06 100644 --- a/compactor/tests/layouts/backfill.rs +++ b/compactor/tests/layouts/backfill.rs @@ -746,97 +746,85 @@ async fn random_backfill_over_l2s() { - "Committing partition 1:" - " Soft Deleting 4 files: L0.76, L0.77, L0.79, L0.80" - " Creating 8 files" - - "**** Simulation run 15, type=compact(ManySmallFiles). 10 Input Files, 200mb total:" - - "L0 " - - "L0.75[42,356] 1.04us 33mb|-----------L0.75-----------| " - - "L0.86[357,357] 1.04us 0b |L0.86| " - - "L0.87[358,670] 1.04us 33mb |-----------L0.87-----------| " - - "L0.84[671,672] 1.04us 109kb |L0.84| " - - "L0.85[673,986] 1.04us 33mb |-----------L0.85-----------| " - - "L0.78[42,356] 1.05us 33mb|-----------L0.78-----------| " - - "L0.90[357,357] 1.05us 0b |L0.90| " - - "L0.91[358,670] 1.05us 33mb |-----------L0.91-----------| " - - "L0.88[671,672] 1.05us 109kb |L0.88| " - - "L0.89[673,986] 1.05us 33mb |-----------L0.89-----------| " - - "**** 1 Output Files (parquet_file_id not yet assigned), 200mb total:" - - "L0, all files 200mb " - - "L0.?[42,986] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "**** Simulation run 15, type=compact(ManySmallFiles). 
2 Input Files, 67mb total:" + - "L0, all files 33mb " + - "L0.75[42,356] 1.04us |-----------------------------------------L0.75------------------------------------------|" + - "L0.78[42,356] 1.05us |-----------------------------------------L0.78------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 67mb total:" + - "L0, all files 67mb " + - "L0.?[42,356] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 10 files: L0.75, L0.78, L0.84, L0.85, L0.86, L0.87, L0.88, L0.89, L0.90, L0.91" + - " Soft Deleting 2 files: L0.75, L0.78" - " Creating 1 files" - - "**** Simulation run 16, type=split(HighL0OverlapSingleFile)(split_times=[670]). 1 Input Files, 100mb total:" - - "L1, all files 100mb " - - "L1.82[358,672] 1.03us |-----------------------------------------L1.82------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" - - "L1 " - - "L1.?[358,670] 1.03us 99mb|-----------------------------------------L1.?------------------------------------------| " - - "L1.?[671,672] 1.03us 651kb |L1.?|" - - "**** Simulation run 17, type=split(HighL0OverlapSingleFile)(split_times=[356]). 1 Input Files, 100mb total:" - - "L1, all files 100mb " - - "L1.81[42,357] 1.03us |-----------------------------------------L1.81------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 100mb total:" - - "L1 " - - "L1.?[42,356] 1.03us 100mb|-----------------------------------------L1.?------------------------------------------| " - - "L1.?[357,357] 1.03us 325kb |L1.?|" - - "**** Simulation run 18, type=split(HighL0OverlapSingleFile)(split_times=[356, 670]). 1 Input Files, 200mb total:" - - "L0, all files 200mb " - - "L0.92[42,986] 1.05us |-----------------------------------------L0.92------------------------------------------|" - - "**** 3 Output Files (parquet_file_id not yet assigned), 200mb total:" - - "L0 " - - "L0.?[42,356] 1.05us 67mb |-----------L0.?------------| " - - "L0.?[357,670] 1.05us 66mb |-----------L0.?------------| " - - "L0.?[671,986] 1.05us 67mb |------------L0.?------------| " + - "**** Simulation run 16, type=compact(ManySmallFiles). 2 Input Files, 66mb total:" + - "L0, all files 33mb " + - "L0.87[358,670] 1.04us |-----------------------------------------L0.87------------------------------------------|" + - "L0.91[358,670] 1.05us |-----------------------------------------L0.91------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 66mb total:" + - "L0, all files 66mb " + - "L0.?[358,670] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 3 files: L1.81, L1.82, L0.92" - - " Creating 7 files" - - "**** Simulation run 19, type=split(ReduceOverlap)(split_times=[672]). 1 Input Files, 67mb total:" + - " Soft Deleting 2 files: L0.87, L0.91" + - " Creating 1 files" + - "**** Simulation run 17, type=compact(ManySmallFiles). 
2 Input Files, 218kb total:" + - "L0, all files 109kb " + - "L0.84[671,672] 1.04us |-----------------------------------------L0.84------------------------------------------|" + - "L0.88[671,672] 1.05us |-----------------------------------------L0.88------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 218kb total:" + - "L0, all files 218kb " + - "L0.?[671,672] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 2 files: L0.84, L0.88" + - " Creating 1 files" + - "**** Simulation run 18, type=compact(ManySmallFiles). 2 Input Files, 67mb total:" + - "L0, all files 33mb " + - "L0.85[673,986] 1.04us |-----------------------------------------L0.85------------------------------------------|" + - "L0.89[673,986] 1.05us |-----------------------------------------L0.89------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 67mb total:" - "L0, all files 67mb " - - "L0.99[671,986] 1.05us |-----------------------------------------L0.99------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 67mb total:" - - "L0 " - - "L0.?[671,672] 1.05us 218kb|L0.?| " - - "L0.?[673,986] 1.05us 67mb|-----------------------------------------L0.?------------------------------------------| " - - "**** Simulation run 20, type=split(ReduceOverlap)(split_times=[357]). 1 Input Files, 66mb total:" - - "L0, all files 66mb " - - "L0.98[357,670] 1.05us |-----------------------------------------L0.98------------------------------------------|" - - "**** 2 Output Files (parquet_file_id not yet assigned), 66mb total:" - - "L0 " - - "L0.?[357,357] 1.05us 0b |L0.?| " - - "L0.?[358,670] 1.05us 66mb|-----------------------------------------L0.?------------------------------------------| " + - "L0.?[673,986] 1.05us |------------------------------------------L0.?------------------------------------------|" - "Committing partition 1:" - - " Soft Deleting 2 files: L0.98, L0.99" - - " Creating 4 files" - - "**** Simulation run 21, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[232]). 4 Input Files, 167mb total:" + - " Soft Deleting 2 files: L0.85, L0.89" + - " Creating 1 files" + - "**** Simulation run 19, type=compact(ManySmallFiles). 2 Input Files, 0b total:" + - "L0, all files 0b " + - "L0.86[357,357] 1.04us |-----------------------------------------L0.86------------------------------------------|" + - "L0.90[357,357] 1.05us |-----------------------------------------L0.90------------------------------------------|" + - "**** 1 Output Files (parquet_file_id not yet assigned), 0b total:" + - "L0, all files 0b " + - "L0.?[357,357] 1.05us |------------------------------------------L0.?------------------------------------------|" + - "Committing partition 1:" + - " Soft Deleting 2 files: L0.86, L0.90" + - " Creating 1 files" + - "**** Simulation run 20, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[232]). 
3 Input Files, 167mb total:" - "L0 " - - "L0.97[42,356] 1.05us 67mb|-----------------------------------------L0.97-----------------------------------------| " - - "L0.102[357,357] 1.05us 0b |L0.102|" + - "L0.92[42,356] 1.05us 67mb|-----------------------------------------L0.92-----------------------------------------| " + - "L0.96[357,357] 1.05us 0b |L0.96|" - "L1 " - - "L1.95[42,356] 1.03us 100mb|-----------------------------------------L1.95-----------------------------------------| " - - "L1.96[357,357] 1.03us 325kb |L1.96|" + - "L1.81[42,357] 1.03us 100mb|-----------------------------------------L1.81------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 167mb total:" - "L1 " - "L1.?[42,232] 1.05us 101mb|------------------------L1.?------------------------| " - "L1.?[233,357] 1.05us 66mb |--------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L1.95, L1.96, L0.97, L0.102" + - " Soft Deleting 3 files: L1.81, L0.92, L0.96" - " Creating 2 files" - - "**** Simulation run 22, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[547]). 4 Input Files, 166mb total:" + - "**** Simulation run 21, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[547]). 3 Input Files, 166mb total:" - "L0 " - - "L0.103[358,670] 1.05us 66mb|----------------------------------------L0.103-----------------------------------------| " - - "L0.100[671,672] 1.05us 218kb |L0.100|" + - "L0.93[358,670] 1.05us 66mb|-----------------------------------------L0.93-----------------------------------------| " + - "L0.94[671,672] 1.05us 218kb |L0.94|" - "L1 " - - "L1.93[358,670] 1.03us 99mb|-----------------------------------------L1.93-----------------------------------------| " - - "L1.94[671,672] 1.03us 651kb |L1.94|" + - "L1.82[358,672] 1.03us 100mb|-----------------------------------------L1.82------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 166mb total:" - "L1 " - "L1.?[358,547] 1.05us 100mb|------------------------L1.?------------------------| " - "L1.?[548,672] 1.05us 66mb |--------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L1.93, L1.94, L0.100, L0.103" + - " Soft Deleting 3 files: L1.82, L0.93, L0.94" - " Creating 2 files" - - "**** Simulation run 23, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[861]). 2 Input Files, 167mb total:" + - "**** Simulation run 22, type=split(CompactAndSplitOutput(TotalSizeLessThanMaxCompactSize))(split_times=[861]). 2 Input Files, 167mb total:" - "L0 " - - "L0.101[673,986] 1.05us 67mb|-----------------------------------------L0.101-----------------------------------------|" + - "L0.95[673,986] 1.05us 67mb|-----------------------------------------L0.95------------------------------------------|" - "L1 " - "L1.83[673,986] 1.03us 100mb|-----------------------------------------L1.83------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 167mb total:" @@ -844,60 +832,60 @@ async fn random_backfill_over_l2s() { - "L1.?[673,861] 1.05us 100mb|------------------------L1.?------------------------| " - "L1.?[862,986] 1.05us 67mb |--------------L1.?---------------| " - "Committing partition 1:" - - " Soft Deleting 2 files: L1.83, L0.101" + - " Soft Deleting 2 files: L1.83, L0.95" - " Creating 2 files" - - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[399, 499]). 
1 Input Files, 100mb total:" + - "**** Simulation run 23, type=split(ReduceOverlap)(split_times=[399, 499]). 1 Input Files, 100mb total:" - "L1, all files 100mb " - - "L1.106[358,547] 1.05us |-----------------------------------------L1.106-----------------------------------------|" + - "L1.99[358,547] 1.05us |-----------------------------------------L1.99------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - "L1.?[358,399] 1.05us 22mb|------L1.?-------| " - "L1.?[400,499] 1.05us 52mb |--------------------L1.?---------------------| " - "L1.?[500,547] 1.05us 26mb |--------L1.?--------| " - - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[299]). 1 Input Files, 66mb total:" + - "**** Simulation run 24, type=split(ReduceOverlap)(split_times=[299]). 1 Input Files, 66mb total:" - "L1, all files 66mb " - - "L1.105[233,357] 1.05us |-----------------------------------------L1.105-----------------------------------------|" + - "L1.98[233,357] 1.05us |-----------------------------------------L1.98------------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 66mb total:" - "L1 " - "L1.?[233,299] 1.05us 35mb|--------------------L1.?---------------------| " - "L1.?[300,357] 1.05us 31mb |-----------------L1.?------------------| " - - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[99, 199]). 1 Input Files, 101mb total:" + - "**** Simulation run 25, type=split(ReduceOverlap)(split_times=[99, 199]). 1 Input Files, 101mb total:" - "L1, all files 101mb " - - "L1.104[42,232] 1.05us |-----------------------------------------L1.104-----------------------------------------|" + - "L1.97[42,232] 1.05us |-----------------------------------------L1.97------------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 101mb total:" - "L1 " - "L1.?[42,99] 1.05us 30mb |----------L1.?-----------| " - "L1.?[100,199] 1.05us 52mb |--------------------L1.?--------------------| " - "L1.?[200,232] 1.05us 18mb |----L1.?-----| " - - "**** Simulation run 27, type=split(ReduceOverlap)(split_times=[599]). 1 Input Files, 66mb total:" + - "**** Simulation run 26, type=split(ReduceOverlap)(split_times=[599]). 1 Input Files, 66mb total:" - "L1, all files 66mb " - - "L1.107[548,672] 1.05us |-----------------------------------------L1.107-----------------------------------------|" + - "L1.100[548,672] 1.05us |-----------------------------------------L1.100-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 66mb total:" - "L1 " - "L1.?[548,599] 1.05us 27mb|---------------L1.?----------------| " - "L1.?[600,672] 1.05us 39mb |-----------------------L1.?-----------------------| " - - "**** Simulation run 28, type=split(ReduceOverlap)(split_times=[899]). 1 Input Files, 67mb total:" + - "**** Simulation run 27, type=split(ReduceOverlap)(split_times=[899]). 
1 Input Files, 67mb total:" - "L1, all files 67mb " - - "L1.109[862,986] 1.05us |-----------------------------------------L1.109-----------------------------------------|" + - "L1.102[862,986] 1.05us |-----------------------------------------L1.102-----------------------------------------|" - "**** 2 Output Files (parquet_file_id not yet assigned), 67mb total:" - "L1 " - "L1.?[862,899] 1.05us 20mb|----------L1.?----------| " - "L1.?[900,986] 1.05us 47mb |----------------------------L1.?----------------------------| " - - "**** Simulation run 29, type=split(ReduceOverlap)(split_times=[699, 799]). 1 Input Files, 100mb total:" + - "**** Simulation run 28, type=split(ReduceOverlap)(split_times=[699, 799]). 1 Input Files, 100mb total:" - "L1, all files 100mb " - - "L1.108[673,861] 1.05us |-----------------------------------------L1.108-----------------------------------------|" + - "L1.101[673,861] 1.05us |-----------------------------------------L1.101-----------------------------------------|" - "**** 3 Output Files (parquet_file_id not yet assigned), 100mb total:" - "L1 " - "L1.?[673,699] 1.05us 14mb|---L1.?---| " - "L1.?[700,799] 1.05us 53mb |--------------------L1.?---------------------| " - "L1.?[800,861] 1.05us 34mb |-----------L1.?------------| " - "Committing partition 1:" - - " Soft Deleting 6 files: L1.104, L1.105, L1.106, L1.107, L1.108, L1.109" + - " Soft Deleting 6 files: L1.97, L1.98, L1.99, L1.100, L1.101, L1.102" - " Creating 15 files" - - "**** Simulation run 30, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[71, 142]). 4 Input Files, 283mb total:" + - "**** Simulation run 29, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[71, 142]). 4 Input Files, 283mb total:" - "L1 " - - "L1.115[42,99] 1.05us 30mb |--------L1.115---------| " - - "L1.116[100,199] 1.05us 52mb |------------------L1.116------------------| " + - "L1.108[42,99] 1.05us 30mb |--------L1.108---------| " + - "L1.109[100,199] 1.05us 52mb |------------------L1.109------------------| " - "L2 " - "L2.1[0,99] 99ns 100mb |-------------------L2.1-------------------| " - "L2.2[100,199] 199ns 100mb |-------------------L2.2-------------------| " @@ -907,13 +895,13 @@ async fn random_backfill_over_l2s() { - "L2.?[72,142] 1.05us 99mb |------------L2.?-------------| " - "L2.?[143,199] 1.05us 82mb |---------L2.?----------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L2.1, L2.2, L1.115, L1.116" + - " Soft Deleting 4 files: L2.1, L2.2, L1.108, L1.109" - " Creating 3 files" - - "**** Simulation run 31, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[271, 342]). 5 Input Files, 284mb total:" + - "**** Simulation run 30, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[271, 342]). 
5 Input Files, 284mb total:" - "L1 " - - "L1.117[200,232] 1.05us 18mb|---L1.117---| " - - "L1.113[233,299] 1.05us 35mb |----------L1.113-----------| " - - "L1.114[300,357] 1.05us 31mb |--------L1.114---------| " + - "L1.110[200,232] 1.05us 18mb|---L1.110---| " + - "L1.106[233,299] 1.05us 35mb |----------L1.106-----------| " + - "L1.107[300,357] 1.05us 31mb |--------L1.107---------| " - "L2 " - "L2.3[200,299] 299ns 100mb|-------------------L2.3-------------------| " - "L2.4[300,399] 399ns 100mb |-------------------L2.4-------------------| " @@ -923,14 +911,14 @@ async fn random_backfill_over_l2s() { - "L2.?[272,342] 1.05us 100mb |------------L2.?-------------| " - "L2.?[343,399] 1.05us 83mb |---------L2.?----------| " - "Committing partition 1:" - - " Soft Deleting 5 files: L2.3, L2.4, L1.113, L1.114, L1.117" + - " Soft Deleting 5 files: L2.3, L2.4, L1.106, L1.107, L1.110" - " Creating 3 files" - - "**** Simulation run 32, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[404, 465]). 4 Input Files, 257mb total:" + - "**** Simulation run 31, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[404, 465]). 4 Input Files, 257mb total:" - "L1 " - - "L1.110[358,399] 1.05us 22mb |-------L1.110--------| " - - "L1.111[400,499] 1.05us 52mb |------------------------L1.111-------------------------| " + - "L1.103[358,399] 1.05us 22mb |-------L1.103--------| " + - "L1.104[400,499] 1.05us 52mb |------------------------L1.104-------------------------| " - "L2 " - - "L2.130[343,399] 1.05us 83mb|------------L2.130------------| " + - "L2.123[343,399] 1.05us 83mb|------------L2.123------------| " - "L2.5[400,499] 499ns 100mb |-------------------------L2.5--------------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 257mb total:" - "L2 " @@ -938,13 +926,13 @@ async fn random_backfill_over_l2s() { - "L2.?[405,465] 1.05us 99mb |--------------L2.?--------------| " - "L2.?[466,499] 1.05us 58mb |------L2.?-------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L2.5, L1.110, L1.111, L2.130" + - " Soft Deleting 4 files: L2.5, L1.103, L1.104, L2.123" - " Creating 3 files" - - "**** Simulation run 33, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[569, 638]). 5 Input Files, 292mb total:" + - "**** Simulation run 32, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[569, 638]). 5 Input Files, 292mb total:" - "L1 " - - "L1.112[500,547] 1.05us 26mb|------L1.112-------| " - - "L1.118[548,599] 1.05us 27mb |-------L1.118--------| " - - "L1.119[600,672] 1.05us 39mb |------------L1.119------------| " + - "L1.105[500,547] 1.05us 26mb|------L1.105-------| " + - "L1.111[548,599] 1.05us 27mb |-------L1.111--------| " + - "L1.112[600,672] 1.05us 39mb |------------L1.112------------| " - "L2 " - "L2.6[500,599] 599ns 100mb|-------------------L2.6-------------------| " - "L2.7[600,699] 699ns 100mb |-------------------L2.7-------------------| " @@ -954,14 +942,14 @@ async fn random_backfill_over_l2s() { - "L2.?[570,638] 1.05us 100mb |------------L2.?------------| " - "L2.?[639,699] 1.05us 91mb |----------L2.?-----------| " - "Committing partition 1:" - - " Soft Deleting 5 files: L2.6, L2.7, L1.112, L1.118, L1.119" + - " Soft Deleting 5 files: L2.6, L2.7, L1.105, L1.111, L1.112" - " Creating 3 files" - - "**** Simulation run 34, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[702, 765]). 
4 Input Files, 258mb total:" + - "**** Simulation run 33, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[702, 765]). 4 Input Files, 258mb total:" - "L1 " - - "L1.122[673,699] 1.05us 14mb |---L1.122---| " - - "L1.123[700,799] 1.05us 53mb |-----------------------L1.123------------------------| " + - "L1.115[673,699] 1.05us 14mb |---L1.115---| " + - "L1.116[700,799] 1.05us 53mb |-----------------------L1.116------------------------| " - "L2 " - - "L2.136[639,699] 1.05us 91mb|------------L2.136-------------| " + - "L2.129[639,699] 1.05us 91mb|------------L2.129-------------| " - "L2.8[700,799] 799ns 100mb |------------------------L2.8-------------------------| " - "**** 3 Output Files (parquet_file_id not yet assigned), 258mb total:" - "L2 " @@ -969,12 +957,12 @@ async fn random_backfill_over_l2s() { - "L2.?[703,765] 1.05us 100mb |--------------L2.?--------------| " - "L2.?[766,799] 1.05us 56mb |------L2.?------| " - "Committing partition 1:" - - " Soft Deleting 4 files: L2.8, L1.122, L1.123, L2.136" + - " Soft Deleting 4 files: L2.8, L1.115, L1.116, L2.129" - " Creating 3 files" - - "**** Simulation run 35, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[865]). 3 Input Files, 154mb total:" + - "**** Simulation run 34, type=split(CompactAndSplitOutput(FoundSubsetLessThanMaxCompactSize))(split_times=[865]). 3 Input Files, 154mb total:" - "L1 " - - "L1.124[800,861] 1.05us 34mb|-----------------------L1.124------------------------| " - - "L1.120[862,899] 1.05us 20mb |------------L1.120-------------| " + - "L1.117[800,861] 1.05us 34mb|-----------------------L1.117------------------------| " + - "L1.113[862,899] 1.05us 20mb |------------L1.113-------------| " - "L2 " - "L2.9[800,899] 899ns 100mb|-----------------------------------------L2.9------------------------------------------| " - "**** 2 Output Files (parquet_file_id not yet assigned), 154mb total:" @@ -982,28 +970,28 @@ async fn random_backfill_over_l2s() { - "L2.?[800,865] 1.05us 101mb|--------------------------L2.?---------------------------| " - "L2.?[866,899] 1.05us 53mb |-----------L2.?------------| " - "Committing partition 1:" - - " Soft Deleting 3 files: L2.9, L1.120, L1.124" + - " Soft Deleting 3 files: L2.9, L1.113, L1.117" - " Creating 2 files" - - "**** Final Output Files (4.58gb written)" + - "**** Final Output Files (4.06gb written)" - "L1 " - - "L1.121[900,986] 1.05us 47mb |L1.121| " + - "L1.114[900,986] 1.05us 47mb |L1.114| " - "L2 " - "L2.10[900,999] 999ns 100mb |L2.10-| " - - "L2.125[0,71] 1.05us 101mb|L2.125| " - - "L2.126[72,142] 1.05us 99mb |L2.126| " - - "L2.127[143,199] 1.05us 82mb |L2.127| " - - "L2.128[200,271] 1.05us 101mb |L2.128| " - - "L2.129[272,342] 1.05us 100mb |L2.129| " - - "L2.131[343,404] 1.05us 100mb |L2.131| " - - "L2.132[405,465] 1.05us 99mb |L2.132| " - - "L2.133[466,499] 1.05us 58mb |L2.133| " - - "L2.134[500,569] 1.05us 101mb |L2.134| " - - "L2.135[570,638] 1.05us 100mb |L2.135| " - - "L2.137[639,702] 1.05us 101mb |L2.137| " - - "L2.138[703,765] 1.05us 100mb |L2.138| " - - "L2.139[766,799] 1.05us 56mb |L2.139| " - - "L2.140[800,865] 1.05us 101mb |L2.140| " - - "L2.141[866,899] 1.05us 53mb |L2.141| " + - "L2.118[0,71] 1.05us 101mb|L2.118| " + - "L2.119[72,142] 1.05us 99mb |L2.119| " + - "L2.120[143,199] 1.05us 82mb |L2.120| " + - "L2.121[200,271] 1.05us 101mb |L2.121| " + - "L2.122[272,342] 1.05us 100mb |L2.122| " + - "L2.124[343,404] 1.05us 100mb |L2.124| " + - "L2.125[405,465] 1.05us 99mb |L2.125| " + - "L2.126[466,499] 1.05us 
58mb |L2.126| " + - "L2.127[500,569] 1.05us 101mb |L2.127| " + - "L2.128[570,638] 1.05us 100mb |L2.128| " + - "L2.130[639,702] 1.05us 101mb |L2.130| " + - "L2.131[703,765] 1.05us 100mb |L2.131| " + - "L2.132[766,799] 1.05us 56mb |L2.132| " + - "L2.133[800,865] 1.05us 101mb |L2.133| " + - "L2.134[866,899] 1.05us 53mb |L2.134| " "### ); } @@ -3020,63 +3008,66 @@ async fn actual_case_from_catalog_1() { - "WARNING: file L0.161[327,333] 336ns 183mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.162[330,338] 340ns 231mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.163[331,338] 341ns 232mb exceeds soft limit 100mb by more than 50%" - - "**** Final Output Files (17.64gb written)" + - "**** Final Output Files (15.47gb written)" - "L2 " - - "L2.578[134,149] 342ns 202mb |L2.578| " - - "L2.579[150,165] 342ns 218mb |L2.579| " - - "L2.580[166,176] 342ns 186mb |L2.580| " - - "L2.581[177,182] 342ns 150mb |L2.581| " - - "L2.582[183,197] 342ns 267mb |L2.582| " - - "L2.583[198,207] 342ns 157mb |L2.583| " - - "L2.584[208,220] 342ns 147mb |L2.584| " - - "L2.585[221,232] 342ns 270mb |L2.585| " - - "L2.588[233,253] 342ns 286mb |L2.588| " - - "L2.589[254,270] 342ns 289mb |L2.589| " - - "L2.590[271,281] 342ns 225mb |L2.590| " - - "L2.591[282,296] 342ns 234mb |L2.591| " - - "L2.592[297,302] 342ns 232mb |L2.592| " - - "L2.593[303,308] 342ns 244mb |L2.593| " - - "L2.594[309,314] 342ns 282mb |L2.594|" - - "L2.595[315,317] 342ns 214mb |L2.595|" - - "L2.596[318,320] 342ns 222mb |L2.596|" - - "L2.597[321,323] 342ns 146mb |L2.597|" - - "L2.598[324,326] 342ns 254mb |L2.598|" - - "L2.599[327,329] 342ns 197mb |L2.599|" - - "L2.600[330,332] 342ns 228mb |L2.600|" - - "L2.601[333,335] 342ns 199mb |L2.601|" - - "L2.602[336,338] 342ns 280mb |L2.602|" - - "L2.850[1,26] 342ns 101mb |L2.850| " - - "L2.853[69,85] 342ns 104mb |L2.853| " - - "L2.854[86,98] 342ns 107mb |L2.854| " - - "L2.861[27,48] 342ns 103mb |L2.861| " - - "L2.862[49,68] 342ns 98mb |L2.862| " - - "L2.863[99,108] 342ns 102mb |L2.863| " - - "L2.864[109,117] 342ns 91mb |L2.864| " - - "L2.865[118,124] 342ns 91mb |L2.865| " - - "L2.866[125,130] 342ns 107mb |L2.866| " - - "L2.867[131,133] 342ns 64mb |L2.867| " - - "L2.868[339,339] 342ns 25mb |L2.868|" - - "WARNING: file L2.578[134,149] 342ns 202mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.579[150,165] 342ns 218mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.580[166,176] 342ns 186mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.581[177,182] 342ns 150mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.582[183,197] 342ns 267mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.583[198,207] 342ns 157mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.585[221,232] 342ns 270mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.588[233,253] 342ns 286mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.589[254,270] 342ns 289mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.590[271,281] 342ns 225mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.591[282,296] 342ns 234mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.592[297,302] 342ns 232mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.593[303,308] 342ns 244mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.594[309,314] 342ns 282mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.595[315,317] 342ns 214mb exceeds 
soft limit 100mb by more than 50%" - - "WARNING: file L2.596[318,320] 342ns 222mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.598[324,326] 342ns 254mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.599[327,329] 342ns 197mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.600[330,332] 342ns 228mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.601[333,335] 342ns 199mb exceeds soft limit 100mb by more than 50%" - - "WARNING: file L2.602[336,338] 342ns 280mb exceeds soft limit 100mb by more than 50%" + - "L2.594[150,165] 342ns 218mb |L2.594| " + - "L2.595[166,171] 342ns 118mb |L2.595| " + - "L2.598[183,197] 342ns 267mb |L2.598| " + - "L2.599[198,207] 342ns 157mb |L2.599| " + - "L2.600[208,220] 342ns 147mb |L2.600| " + - "L2.601[221,232] 342ns 270mb |L2.601| " + - "L2.602[233,244] 342ns 147mb |L2.602| " + - "L2.603[245,253] 342ns 139mb |L2.603| " + - "L2.604[271,276] 342ns 117mb |L2.604| " + - "L2.605[277,281] 342ns 109mb |L2.605| " + - "L2.612[254,261] 342ns 105mb |L2.612| " + - "L2.613[262,270] 342ns 184mb |L2.613| " + - "L2.616[309,311] 342ns 101mb |L2.616|" + - "L2.617[312,314] 342ns 181mb |L2.617|" + - "L2.618[315,317] 342ns 214mb |L2.618|" + - "L2.619[318,320] 342ns 222mb |L2.619|" + - "L2.620[321,323] 342ns 146mb |L2.620|" + - "L2.621[324,326] 342ns 254mb |L2.621|" + - "L2.622[327,329] 342ns 197mb |L2.622|" + - "L2.623[330,332] 342ns 228mb |L2.623|" + - "L2.624[333,335] 342ns 199mb |L2.624|" + - "L2.625[336,337] 342ns 156mb |L2.625|" + - "L2.626[338,338] 342ns 124mb |L2.626|" + - "L2.628[1,36] 342ns 103mb |L2.628-| " + - "L2.629[37,71] 342ns 103mb |L2.629-| " + - "L2.630[72,83] 342ns 103mb |L2.630| " + - "L2.638[172,177] 342ns 109mb |L2.638| " + - "L2.639[178,182] 342ns 109mb |L2.639| " + - "L2.640[282,288] 342ns 100mb |L2.640| " + - "L2.643[300,303] 342ns 110mb |L2.643| " + - "L2.646[84,94] 342ns 107mb |L2.646| " + - "L2.647[95,104] 342ns 97mb |L2.647| " + - "L2.648[105,111] 342ns 86mb |L2.648| " + - "L2.649[112,119] 342ns 114mb |L2.649| " + - "L2.650[120,126] 342ns 98mb |L2.650| " + - "L2.651[127,130] 342ns 82mb |L2.651| " + - "L2.652[131,138] 342ns 108mb |L2.652| " + - "L2.653[139,145] 342ns 93mb |L2.653| " + - "L2.654[146,149] 342ns 77mb |L2.654| " + - "L2.655[289,293] 342ns 110mb |L2.655| " + - "L2.656[294,297] 342ns 82mb |L2.656| " + - "L2.657[298,299] 342ns 82mb |L2.657| " + - "L2.658[304,306] 342ns 113mb |L2.658| " + - "L2.659[307,308] 342ns 113mb |L2.659| " + - "L2.660[339,339] 342ns 25mb |L2.660|" + - "WARNING: file L2.594[150,165] 342ns 218mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.598[183,197] 342ns 267mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.599[198,207] 342ns 157mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.601[221,232] 342ns 270mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.613[262,270] 342ns 184mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.617[312,314] 342ns 181mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.618[315,317] 342ns 214mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.619[318,320] 342ns 222mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.621[324,326] 342ns 254mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.622[327,329] 342ns 197mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.623[330,332] 342ns 228mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.624[333,335] 
342ns 199mb exceeds soft limit 100mb by more than 50%" + - "WARNING: file L2.625[336,337] 342ns 156mb exceeds soft limit 100mb by more than 50%" "### ); } diff --git a/compactor/tests/layouts/many_files.rs b/compactor/tests/layouts/many_files.rs index 1b78518077..47f724fcd5 100644 --- a/compactor/tests/layouts/many_files.rs +++ b/compactor/tests/layouts/many_files.rs @@ -4670,17 +4670,17 @@ async fn l0s_almost_needing_vertical_split() { - "L0.998[24,100] 1.02us |-----------------------------------------L0.998-----------------------------------------|" - "L0.999[24,100] 1.02us |-----------------------------------------L0.999-----------------------------------------|" - "L0.1000[24,100] 1.02us |----------------------------------------L0.1000-----------------------------------------|" - - "**** Final Output Files (6.5gb written)" + - "**** Final Output Files (5.23gb written)" - "L2 " - - "L2.3141[24,37] 1.02us 108mb|---L2.3141---| " - - "L2.3150[38,49] 1.02us 102mb |--L2.3150--| " - - "L2.3151[50,60] 1.02us 93mb |-L2.3151-| " - - "L2.3152[61,63] 1.02us 37mb |L2.3152| " - - "L2.3153[64,73] 1.02us 101mb |L2.3153-| " - - "L2.3154[74,82] 1.02us 90mb |L2.3154| " - - "L2.3155[83,90] 1.02us 101mb |L2.3155| " - - "L2.3156[91,98] 1.02us 93mb |L2.3156| " - - "L2.3157[99,100] 1.02us 26mb |L2.3157|" + - "L2.3086[24,35] 1.02us 102mb|--L2.3086--| " + - "L2.3095[36,47] 1.02us 105mb |--L2.3095--| " + - "L2.3096[48,58] 1.02us 95mb |-L2.3096-| " + - "L2.3097[59,65] 1.02us 76mb |L2.3097| " + - "L2.3098[66,76] 1.02us 106mb |-L2.3098-| " + - "L2.3099[77,86] 1.02us 96mb |L2.3099-| " + - "L2.3100[87,90] 1.02us 53mb |L2.3100| " + - "L2.3101[91,98] 1.02us 90mb |L2.3101| " + - "L2.3102[99,100] 1.02us 26mb |L2.3102|" "### ); } diff --git a/compactor/tests/layouts/stuck.rs b/compactor/tests/layouts/stuck.rs index ff81c28800..c9e7996c0c 100644 --- a/compactor/tests/layouts/stuck.rs +++ b/compactor/tests/layouts/stuck.rs @@ -1111,81 +1111,78 @@ async fn stuck_l0() { - "L2.59[1686863759000000000,1686867839000000000] 1686928811.43s 96mb |--L2.59--| " - "L2.74[1686867899000000000,1686868319000000000] 1686928811.43s 14mb |L2.74| " - "L2.78[1686868379000000000,1686873599000000000] 1686928118.43s 39mb |---L2.78----| " - - "**** Final Output Files (51.79gb written)" - - "L1 " - - "L1.1998[1686873236374845980,1686873480877117666] 1686936871.55s 48mb |L1.1998|" - - "L1.1999[1686873480877117667,1686873599000000000] 1686936871.55s 23mb |L1.1999|" + - "**** Final Output Files (43.43gb written)" - "L2 " - - "L2.1842[1686841379000000000,1686842394885830950] 1686936871.55s 100mb|L2.1842| " - - "L2.1843[1686842394885830951,1686842786390926609] 1686936871.55s 39mb |L2.1843| " - - "L2.1844[1686842786390926610,1686843316416262506] 1686936871.55s 100mb |L2.1844| " - - "L2.1849[1686843316416262507,1686843684225377382] 1686936871.55s 100mb |L2.1849| " - - "L2.1850[1686843684225377383,1686844052034492257] 1686936871.55s 100mb |L2.1850| " - - "L2.1851[1686844052034492258,1686844193781853218] 1686936871.55s 39mb |L2.1851| " - - "L2.1852[1686844193781853219,1686844848044943955] 1686936871.55s 100mb |L2.1852| " - - "L2.1857[1686844848044943956,1686845161518311556] 1686936871.55s 100mb |L2.1857| " - - "L2.1858[1686845161518311557,1686845474991679156] 1686936871.55s 100mb |L2.1858| " - - "L2.1859[1686845474991679157,1686845579000000000] 1686936871.55s 33mb |L2.1859| " - - "L2.1867[1686845579000000001,1686846586992441776] 1686936871.55s 100mb |L2.1867| " - - "L2.1872[1686846586992441777,1686847213091266628] 1686936871.55s 100mb |L2.1872| " - - 
"L2.1873[1686847213091266629,1686847594984883551] 1686936871.55s 61mb |L2.1873| " - - "L2.1874[1686847594984883552,1686847967133303901] 1686936871.55s 100mb |L2.1874| " - - "L2.1875[1686847967133303902,1686848339281724250] 1686936871.55s 100mb |L2.1875| " - - "L2.1876[1686848339281724251,1686848602977306099] 1686936871.55s 71mb |L2.1876| " - - "L2.1877[1686848602977306100,1686849027096192900] 1686936871.55s 100mb |L2.1877| " - - "L2.1878[1686849027096192901,1686849451215079700] 1686936871.55s 100mb |L2.1878| " - - "L2.1879[1686849451215079701,1686849779000000000] 1686936871.55s 77mb |L2.1879| " - - "L2.1887[1686849779000000001,1686850474198233689] 1686936871.55s 100mb |L2.1887| " - - "L2.1888[1686850474198233690,1686851169396467377] 1686936871.55s 100mb |L2.1888| " - - "L2.1889[1686851169396467378,1686851644365067698] 1686936871.55s 68mb |L2.1889| " - - "L2.1890[1686851644365067699,1686852420659097557] 1686936871.55s 100mb |L2.1890| " - - "L2.1895[1686852420659097558,1686852801563322990] 1686936871.55s 100mb |L2.1895| " - - "L2.1896[1686852801563322991,1686853182467548422] 1686936871.55s 100mb |L2.1896| " - - "L2.1897[1686853182467548423,1686853509730135395] 1686936871.55s 86mb |L2.1897| " - - "L2.1898[1686853509730135396,1686853969686755621] 1686936871.55s 100mb |L2.1898| " - - "L2.1899[1686853969686755622,1686854429643375846] 1686936871.55s 100mb |L2.1899| " - - "L2.1900[1686854429643375847,1686854819000000000] 1686936871.55s 85mb |L2.1900| " - - "L2.1908[1686854819000000001,1686855825796452886] 1686936871.55s 100mb |L2.1908| " - - "L2.1913[1686855825796452887,1686856447395293366] 1686936871.55s 100mb |L2.1913| " - - "L2.1914[1686856447395293367,1686856832592905771] 1686936871.55s 62mb |L2.1914| " - - "L2.1915[1686856832592905772,1686857205112223577] 1686936871.55s 100mb |L2.1915| " - - "L2.1916[1686857205112223578,1686857577631541382] 1686936871.55s 100mb |L2.1916| " - - "L2.1917[1686857577631541383,1686857839389339451] 1686936871.55s 70mb |L2.1917| " - - "L2.1918[1686857839389339452,1686858253462843732] 1686936871.55s 100mb |L2.1918| " - - "L2.1919[1686858253462843733,1686858667536348012] 1686936871.55s 100mb |L2.1919| " - - "L2.1920[1686858667536348013,1686859019000000000] 1686936871.55s 85mb |L2.1920| " - - "L2.1928[1686859019000000001,1686859728979165803] 1686936871.55s 100mb |L2.1928| " - - "L2.1929[1686859728979165804,1686860438958331605] 1686936871.55s 100mb |L2.1929| " - - "L2.1931[1686860438958331606,1686860836834049919] 1686936871.55s 100mb |L2.1931| " - - "L2.1932[1686860836834049920,1686861148937490634] 1686936871.55s 78mb |L2.1932| " - - "L2.1933[1686861148937490635,1686861914356348643] 1686936871.55s 100mb |L2.1933| " - - "L2.1938[1686861914356348644,1686862293494638783] 1686936871.55s 100mb |L2.1938| " - - "L2.1939[1686862293494638784,1686862672632928922] 1686936871.55s 100mb |L2.1939| " - - "L2.1940[1686862672632928923,1686862679775206651] 1686936871.55s 2mb |L2.1940| " - - "L2.1941[1686862679775206652,1686863077953136510] 1686936871.55s 100mb |L2.1941| " - - "L2.1942[1686863077953136511,1686863476131066368] 1686936871.55s 100mb |L2.1942| " - - "L2.1943[1686863476131066369,1686863699000000000] 1686936871.55s 56mb |L2.1943| " - - "L2.1951[1686863699000000001,1686864349392123893] 1686936871.55s 100mb |L2.1951| " - - "L2.1952[1686864349392123894,1686864999784247785] 1686936871.55s 100mb |L2.1952| " - - "L2.1953[1686864999784247786,1686865607586435465] 1686936871.55s 93mb |L2.1953| " - - "L2.1954[1686865607586435466,1686866298103608885] 1686936871.55s 100mb |L2.1954| " - - 
"L2.1959[1686866298103608886,1686866809270889026] 1686936871.55s 100mb |L2.1959| " - - "L2.1964[1686866809270889027,1686867189314140837] 1686936871.55s 100mb |L2.1964| " - - "L2.1965[1686867189314140838,1686867569357392647] 1686936871.55s 100mb |L2.1965| " - - "L2.1966[1686867569357392648,1686867839000000000] 1686936871.55s 71mb |L2.1966| " - - "L2.1974[1686867839000000001,1686868754092704252] 1686936871.55s 100mb |L2.1974| " - - "L2.1975[1686868754092704253,1686869669185408503] 1686936871.55s 100mb |L2.1975| " - - "L2.1976[1686869669185408504,1686869927156879962] 1686936871.55s 28mb |L2.1976| " - - "L2.1984[1686869927156879963,1686870583552763572] 1686936871.55s 100mb |L2.1984| " - - "L2.1989[1686870583552763573,1686870997054710512] 1686936871.55s 100mb |L2.1989|" - - "L2.1990[1686870997054710513,1686871410556657451] 1686936871.55s 100mb |L2.1990|" - - "L2.1992[1686871410556657452,1686871702557232840] 1686936871.55s 100mb |L2.1992|" - - "L2.1993[1686871702557232841,1686871994557808228] 1686936871.55s 100mb |L2.1993|" - - "L2.1994[1686871994557808229,1686872015313759923] 1686936871.55s 7mb |L2.1994|" - - "L2.1997[1686873480877117667,1686873599000000000] 1686936871.55s 16mb |L2.1997|" - - "L2.2002[1686872015313759924,1686872503834880725] 1686936871.55s 100mb |L2.2002|" - - "L2.2003[1686872503834880726,1686872992356001526] 1686936871.55s 100mb |L2.2003|" - - "L2.2004[1686872992356001527,1686873480877117666] 1686936871.55s 100mb |L2.2004|" + - "L2.1463[1686841379000000000,1686842332558996896] 1686936871.55s 100mb|L2.1463| " + - "L2.1468[1686842332558996897,1686842963082388201] 1686936871.55s 100mb |L2.1468| " + - "L2.1469[1686842963082388202,1686843593605779505] 1686936871.55s 100mb |L2.1469| " + - "L2.1471[1686843593605779506,1686844269803455042] 1686936871.55s 100mb |L2.1471| " + - "L2.1476[1686844269803455043,1686844592758068181] 1686936871.55s 100mb |L2.1476| " + - "L2.1477[1686844592758068182,1686844915712681319] 1686936871.55s 100mb |L2.1477| " + - "L2.1478[1686844915712681320,1686844946001130578] 1686936871.55s 9mb |L2.1478| " + - "L2.1479[1686844946001130579,1686845261138897644] 1686936871.55s 100mb |L2.1479| " + - "L2.1480[1686845261138897645,1686845576276664709] 1686936871.55s 100mb |L2.1480| " + - "L2.1481[1686845576276664710,1686845579000000000] 1686936871.55s 885kb |L2.1481| " + - "L2.1489[1686845579000000001,1686846612945515506] 1686936871.55s 100mb |L2.1489| " + - "L2.1494[1686846612945515507,1686847302242526939] 1686936871.55s 100mb |L2.1494| " + - "L2.1499[1686847302242526940,1686847769313756192] 1686936871.55s 100mb |L2.1499| " + - "L2.1500[1686847769313756193,1686848236384985444] 1686936871.55s 100mb |L2.1500| " + - "L2.1502[1686848236384985445,1686848816392993760] 1686936871.55s 100mb |L2.1502| " + - "L2.1507[1686848816392993761,1686849165082054031] 1686936871.55s 100mb |L2.1507| " + - "L2.1508[1686849165082054032,1686849513771114301] 1686936871.55s 100mb |L2.1508| " + - "L2.1509[1686849513771114302,1686849779000000000] 1686936871.55s 76mb |L2.1509| " + - "L2.1510[1686849779000000001,1686850288711664442] 1686936871.55s 100mb |L2.1510| " + - "L2.1511[1686850288711664443,1686850559000000000] 1686936871.55s 53mb |L2.1511| " + - "L2.1519[1686850559000000001,1686851147210677461] 1686936871.55s 100mb |L2.1519| " + - "L2.1520[1686851147210677462,1686851735421354921] 1686936871.55s 100mb |L2.1520| " + - "L2.1521[1686851735421354922,1686852240527466641] 1686936871.55s 86mb |L2.1521| " + - "L2.1522[1686852240527466642,1686852812866488092] 1686936871.55s 100mb |L2.1522| " + - 
"L2.1523[1686852812866488093,1686853385205509542] 1686936871.55s 100mb |L2.1523| " + - "L2.1525[1686853385205509543,1686853965359592641] 1686936871.55s 100mb |L2.1525| " + - "L2.1530[1686853965359592642,1686854382616966390] 1686936871.55s 100mb |L2.1530| " + - "L2.1531[1686854382616966391,1686854799874340138] 1686936871.55s 100mb |L2.1531| " + - "L2.1532[1686854799874340139,1686854819000000000] 1686936871.55s 5mb |L2.1532| " + - "L2.1540[1686854819000000001,1686855555092837650] 1686936871.55s 100mb |L2.1540| " + - "L2.1541[1686855555092837651,1686856291185675299] 1686936871.55s 100mb |L2.1541| " + - "L2.1542[1686856291185675300,1686856502561319445] 1686936871.55s 29mb |L2.1542| " + - "L2.1543[1686856502561319446,1686857135555597834] 1686936871.55s 100mb |L2.1543| " + - "L2.1548[1686857135555597835,1686857409590080289] 1686936871.55s 100mb |L2.1548| " + - "L2.1549[1686857409590080290,1686857683624562743] 1686936871.55s 100mb |L2.1549| " + - "L2.1550[1686857683624562744,1686857768549876222] 1686936871.55s 31mb |L2.1550| " + - "L2.1551[1686857768549876223,1686858217039101175] 1686936871.55s 100mb |L2.1551| " + - "L2.1552[1686858217039101176,1686858665528326127] 1686936871.55s 100mb |L2.1552| " + - "L2.1554[1686858665528326128,1686859092257365665] 1686936871.55s 100mb |L2.1554| " + - "L2.1555[1686859092257365666,1686859499000000000] 1686936871.55s 95mb |L2.1555| " + - "L2.1563[1686859499000000001,1686860075111679382] 1686936871.55s 100mb |L2.1563| " + - "L2.1564[1686860075111679383,1686860651223358763] 1686936871.55s 100mb |L2.1564| " + - "L2.1565[1686860651223358764,1686861094471633615] 1686936871.55s 77mb |L2.1565| " + - "L2.1573[1686861094471633616,1686861597923995018] 1686936871.55s 100mb |L2.1573| " + - "L2.1574[1686861597923995019,1686862101376356420] 1686936871.55s 100mb |L2.1574| " + - "L2.1582[1686862101376356421,1686862532038334079] 1686936871.55s 100mb |L2.1582| " + - "L2.1583[1686862532038334080,1686862962700311737] 1686936871.55s 100mb |L2.1583| " + - "L2.1584[1686862962700311738,1686863391646317185] 1686936871.55s 100mb |L2.1584| " + - "L2.1592[1686863391646317186,1686864106500589152] 1686936871.55s 100mb |L2.1592| " + - "L2.1593[1686864106500589153,1686864821354861118] 1686936871.55s 100mb |L2.1593| " + - "L2.1594[1686864821354861119,1686865116328768791] 1686936871.55s 41mb |L2.1594| " + - "L2.1595[1686865116328768792,1686865695735321041] 1686936871.55s 100mb |L2.1595| " + - "L2.1596[1686865695735321042,1686866275141873290] 1686936871.55s 100mb |L2.1596| " + - "L2.1598[1686866275141873291,1686866862140786963] 1686936871.55s 100mb |L2.1598| " + - "L2.1603[1686866862140786964,1686867205172843469] 1686936871.55s 100mb |L2.1603| " + - "L2.1604[1686867205172843470,1686867548204899974] 1686936871.55s 100mb |L2.1604| " + - "L2.1605[1686867548204899975,1686867839000000000] 1686936871.55s 85mb |L2.1605| " + - "L2.1613[1686867839000000001,1686869156057291877] 1686936871.55s 100mb |L2.1613| " + - "L2.1618[1686869156057291878,1686869793900296627] 1686936871.55s 100mb |L2.1618| " + - "L2.1619[1686869793900296628,1686870431743301376] 1686936871.55s 100mb |L2.1619| " + - "L2.1620[1686870431743301377,1686870473114583753] 1686936871.55s 6mb |L2.1620|" + - "L2.1628[1686870473114583754,1686870911305888930] 1686936871.55s 100mb |L2.1628|" + - "L2.1629[1686870911305888931,1686871349497194106] 1686936871.55s 100mb |L2.1629|" + - "L2.1638[1686871349497194107,1686871921260674819] 1686936871.55s 100mb |L2.1638|" + - "L2.1643[1686871921260674820,1686872342671651604] 1686936871.55s 100mb |L2.1643|" + - 
"L2.1648[1686872342671651605,1686872646602704048] 1686936871.55s 100mb |L2.1648|" + - "L2.1649[1686872646602704049,1686872950533756491] 1686936871.55s 100mb |L2.1649|" + - "L2.1650[1686872950533756492,1686873064787630789] 1686936871.55s 38mb |L2.1650|" + - "L2.1651[1686873064787630790,1686873383892289532] 1686936871.55s 100mb |L2.1651|" + - "L2.1652[1686873383892289533,1686873599000000000] 1686936871.55s 67mb |L2.1652|" "### ); } @@ -1702,31 +1699,32 @@ async fn stuck_l0_large_l0s() { - "L0.198[197,1970000] 197ns 10b|----------------------------------------L0.198-----------------------------------------| " - "L0.199[198,1980000] 198ns 10b|----------------------------------------L0.199-----------------------------------------| " - "L0.200[199,1990000] 199ns 10b|----------------------------------------L0.200-----------------------------------------| " - - "**** Final Output Files (14.48gb written)" - - "L1 " - - "L1.3770[1752,1811] 199ns 62mb|L1.3770| " + - "**** Final Output Files (11.25gb written)" - "L2 " - - "L2.3711[1812,1990000] 199ns 190mb|----------------------------------------L2.3711----------------------------------------| " - - "L2.3743[1,102] 199ns 100mb|L2.3743| " - - "L2.3744[103,203] 199ns 100mb|L2.3744| " - - "L2.3745[204,305] 199ns 101mb|L2.3745| " - - "L2.3763[1165,1255] 199ns 101mb|L2.3763| " - - "L2.3771[306,407] 199ns 101mb|L2.3771| " - - "L2.3772[408,508] 199ns 100mb|L2.3772| " - - "L2.3773[509,606] 199ns 99mb|L2.3773| " - - "L2.3774[607,711] 199ns 101mb|L2.3774| " - - "L2.3775[712,815] 199ns 100mb|L2.3775| " - - "L2.3776[816,904] 199ns 87mb|L2.3776| " - - "L2.3777[905,1011] 199ns 101mb|L2.3777| " - - "L2.3778[1012,1117] 199ns 100mb|L2.3778| " - - "L2.3779[1118,1164] 199ns 46mb|L2.3779| " - - "L2.3780[1256,1350] 199ns 100mb|L2.3780| " - - "L2.3781[1351,1444] 199ns 99mb|L2.3781| " - - "L2.3782[1445,1480] 199ns 40mb|L2.3782| " - - "L2.3783[1481,1580] 199ns 100mb|L2.3783| " - - "L2.3784[1581,1679] 199ns 99mb|L2.3784| " - - "L2.3785[1680,1751] 199ns 74mb|L2.3785| " - - "WARNING: file L2.3711[1812,1990000] 199ns 190mb exceeds soft limit 100mb by more than 50%" + - "L2.3439[1812,2716] 199ns 190mb|L2.3439| " + - "L2.3523[1,102] 199ns 101mb|L2.3523| " + - "L2.3546[103,204] 199ns 101mb|L2.3546| " + - "L2.3547[205,305] 199ns 100mb|L2.3547| " + - "L2.3548[306,361] 199ns 57mb|L2.3548| " + - "L2.3549[362,463] 199ns 101mb|L2.3549| " + - "L2.3550[464,564] 199ns 100mb|L2.3550| " + - "L2.3551[565,619] 199ns 56mb|L2.3551| " + - "L2.3552[620,720] 199ns 101mb|L2.3552| " + - "L2.3553[721,820] 199ns 100mb|L2.3553| " + - "L2.3554[821,875] 199ns 56mb|L2.3554| " + - "L2.3555[876,975] 199ns 100mb|L2.3555| " + - "L2.3556[976,1074] 199ns 99mb|L2.3556| " + - "L2.3557[1075,1138] 199ns 66mb|L2.3557| " + - "L2.3558[1139,1240] 199ns 101mb|L2.3558| " + - "L2.3559[1241,1341] 199ns 100mb|L2.3559| " + - "L2.3560[1342,1396] 199ns 56mb|L2.3560| " + - "L2.3561[1397,1497] 199ns 101mb|L2.3561| " + - "L2.3562[1498,1597] 199ns 100mb|L2.3562| " + - "L2.3563[1598,1652] 199ns 56mb|L2.3563| " + - "L2.3564[1653,1751] 199ns 101mb|L2.3564| " + - "L2.3565[1752,1811] 199ns 62mb|L2.3565| " + - "L2.3566[2717,1990000] 199ns 2kb|----------------------------------------L2.3566----------------------------------------| " + - "WARNING: file L2.3439[1812,2716] 199ns 190mb exceeds soft limit 100mb by more than 50%" "### ); } @@ -3027,17 +3025,805 @@ async fn split_precent_loop() { - "L1.3[1676005158277000000,1676010156669000000] 1676010160.05s 58mb |L1.3| " - "WARNING: file L0.40[1676020762355000000,1676036230752000000] 
1676036233.84s 159mb exceeds soft limit 100mb by more than 50%" - "WARNING: file L0.43[1676039845773000000,1676063836202000000] 1676063839.07s 242mb exceeds soft limit 100mb by more than 50%" - - "**** Final Output Files (4.17gb written)" + - "**** Final Output Files (3.4gb written)" + - "L1 " + - "L1.260[1676045833054395546,1676050409609000000] 1676066475.26s 41mb |L1.260| " + - "L2 " + - "L2.228[1676050409609000001,1676066212011000000] 1676066475.26s 145mb |----L2.228-----| " + - "L2.251[1675987200001000000,1675995209209749739] 1676066475.26s 100mb|L2.251-| " + - "L2.261[1675995209209749740,1676003044683020379] 1676066475.26s 100mb |L2.261| " + - "L2.262[1676003044683020380,1676010880156291018] 1676066475.26s 100mb |L2.262| " + - "L2.263[1676010880156291019,1676018715629412205] 1676066475.26s 100mb |L2.263| " + - "L2.264[1676018715629412206,1676027900853050774] 1676066475.26s 100mb |-L2.264-| " + - "L2.265[1676027900853050775,1676037086076689342] 1676066475.26s 100mb |-L2.265-| " + - "L2.266[1676037086076689343,1676045833054395545] 1676066475.26s 95mb |L2.266-| " + "### + ); +} + +// This is a simplified version of a test generated from actual catalog contents (which was thousands of lines). +// The key attributes are: +// - there are enough bytes of L0 to trigger vertical splitting +// - there are enough L0 files that the individual files are tiny +// - there are lots of L1s that make it a pain to merge down from L0 +// - when the L0s get split, they're split into enough pieces that the algorigthm (pre-fix) would put the L0s back together in a single file. +// The result, prior to the fix motivating this test case, is that the L0s would be vertically split, then regrouped together in a single chain, +// so they get recompacted together, which again prompts the need for vertical splitting, resulting in an unproductive cycle. 
+#[tokio::test] +async fn very_big_overlapped_backlog() { + test_helpers::maybe_start_logging(); + + let max_files = 20; + let setup = layout_setup_builder() + .await + .with_max_num_files_per_plan(max_files) + .with_max_desired_file_size_bytes(MAX_DESIRED_FILE_SIZE) + .with_partition_timeout(Duration::from_millis(100000)) + .with_suppress_run_output() // remove this to debug + .build() + .await; + + let max_time: i64 = 200000; + let l0_cnt: i64 = 200; + let l0_interval = max_time / l0_cnt; + let l0_size = MAX_DESIRED_FILE_SIZE * 4 / l0_cnt as u64; + let l1_cnt = 100; + let l1_interval = max_time / l1_cnt; + + // Create 100s of overlapping L0s + for i in 0..l0_cnt { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * l0_interval) + .with_max_time((i + 1) * l0_interval) + .with_compaction_level(CompactionLevel::Initial) + .with_max_l0_created_at(Time::from_timestamp_nanos(l1_cnt + i)) + .with_file_size_bytes(l0_size), + ) + .await; + } + + // Create a lot of L1s, on the same time range as the L0s + for i in 0..l1_cnt { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * l1_interval) + .with_max_time((i + 1) * l1_interval - 1) + .with_compaction_level(CompactionLevel::FileNonOverlapped) + .with_max_l0_created_at(Time::from_timestamp_nanos(i)) + .with_file_size_bytes(MAX_DESIRED_FILE_SIZE), + ) + .await; + } + + // Create a lot of L2s, on the same time range as the L0s and L1s + for i in 0..l1_cnt { + setup + .partition + .create_parquet_file( + parquet_builder() + .with_min_time(i * l1_interval) + .with_max_time((i + 1) * l1_interval - 1) + .with_compaction_level(CompactionLevel::Final) + .with_max_l0_created_at(Time::from_timestamp_nanos(i)) + .with_file_size_bytes(MAX_DESIRED_FILE_SIZE), + ) + .await; + } + + insta::assert_yaml_snapshot!( + run_layout_scenario(&setup).await, + @r###" + --- + - "**** Input Files " + - "L0 " + - "L0.1[0,1000] 100ns 2mb |L0.1| " + - "L0.2[1000,2000] 101ns 2mb|L0.2| " + - "L0.3[2000,3000] 102ns 2mb|L0.3| " + - "L0.4[3000,4000] 103ns 2mb |L0.4| " + - "L0.5[4000,5000] 104ns 2mb |L0.5| " + - "L0.6[5000,6000] 105ns 2mb |L0.6| " + - "L0.7[6000,7000] 106ns 2mb |L0.7| " + - "L0.8[7000,8000] 107ns 2mb |L0.8| " + - "L0.9[8000,9000] 108ns 2mb |L0.9| " + - "L0.10[9000,10000] 109ns 2mb |L0.10| " + - "L0.11[10000,11000] 110ns 2mb |L0.11| " + - "L0.12[11000,12000] 111ns 2mb |L0.12| " + - "L0.13[12000,13000] 112ns 2mb |L0.13| " + - "L0.14[13000,14000] 113ns 2mb |L0.14| " + - "L0.15[14000,15000] 114ns 2mb |L0.15| " + - "L0.16[15000,16000] 115ns 2mb |L0.16| " + - "L0.17[16000,17000] 116ns 2mb |L0.17| " + - "L0.18[17000,18000] 117ns 2mb |L0.18| " + - "L0.19[18000,19000] 118ns 2mb |L0.19| " + - "L0.20[19000,20000] 119ns 2mb |L0.20| " + - "L0.21[20000,21000] 120ns 2mb |L0.21| " + - "L0.22[21000,22000] 121ns 2mb |L0.22| " + - "L0.23[22000,23000] 122ns 2mb |L0.23| " + - "L0.24[23000,24000] 123ns 2mb |L0.24| " + - "L0.25[24000,25000] 124ns 2mb |L0.25| " + - "L0.26[25000,26000] 125ns 2mb |L0.26| " + - "L0.27[26000,27000] 126ns 2mb |L0.27| " + - "L0.28[27000,28000] 127ns 2mb |L0.28| " + - "L0.29[28000,29000] 128ns 2mb |L0.29| " + - "L0.30[29000,30000] 129ns 2mb |L0.30| " + - "L0.31[30000,31000] 130ns 2mb |L0.31| " + - "L0.32[31000,32000] 131ns 2mb |L0.32| " + - "L0.33[32000,33000] 132ns 2mb |L0.33| " + - "L0.34[33000,34000] 133ns 2mb |L0.34| " + - "L0.35[34000,35000] 134ns 2mb |L0.35| " + - "L0.36[35000,36000] 135ns 2mb |L0.36| " + - "L0.37[36000,37000] 136ns 2mb |L0.37| " + - "L0.38[37000,38000] 137ns 2mb |L0.38| " + 
- "L0.39[38000,39000] 138ns 2mb |L0.39| " + - "L0.40[39000,40000] 139ns 2mb |L0.40| " + - "L0.41[40000,41000] 140ns 2mb |L0.41| " + - "L0.42[41000,42000] 141ns 2mb |L0.42| " + - "L0.43[42000,43000] 142ns 2mb |L0.43| " + - "L0.44[43000,44000] 143ns 2mb |L0.44| " + - "L0.45[44000,45000] 144ns 2mb |L0.45| " + - "L0.46[45000,46000] 145ns 2mb |L0.46| " + - "L0.47[46000,47000] 146ns 2mb |L0.47| " + - "L0.48[47000,48000] 147ns 2mb |L0.48| " + - "L0.49[48000,49000] 148ns 2mb |L0.49| " + - "L0.50[49000,50000] 149ns 2mb |L0.50| " + - "L0.51[50000,51000] 150ns 2mb |L0.51| " + - "L0.52[51000,52000] 151ns 2mb |L0.52| " + - "L0.53[52000,53000] 152ns 2mb |L0.53| " + - "L0.54[53000,54000] 153ns 2mb |L0.54| " + - "L0.55[54000,55000] 154ns 2mb |L0.55| " + - "L0.56[55000,56000] 155ns 2mb |L0.56| " + - "L0.57[56000,57000] 156ns 2mb |L0.57| " + - "L0.58[57000,58000] 157ns 2mb |L0.58| " + - "L0.59[58000,59000] 158ns 2mb |L0.59| " + - "L0.60[59000,60000] 159ns 2mb |L0.60| " + - "L0.61[60000,61000] 160ns 2mb |L0.61| " + - "L0.62[61000,62000] 161ns 2mb |L0.62| " + - "L0.63[62000,63000] 162ns 2mb |L0.63| " + - "L0.64[63000,64000] 163ns 2mb |L0.64| " + - "L0.65[64000,65000] 164ns 2mb |L0.65| " + - "L0.66[65000,66000] 165ns 2mb |L0.66| " + - "L0.67[66000,67000] 166ns 2mb |L0.67| " + - "L0.68[67000,68000] 167ns 2mb |L0.68| " + - "L0.69[68000,69000] 168ns 2mb |L0.69| " + - "L0.70[69000,70000] 169ns 2mb |L0.70| " + - "L0.71[70000,71000] 170ns 2mb |L0.71| " + - "L0.72[71000,72000] 171ns 2mb |L0.72| " + - "L0.73[72000,73000] 172ns 2mb |L0.73| " + - "L0.74[73000,74000] 173ns 2mb |L0.74| " + - "L0.75[74000,75000] 174ns 2mb |L0.75| " + - "L0.76[75000,76000] 175ns 2mb |L0.76| " + - "L0.77[76000,77000] 176ns 2mb |L0.77| " + - "L0.78[77000,78000] 177ns 2mb |L0.78| " + - "L0.79[78000,79000] 178ns 2mb |L0.79| " + - "L0.80[79000,80000] 179ns 2mb |L0.80| " + - "L0.81[80000,81000] 180ns 2mb |L0.81| " + - "L0.82[81000,82000] 181ns 2mb |L0.82| " + - "L0.83[82000,83000] 182ns 2mb |L0.83| " + - "L0.84[83000,84000] 183ns 2mb |L0.84| " + - "L0.85[84000,85000] 184ns 2mb |L0.85| " + - "L0.86[85000,86000] 185ns 2mb |L0.86| " + - "L0.87[86000,87000] 186ns 2mb |L0.87| " + - "L0.88[87000,88000] 187ns 2mb |L0.88| " + - "L0.89[88000,89000] 188ns 2mb |L0.89| " + - "L0.90[89000,90000] 189ns 2mb |L0.90| " + - "L0.91[90000,91000] 190ns 2mb |L0.91| " + - "L0.92[91000,92000] 191ns 2mb |L0.92| " + - "L0.93[92000,93000] 192ns 2mb |L0.93| " + - "L0.94[93000,94000] 193ns 2mb |L0.94| " + - "L0.95[94000,95000] 194ns 2mb |L0.95| " + - "L0.96[95000,96000] 195ns 2mb |L0.96| " + - "L0.97[96000,97000] 196ns 2mb |L0.97| " + - "L0.98[97000,98000] 197ns 2mb |L0.98| " + - "L0.99[98000,99000] 198ns 2mb |L0.99| " + - "L0.100[99000,100000] 199ns 2mb |L0.100| " + - "L0.101[100000,101000] 200ns 2mb |L0.101| " + - "L0.102[101000,102000] 201ns 2mb |L0.102| " + - "L0.103[102000,103000] 202ns 2mb |L0.103| " + - "L0.104[103000,104000] 203ns 2mb |L0.104| " + - "L0.105[104000,105000] 204ns 2mb |L0.105| " + - "L0.106[105000,106000] 205ns 2mb |L0.106| " + - "L0.107[106000,107000] 206ns 2mb |L0.107| " + - "L0.108[107000,108000] 207ns 2mb |L0.108| " + - "L0.109[108000,109000] 208ns 2mb |L0.109| " + - "L0.110[109000,110000] 209ns 2mb |L0.110| " + - "L0.111[110000,111000] 210ns 2mb |L0.111| " + - "L0.112[111000,112000] 211ns 2mb |L0.112| " + - "L0.113[112000,113000] 212ns 2mb |L0.113| " + - "L0.114[113000,114000] 213ns 2mb |L0.114| " + - "L0.115[114000,115000] 214ns 2mb |L0.115| " + - "L0.116[115000,116000] 215ns 2mb |L0.116| " + - "L0.117[116000,117000] 216ns 2mb |L0.117| " + - 
"L0.118[117000,118000] 217ns 2mb |L0.118| " + - "L0.119[118000,119000] 218ns 2mb |L0.119| " + - "L0.120[119000,120000] 219ns 2mb |L0.120| " + - "L0.121[120000,121000] 220ns 2mb |L0.121| " + - "L0.122[121000,122000] 221ns 2mb |L0.122| " + - "L0.123[122000,123000] 222ns 2mb |L0.123| " + - "L0.124[123000,124000] 223ns 2mb |L0.124| " + - "L0.125[124000,125000] 224ns 2mb |L0.125| " + - "L0.126[125000,126000] 225ns 2mb |L0.126| " + - "L0.127[126000,127000] 226ns 2mb |L0.127| " + - "L0.128[127000,128000] 227ns 2mb |L0.128| " + - "L0.129[128000,129000] 228ns 2mb |L0.129| " + - "L0.130[129000,130000] 229ns 2mb |L0.130| " + - "L0.131[130000,131000] 230ns 2mb |L0.131| " + - "L0.132[131000,132000] 231ns 2mb |L0.132| " + - "L0.133[132000,133000] 232ns 2mb |L0.133| " + - "L0.134[133000,134000] 233ns 2mb |L0.134| " + - "L0.135[134000,135000] 234ns 2mb |L0.135| " + - "L0.136[135000,136000] 235ns 2mb |L0.136| " + - "L0.137[136000,137000] 236ns 2mb |L0.137| " + - "L0.138[137000,138000] 237ns 2mb |L0.138| " + - "L0.139[138000,139000] 238ns 2mb |L0.139| " + - "L0.140[139000,140000] 239ns 2mb |L0.140| " + - "L0.141[140000,141000] 240ns 2mb |L0.141| " + - "L0.142[141000,142000] 241ns 2mb |L0.142| " + - "L0.143[142000,143000] 242ns 2mb |L0.143| " + - "L0.144[143000,144000] 243ns 2mb |L0.144| " + - "L0.145[144000,145000] 244ns 2mb |L0.145| " + - "L0.146[145000,146000] 245ns 2mb |L0.146| " + - "L0.147[146000,147000] 246ns 2mb |L0.147| " + - "L0.148[147000,148000] 247ns 2mb |L0.148| " + - "L0.149[148000,149000] 248ns 2mb |L0.149| " + - "L0.150[149000,150000] 249ns 2mb |L0.150| " + - "L0.151[150000,151000] 250ns 2mb |L0.151| " + - "L0.152[151000,152000] 251ns 2mb |L0.152| " + - "L0.153[152000,153000] 252ns 2mb |L0.153| " + - "L0.154[153000,154000] 253ns 2mb |L0.154| " + - "L0.155[154000,155000] 254ns 2mb |L0.155| " + - "L0.156[155000,156000] 255ns 2mb |L0.156| " + - "L0.157[156000,157000] 256ns 2mb |L0.157| " + - "L0.158[157000,158000] 257ns 2mb |L0.158| " + - "L0.159[158000,159000] 258ns 2mb |L0.159| " + - "L0.160[159000,160000] 259ns 2mb |L0.160| " + - "L0.161[160000,161000] 260ns 2mb |L0.161| " + - "L0.162[161000,162000] 261ns 2mb |L0.162| " + - "L0.163[162000,163000] 262ns 2mb |L0.163| " + - "L0.164[163000,164000] 263ns 2mb |L0.164| " + - "L0.165[164000,165000] 264ns 2mb |L0.165| " + - "L0.166[165000,166000] 265ns 2mb |L0.166| " + - "L0.167[166000,167000] 266ns 2mb |L0.167| " + - "L0.168[167000,168000] 267ns 2mb |L0.168| " + - "L0.169[168000,169000] 268ns 2mb |L0.169| " + - "L0.170[169000,170000] 269ns 2mb |L0.170| " + - "L0.171[170000,171000] 270ns 2mb |L0.171| " + - "L0.172[171000,172000] 271ns 2mb |L0.172| " + - "L0.173[172000,173000] 272ns 2mb |L0.173| " + - "L0.174[173000,174000] 273ns 2mb |L0.174| " + - "L0.175[174000,175000] 274ns 2mb |L0.175| " + - "L0.176[175000,176000] 275ns 2mb |L0.176| " + - "L0.177[176000,177000] 276ns 2mb |L0.177| " + - "L0.178[177000,178000] 277ns 2mb |L0.178| " + - "L0.179[178000,179000] 278ns 2mb |L0.179| " + - "L0.180[179000,180000] 279ns 2mb |L0.180| " + - "L0.181[180000,181000] 280ns 2mb |L0.181| " + - "L0.182[181000,182000] 281ns 2mb |L0.182| " + - "L0.183[182000,183000] 282ns 2mb |L0.183| " + - "L0.184[183000,184000] 283ns 2mb |L0.184|" + - "L0.185[184000,185000] 284ns 2mb |L0.185|" + - "L0.186[185000,186000] 285ns 2mb |L0.186|" + - "L0.187[186000,187000] 286ns 2mb |L0.187|" + - "L0.188[187000,188000] 287ns 2mb |L0.188|" + - "L0.189[188000,189000] 288ns 2mb |L0.189|" + - "L0.190[189000,190000] 289ns 2mb |L0.190|" + - "L0.191[190000,191000] 290ns 2mb |L0.191|" + - 
"L0.192[191000,192000] 291ns 2mb |L0.192|" + - "L0.193[192000,193000] 292ns 2mb |L0.193|" + - "L0.194[193000,194000] 293ns 2mb |L0.194|" + - "L0.195[194000,195000] 294ns 2mb |L0.195|" + - "L0.196[195000,196000] 295ns 2mb |L0.196|" + - "L0.197[196000,197000] 296ns 2mb |L0.197|" + - "L0.198[197000,198000] 297ns 2mb |L0.198|" + - "L0.199[198000,199000] 298ns 2mb |L0.199|" + - "L0.200[199000,200000] 299ns 2mb |L0.200|" + - "L1 " + - "L1.201[0,1999] 0ns 100mb |L1.201| " + - "L1.202[2000,3999] 1ns 100mb|L1.202| " + - "L1.203[4000,5999] 2ns 100mb |L1.203| " + - "L1.204[6000,7999] 3ns 100mb |L1.204| " + - "L1.205[8000,9999] 4ns 100mb |L1.205| " + - "L1.206[10000,11999] 5ns 100mb |L1.206| " + - "L1.207[12000,13999] 6ns 100mb |L1.207| " + - "L1.208[14000,15999] 7ns 100mb |L1.208| " + - "L1.209[16000,17999] 8ns 100mb |L1.209| " + - "L1.210[18000,19999] 9ns 100mb |L1.210| " + - "L1.211[20000,21999] 10ns 100mb |L1.211| " + - "L1.212[22000,23999] 11ns 100mb |L1.212| " + - "L1.213[24000,25999] 12ns 100mb |L1.213| " + - "L1.214[26000,27999] 13ns 100mb |L1.214| " + - "L1.215[28000,29999] 14ns 100mb |L1.215| " + - "L1.216[30000,31999] 15ns 100mb |L1.216| " + - "L1.217[32000,33999] 16ns 100mb |L1.217| " + - "L1.218[34000,35999] 17ns 100mb |L1.218| " + - "L1.219[36000,37999] 18ns 100mb |L1.219| " + - "L1.220[38000,39999] 19ns 100mb |L1.220| " + - "L1.221[40000,41999] 20ns 100mb |L1.221| " + - "L1.222[42000,43999] 21ns 100mb |L1.222| " + - "L1.223[44000,45999] 22ns 100mb |L1.223| " + - "L1.224[46000,47999] 23ns 100mb |L1.224| " + - "L1.225[48000,49999] 24ns 100mb |L1.225| " + - "L1.226[50000,51999] 25ns 100mb |L1.226| " + - "L1.227[52000,53999] 26ns 100mb |L1.227| " + - "L1.228[54000,55999] 27ns 100mb |L1.228| " + - "L1.229[56000,57999] 28ns 100mb |L1.229| " + - "L1.230[58000,59999] 29ns 100mb |L1.230| " + - "L1.231[60000,61999] 30ns 100mb |L1.231| " + - "L1.232[62000,63999] 31ns 100mb |L1.232| " + - "L1.233[64000,65999] 32ns 100mb |L1.233| " + - "L1.234[66000,67999] 33ns 100mb |L1.234| " + - "L1.235[68000,69999] 34ns 100mb |L1.235| " + - "L1.236[70000,71999] 35ns 100mb |L1.236| " + - "L1.237[72000,73999] 36ns 100mb |L1.237| " + - "L1.238[74000,75999] 37ns 100mb |L1.238| " + - "L1.239[76000,77999] 38ns 100mb |L1.239| " + - "L1.240[78000,79999] 39ns 100mb |L1.240| " + - "L1.241[80000,81999] 40ns 100mb |L1.241| " + - "L1.242[82000,83999] 41ns 100mb |L1.242| " + - "L1.243[84000,85999] 42ns 100mb |L1.243| " + - "L1.244[86000,87999] 43ns 100mb |L1.244| " + - "L1.245[88000,89999] 44ns 100mb |L1.245| " + - "L1.246[90000,91999] 45ns 100mb |L1.246| " + - "L1.247[92000,93999] 46ns 100mb |L1.247| " + - "L1.248[94000,95999] 47ns 100mb |L1.248| " + - "L1.249[96000,97999] 48ns 100mb |L1.249| " + - "L1.250[98000,99999] 49ns 100mb |L1.250| " + - "L1.251[100000,101999] 50ns 100mb |L1.251| " + - "L1.252[102000,103999] 51ns 100mb |L1.252| " + - "L1.253[104000,105999] 52ns 100mb |L1.253| " + - "L1.254[106000,107999] 53ns 100mb |L1.254| " + - "L1.255[108000,109999] 54ns 100mb |L1.255| " + - "L1.256[110000,111999] 55ns 100mb |L1.256| " + - "L1.257[112000,113999] 56ns 100mb |L1.257| " + - "L1.258[114000,115999] 57ns 100mb |L1.258| " + - "L1.259[116000,117999] 58ns 100mb |L1.259| " + - "L1.260[118000,119999] 59ns 100mb |L1.260| " + - "L1.261[120000,121999] 60ns 100mb |L1.261| " + - "L1.262[122000,123999] 61ns 100mb |L1.262| " + - "L1.263[124000,125999] 62ns 100mb |L1.263| " + - "L1.264[126000,127999] 63ns 100mb |L1.264| " + - "L1.265[128000,129999] 64ns 100mb |L1.265| " + - "L1.266[130000,131999] 65ns 100mb |L1.266| " + - 
"L1.267[132000,133999] 66ns 100mb |L1.267| " + - "L1.268[134000,135999] 67ns 100mb |L1.268| " + - "L1.269[136000,137999] 68ns 100mb |L1.269| " + - "L1.270[138000,139999] 69ns 100mb |L1.270| " + - "L1.271[140000,141999] 70ns 100mb |L1.271| " + - "L1.272[142000,143999] 71ns 100mb |L1.272| " + - "L1.273[144000,145999] 72ns 100mb |L1.273| " + - "L1.274[146000,147999] 73ns 100mb |L1.274| " + - "L1.275[148000,149999] 74ns 100mb |L1.275| " + - "L1.276[150000,151999] 75ns 100mb |L1.276| " + - "L1.277[152000,153999] 76ns 100mb |L1.277| " + - "L1.278[154000,155999] 77ns 100mb |L1.278| " + - "L1.279[156000,157999] 78ns 100mb |L1.279| " + - "L1.280[158000,159999] 79ns 100mb |L1.280| " + - "L1.281[160000,161999] 80ns 100mb |L1.281| " + - "L1.282[162000,163999] 81ns 100mb |L1.282| " + - "L1.283[164000,165999] 82ns 100mb |L1.283| " + - "L1.284[166000,167999] 83ns 100mb |L1.284| " + - "L1.285[168000,169999] 84ns 100mb |L1.285| " + - "L1.286[170000,171999] 85ns 100mb |L1.286| " + - "L1.287[172000,173999] 86ns 100mb |L1.287| " + - "L1.288[174000,175999] 87ns 100mb |L1.288| " + - "L1.289[176000,177999] 88ns 100mb |L1.289| " + - "L1.290[178000,179999] 89ns 100mb |L1.290| " + - "L1.291[180000,181999] 90ns 100mb |L1.291| " + - "L1.292[182000,183999] 91ns 100mb |L1.292| " + - "L1.293[184000,185999] 92ns 100mb |L1.293|" + - "L1.294[186000,187999] 93ns 100mb |L1.294|" + - "L1.295[188000,189999] 94ns 100mb |L1.295|" + - "L1.296[190000,191999] 95ns 100mb |L1.296|" + - "L1.297[192000,193999] 96ns 100mb |L1.297|" + - "L1.298[194000,195999] 97ns 100mb |L1.298|" + - "L1.299[196000,197999] 98ns 100mb |L1.299|" + - "L1.300[198000,199999] 99ns 100mb |L1.300|" + - "L2 " + - "L2.301[0,1999] 0ns 100mb |L2.301| " + - "L2.302[2000,3999] 1ns 100mb|L2.302| " + - "L2.303[4000,5999] 2ns 100mb |L2.303| " + - "L2.304[6000,7999] 3ns 100mb |L2.304| " + - "L2.305[8000,9999] 4ns 100mb |L2.305| " + - "L2.306[10000,11999] 5ns 100mb |L2.306| " + - "L2.307[12000,13999] 6ns 100mb |L2.307| " + - "L2.308[14000,15999] 7ns 100mb |L2.308| " + - "L2.309[16000,17999] 8ns 100mb |L2.309| " + - "L2.310[18000,19999] 9ns 100mb |L2.310| " + - "L2.311[20000,21999] 10ns 100mb |L2.311| " + - "L2.312[22000,23999] 11ns 100mb |L2.312| " + - "L2.313[24000,25999] 12ns 100mb |L2.313| " + - "L2.314[26000,27999] 13ns 100mb |L2.314| " + - "L2.315[28000,29999] 14ns 100mb |L2.315| " + - "L2.316[30000,31999] 15ns 100mb |L2.316| " + - "L2.317[32000,33999] 16ns 100mb |L2.317| " + - "L2.318[34000,35999] 17ns 100mb |L2.318| " + - "L2.319[36000,37999] 18ns 100mb |L2.319| " + - "L2.320[38000,39999] 19ns 100mb |L2.320| " + - "L2.321[40000,41999] 20ns 100mb |L2.321| " + - "L2.322[42000,43999] 21ns 100mb |L2.322| " + - "L2.323[44000,45999] 22ns 100mb |L2.323| " + - "L2.324[46000,47999] 23ns 100mb |L2.324| " + - "L2.325[48000,49999] 24ns 100mb |L2.325| " + - "L2.326[50000,51999] 25ns 100mb |L2.326| " + - "L2.327[52000,53999] 26ns 100mb |L2.327| " + - "L2.328[54000,55999] 27ns 100mb |L2.328| " + - "L2.329[56000,57999] 28ns 100mb |L2.329| " + - "L2.330[58000,59999] 29ns 100mb |L2.330| " + - "L2.331[60000,61999] 30ns 100mb |L2.331| " + - "L2.332[62000,63999] 31ns 100mb |L2.332| " + - "L2.333[64000,65999] 32ns 100mb |L2.333| " + - "L2.334[66000,67999] 33ns 100mb |L2.334| " + - "L2.335[68000,69999] 34ns 100mb |L2.335| " + - "L2.336[70000,71999] 35ns 100mb |L2.336| " + - "L2.337[72000,73999] 36ns 100mb |L2.337| " + - "L2.338[74000,75999] 37ns 100mb |L2.338| " + - "L2.339[76000,77999] 38ns 100mb |L2.339| " + - "L2.340[78000,79999] 39ns 100mb |L2.340| " + - "L2.341[80000,81999] 40ns 
100mb |L2.341| " + - "L2.342[82000,83999] 41ns 100mb |L2.342| " + - "L2.343[84000,85999] 42ns 100mb |L2.343| " + - "L2.344[86000,87999] 43ns 100mb |L2.344| " + - "L2.345[88000,89999] 44ns 100mb |L2.345| " + - "L2.346[90000,91999] 45ns 100mb |L2.346| " + - "L2.347[92000,93999] 46ns 100mb |L2.347| " + - "L2.348[94000,95999] 47ns 100mb |L2.348| " + - "L2.349[96000,97999] 48ns 100mb |L2.349| " + - "L2.350[98000,99999] 49ns 100mb |L2.350| " + - "L2.351[100000,101999] 50ns 100mb |L2.351| " + - "L2.352[102000,103999] 51ns 100mb |L2.352| " + - "L2.353[104000,105999] 52ns 100mb |L2.353| " + - "L2.354[106000,107999] 53ns 100mb |L2.354| " + - "L2.355[108000,109999] 54ns 100mb |L2.355| " + - "L2.356[110000,111999] 55ns 100mb |L2.356| " + - "L2.357[112000,113999] 56ns 100mb |L2.357| " + - "L2.358[114000,115999] 57ns 100mb |L2.358| " + - "L2.359[116000,117999] 58ns 100mb |L2.359| " + - "L2.360[118000,119999] 59ns 100mb |L2.360| " + - "L2.361[120000,121999] 60ns 100mb |L2.361| " + - "L2.362[122000,123999] 61ns 100mb |L2.362| " + - "L2.363[124000,125999] 62ns 100mb |L2.363| " + - "L2.364[126000,127999] 63ns 100mb |L2.364| " + - "L2.365[128000,129999] 64ns 100mb |L2.365| " + - "L2.366[130000,131999] 65ns 100mb |L2.366| " + - "L2.367[132000,133999] 66ns 100mb |L2.367| " + - "L2.368[134000,135999] 67ns 100mb |L2.368| " + - "L2.369[136000,137999] 68ns 100mb |L2.369| " + - "L2.370[138000,139999] 69ns 100mb |L2.370| " + - "L2.371[140000,141999] 70ns 100mb |L2.371| " + - "L2.372[142000,143999] 71ns 100mb |L2.372| " + - "L2.373[144000,145999] 72ns 100mb |L2.373| " + - "L2.374[146000,147999] 73ns 100mb |L2.374| " + - "L2.375[148000,149999] 74ns 100mb |L2.375| " + - "L2.376[150000,151999] 75ns 100mb |L2.376| " + - "L2.377[152000,153999] 76ns 100mb |L2.377| " + - "L2.378[154000,155999] 77ns 100mb |L2.378| " + - "L2.379[156000,157999] 78ns 100mb |L2.379| " + - "L2.380[158000,159999] 79ns 100mb |L2.380| " + - "L2.381[160000,161999] 80ns 100mb |L2.381| " + - "L2.382[162000,163999] 81ns 100mb |L2.382| " + - "L2.383[164000,165999] 82ns 100mb |L2.383| " + - "L2.384[166000,167999] 83ns 100mb |L2.384| " + - "L2.385[168000,169999] 84ns 100mb |L2.385| " + - "L2.386[170000,171999] 85ns 100mb |L2.386| " + - "L2.387[172000,173999] 86ns 100mb |L2.387| " + - "L2.388[174000,175999] 87ns 100mb |L2.388| " + - "L2.389[176000,177999] 88ns 100mb |L2.389| " + - "L2.390[178000,179999] 89ns 100mb |L2.390| " + - "L2.391[180000,181999] 90ns 100mb |L2.391| " + - "L2.392[182000,183999] 91ns 100mb |L2.392| " + - "L2.393[184000,185999] 92ns 100mb |L2.393|" + - "L2.394[186000,187999] 93ns 100mb |L2.394|" + - "L2.395[188000,189999] 94ns 100mb |L2.395|" + - "L2.396[190000,191999] 95ns 100mb |L2.396|" + - "L2.397[192000,193999] 96ns 100mb |L2.397|" + - "L2.398[194000,195999] 97ns 100mb |L2.398|" + - "L2.399[196000,197999] 98ns 100mb |L2.399|" + - "L2.400[198000,199999] 99ns 100mb |L2.400|" + - "**** Final Output Files (45.56gb written)" - "L2 " - - "L2.304[1676034607207000001,1676066212011000000] 1676066475.26s 286mb |-------------L2.304--------------| " - - "L2.321[1675987200001000000,1675993675383098793] 1676066475.26s 100mb|L2.321| " - - "L2.329[1675993675383098794,1676001131291506471] 1676066475.26s 100mb |L2.329| " - - "L2.330[1676001131291506472,1676008587199914148] 1676066475.26s 100mb |L2.330| " - - "L2.331[1676008587199914149,1676014352965372887] 1676066475.26s 77mb |L2.331| " - - "L2.332[1676014352965372888,1676023648281062782] 1676066475.26s 100mb |-L2.332-| " - - "L2.333[1676023648281062783,1676032943596752676] 1676066475.26s 100mb 
|-L2.333-| " - - "L2.334[1676032943596752677,1676034607207000000] 1676066475.26s 18mb |L2.334| " - - "WARNING: file L2.304[1676034607207000001,1676066212011000000] 1676066475.26s 286mb exceeds soft limit 100mb by more than 50%" + - "L2.1173[0,983] 104ns 100mb|L2.1173| " + - "L2.1174[984,1966] 104ns 100mb|L2.1174| " + - "L2.1175[1967,1999] 104ns 3mb|L2.1175| " + - "L2.1176[2000,2983] 108ns 100mb|L2.1176| " + - "L2.1177[2984,3966] 108ns 100mb |L2.1177| " + - "L2.1178[3967,3999] 108ns 3mb |L2.1178| " + - "L2.1179[4000,4978] 108ns 100mb |L2.1179| " + - "L2.1180[4979,5956] 108ns 100mb |L2.1180| " + - "L2.1181[5957,5999] 108ns 4mb |L2.1181| " + - "L2.1182[6000,6978] 108ns 100mb |L2.1182| " + - "L2.1183[6979,7956] 108ns 100mb |L2.1183| " + - "L2.1184[7957,7999] 108ns 5mb |L2.1184| " + - "L2.1185[8000,8979] 113ns 100mb |L2.1185| " + - "L2.1186[8980,9958] 113ns 100mb |L2.1186| " + - "L2.1187[9959,9999] 113ns 4mb |L2.1187| " + - "L2.1188[10000,10980] 113ns 100mb |L2.1188| " + - "L2.1189[10981,11960] 113ns 100mb |L2.1189| " + - "L2.1190[11961,11999] 113ns 4mb |L2.1190| " + - "L2.1191[12000,12980] 113ns 100mb |L2.1191| " + - "L2.1192[12981,13960] 113ns 100mb |L2.1192| " + - "L2.1193[13961,13999] 113ns 4mb |L2.1193| " + - "L2.1194[14000,14981] 117ns 100mb |L2.1194| " + - "L2.1195[14982,15962] 117ns 100mb |L2.1195| " + - "L2.1196[15963,15999] 117ns 4mb |L2.1196| " + - "L2.1197[16000,16980] 117ns 100mb |L2.1197| " + - "L2.1198[16981,17960] 117ns 100mb |L2.1198| " + - "L2.1199[17961,17999] 117ns 4mb |L2.1199| " + - "L2.1200[18000,18981] 123ns 100mb |L2.1200| " + - "L2.1201[18982,19962] 123ns 100mb |L2.1201| " + - "L2.1202[19963,19999] 123ns 4mb |L2.1202| " + - "L2.1203[20000,20981] 123ns 100mb |L2.1203| " + - "L2.1204[20982,21962] 123ns 100mb |L2.1204| " + - "L2.1205[21963,21999] 123ns 4mb |L2.1205| " + - "L2.1206[22000,22981] 127ns 100mb |L2.1206| " + - "L2.1207[22982,23962] 127ns 100mb |L2.1207| " + - "L2.1208[23963,23999] 127ns 4mb |L2.1208| " + - "L2.1209[24000,24984] 127ns 100mb |L2.1209| " + - "L2.1210[24985,25968] 127ns 100mb |L2.1210| " + - "L2.1211[25969,25999] 127ns 3mb |L2.1211| " + - "L2.1212[26000,26981] 131ns 100mb |L2.1212| " + - "L2.1213[26982,27962] 131ns 100mb |L2.1213| " + - "L2.1214[27963,27999] 131ns 4mb |L2.1214| " + - "L2.1215[28000,28978] 131ns 100mb |L2.1215| " + - "L2.1216[28979,29956] 131ns 100mb |L2.1216| " + - "L2.1217[29957,29999] 131ns 5mb |L2.1217| " + - "L2.1218[30000,30977] 131ns 100mb |L2.1218| " + - "L2.1219[30978,31954] 131ns 100mb |L2.1219| " + - "L2.1220[31955,31999] 131ns 5mb |L2.1220| " + - "L2.1221[32000,32981] 137ns 100mb |L2.1221| " + - "L2.1222[32982,33962] 137ns 100mb |L2.1222| " + - "L2.1223[33963,33999] 137ns 4mb |L2.1223| " + - "L2.1224[34000,34981] 137ns 100mb |L2.1224| " + - "L2.1225[34982,35962] 137ns 100mb |L2.1225| " + - "L2.1226[35963,35999] 137ns 4mb |L2.1226| " + - "L2.1227[36000,36981] 141ns 100mb |L2.1227| " + - "L2.1228[36982,37962] 141ns 100mb |L2.1228| " + - "L2.1229[37963,37999] 141ns 4mb |L2.1229| " + - "L2.1230[38000,38985] 141ns 100mb |L2.1230| " + - "L2.1231[38986,39970] 141ns 100mb |L2.1231| " + - "L2.1232[39971,39999] 141ns 3mb |L2.1232| " + - "L2.1233[40000,40983] 146ns 100mb |L2.1233| " + - "L2.1234[40984,41966] 146ns 100mb |L2.1234| " + - "L2.1235[41967,41999] 146ns 3mb |L2.1235| " + - "L2.1236[42000,42977] 146ns 100mb |L2.1236| " + - "L2.1237[42978,43954] 146ns 100mb |L2.1237| " + - "L2.1238[43955,43999] 146ns 5mb |L2.1238| " + - "L2.1239[44000,44976] 146ns 100mb |L2.1239| " + - "L2.1240[44977,45952] 146ns 100mb |L2.1240| " + - 
"L2.1241[45953,45999] 146ns 5mb |L2.1241| " + - "L2.1242[46000,46980] 151ns 100mb |L2.1242| " + - "L2.1243[46981,47960] 151ns 100mb |L2.1243| " + - "L2.1244[47961,47999] 151ns 4mb |L2.1244| " + - "L2.1245[48000,48980] 151ns 100mb |L2.1245| " + - "L2.1246[48981,49960] 151ns 100mb |L2.1246| " + - "L2.1247[49961,49999] 151ns 4mb |L2.1247| " + - "L2.1248[50000,50980] 151ns 100mb |L2.1248| " + - "L2.1249[50981,51960] 151ns 100mb |L2.1249| " + - "L2.1250[51961,51999] 151ns 4mb |L2.1250| " + - "L2.1251[52000,52981] 155ns 100mb |L2.1251| " + - "L2.1252[52982,53962] 155ns 100mb |L2.1252| " + - "L2.1253[53963,53999] 155ns 4mb |L2.1253| " + - "L2.1254[54000,54980] 155ns 100mb |L2.1254| " + - "L2.1255[54981,55960] 155ns 100mb |L2.1255| " + - "L2.1256[55961,55999] 155ns 4mb |L2.1256| " + - "L2.1257[56000,56981] 160ns 100mb |L2.1257| " + - "L2.1258[56982,57962] 160ns 100mb |L2.1258| " + - "L2.1259[57963,57999] 160ns 4mb |L2.1259| " + - "L2.1260[58000,58981] 160ns 100mb |L2.1260| " + - "L2.1261[58982,59962] 160ns 100mb |L2.1261| " + - "L2.1262[59963,59999] 160ns 4mb |L2.1262| " + - "L2.1263[60000,60980] 165ns 100mb |L2.1263| " + - "L2.1264[60981,61960] 165ns 100mb |L2.1264| " + - "L2.1265[61961,61999] 165ns 4mb |L2.1265| " + - "L2.1266[62000,62981] 165ns 100mb |L2.1266| " + - "L2.1267[62982,63962] 165ns 100mb |L2.1267| " + - "L2.1268[63963,63999] 165ns 4mb |L2.1268| " + - "L2.1269[64000,64980] 165ns 100mb |L2.1269| " + - "L2.1270[64981,65960] 165ns 100mb |L2.1270| " + - "L2.1271[65961,65999] 165ns 4mb |L2.1271| " + - "L2.1272[66000,66981] 169ns 100mb |L2.1272| " + - "L2.1273[66982,67962] 169ns 100mb |L2.1273| " + - "L2.1274[67963,67999] 169ns 4mb |L2.1274| " + - "L2.1275[68000,68980] 169ns 100mb |L2.1275| " + - "L2.1276[68981,69960] 169ns 100mb |L2.1276| " + - "L2.1277[69961,69999] 169ns 4mb |L2.1277| " + - "L2.1278[70000,70981] 175ns 100mb |L2.1278| " + - "L2.1279[70982,71962] 175ns 100mb |L2.1279| " + - "L2.1280[71963,71999] 175ns 4mb |L2.1280| " + - "L2.1281[72000,72981] 175ns 100mb |L2.1281| " + - "L2.1282[72982,73962] 175ns 100mb |L2.1282| " + - "L2.1283[73963,73999] 175ns 4mb |L2.1283| " + - "L2.1284[74000,74981] 179ns 100mb |L2.1284| " + - "L2.1285[74982,75962] 179ns 100mb |L2.1285| " + - "L2.1286[75963,75999] 179ns 4mb |L2.1286| " + - "L2.1287[76000,76984] 179ns 100mb |L2.1287| " + - "L2.1288[76985,77968] 179ns 100mb |L2.1288| " + - "L2.1289[77969,77999] 179ns 3mb |L2.1289| " + - "L2.1290[78000,78982] 184ns 100mb |L2.1290| " + - "L2.1291[78983,79964] 184ns 100mb |L2.1291| " + - "L2.1292[79965,79999] 184ns 4mb |L2.1292| " + - "L2.1293[80000,80977] 184ns 100mb |L2.1293| " + - "L2.1294[80978,81954] 184ns 100mb |L2.1294| " + - "L2.1295[81955,81999] 184ns 5mb |L2.1295| " + - "L2.1296[82000,82977] 184ns 100mb |L2.1296| " + - "L2.1297[82978,83954] 184ns 100mb |L2.1297| " + - "L2.1298[83955,83999] 184ns 5mb |L2.1298| " + - "L2.1299[84000,84983] 188ns 100mb |L2.1299| " + - "L2.1300[84984,85966] 188ns 100mb |L2.1300| " + - "L2.1301[85967,85999] 188ns 3mb |L2.1301| " + - "L2.1302[86000,86983] 192ns 100mb |L2.1302| " + - "L2.1303[86984,87966] 192ns 100mb |L2.1303| " + - "L2.1304[87967,87999] 192ns 3mb |L2.1304| " + - "L2.1305[88000,88978] 192ns 100mb |L2.1305| " + - "L2.1306[88979,89956] 192ns 100mb |L2.1306| " + - "L2.1307[89957,89999] 192ns 5mb |L2.1307| " + - "L2.1308[90000,90978] 192ns 100mb |L2.1308| " + - "L2.1309[90979,91956] 192ns 100mb |L2.1309| " + - "L2.1310[91957,91999] 192ns 5mb |L2.1310| " + - "L2.1311[92000,92979] 197ns 100mb |L2.1311| " + - "L2.1312[92980,93958] 197ns 100mb |L2.1312| " + - 
"L2.1313[93959,93999] 197ns 4mb |L2.1313| " + - "L2.1314[94000,94980] 197ns 100mb |L2.1314| " + - "L2.1315[94981,95960] 197ns 100mb |L2.1315| " + - "L2.1316[95961,95999] 197ns 4mb |L2.1316| " + - "L2.1317[96000,96980] 197ns 100mb |L2.1317| " + - "L2.1318[96981,97960] 197ns 100mb |L2.1318| " + - "L2.1319[97961,97999] 197ns 4mb |L2.1319| " + - "L2.1320[98000,98981] 201ns 100mb |L2.1320| " + - "L2.1321[98982,99962] 201ns 100mb |L2.1321| " + - "L2.1322[99963,99999] 201ns 4mb |L2.1322| " + - "L2.1323[100000,100980] 201ns 100mb |L2.1323| " + - "L2.1324[100981,101960] 201ns 100mb |L2.1324| " + - "L2.1325[101961,101999] 201ns 4mb |L2.1325| " + - "L2.1326[102000,102981] 207ns 100mb |L2.1326| " + - "L2.1327[102982,103962] 207ns 100mb |L2.1327| " + - "L2.1328[103963,103999] 207ns 4mb |L2.1328| " + - "L2.1329[104000,104981] 207ns 100mb |L2.1329| " + - "L2.1330[104982,105962] 207ns 100mb |L2.1330| " + - "L2.1331[105963,105999] 207ns 4mb |L2.1331| " + - "L2.1332[106000,106981] 211ns 100mb |L2.1332| " + - "L2.1333[106982,107962] 211ns 100mb |L2.1333| " + - "L2.1334[107963,107999] 211ns 4mb |L2.1334| " + - "L2.1335[108000,108984] 211ns 100mb |L2.1335| " + - "L2.1336[108985,109968] 211ns 100mb |L2.1336| " + - "L2.1337[109969,109999] 211ns 3mb |L2.1337| " + - "L2.1338[110000,110981] 215ns 100mb |L2.1338| " + - "L2.1339[110982,111962] 215ns 100mb |L2.1339| " + - "L2.1340[111963,111999] 215ns 4mb |L2.1340| " + - "L2.1341[112000,112978] 215ns 100mb |L2.1341| " + - "L2.1342[112979,113956] 215ns 100mb |L2.1342| " + - "L2.1343[113957,113999] 215ns 5mb |L2.1343| " + - "L2.1344[114000,114977] 215ns 100mb |L2.1344| " + - "L2.1345[114978,115954] 215ns 100mb |L2.1345| " + - "L2.1346[115955,115999] 215ns 5mb |L2.1346| " + - "L2.1347[116000,116981] 221ns 100mb |L2.1347| " + - "L2.1348[116982,117962] 221ns 100mb |L2.1348| " + - "L2.1349[117963,117999] 221ns 4mb |L2.1349| " + - "L2.1350[118000,118981] 221ns 100mb |L2.1350| " + - "L2.1351[118982,119962] 221ns 100mb |L2.1351| " + - "L2.1352[119963,119999] 221ns 4mb |L2.1352| " + - "L2.1353[120000,120981] 225ns 100mb |L2.1353| " + - "L2.1354[120982,121962] 225ns 100mb |L2.1354| " + - "L2.1355[121963,121999] 225ns 4mb |L2.1355| " + - "L2.1356[122000,122986] 225ns 100mb |L2.1356| " + - "L2.1357[122987,123972] 225ns 100mb |L2.1357| " + - "L2.1358[123973,123999] 225ns 3mb |L2.1358| " + - "L2.1359[124000,124984] 230ns 100mb |L2.1359| " + - "L2.1360[124985,125968] 230ns 100mb |L2.1360| " + - "L2.1361[125969,125999] 230ns 3mb |L2.1361| " + - "L2.1362[126000,126976] 230ns 100mb |L2.1362| " + - "L2.1363[126977,127952] 230ns 100mb |L2.1363| " + - "L2.1364[127953,127999] 230ns 5mb |L2.1364| " + - "L2.1365[128000,128976] 230ns 100mb |L2.1365| " + - "L2.1366[128977,129952] 230ns 100mb |L2.1366| " + - "L2.1367[129953,129999] 230ns 5mb |L2.1367| " + - "L2.1368[130000,130979] 235ns 100mb |L2.1368| " + - "L2.1369[130980,131958] 235ns 100mb |L2.1369| " + - "L2.1370[131959,131999] 235ns 4mb |L2.1370| " + - "L2.1371[132000,132980] 235ns 100mb |L2.1371| " + - "L2.1372[132981,133960] 235ns 100mb |L2.1372| " + - "L2.1373[133961,133999] 235ns 4mb |L2.1373| " + - "L2.1374[134000,134980] 235ns 100mb |L2.1374| " + - "L2.1375[134981,135960] 235ns 100mb |L2.1375| " + - "L2.1376[135961,135999] 235ns 4mb |L2.1376| " + - "L2.1377[136000,136981] 239ns 100mb |L2.1377| " + - "L2.1378[136982,137962] 239ns 100mb |L2.1378| " + - "L2.1379[137963,137999] 239ns 4mb |L2.1379| " + - "L2.1380[138000,138980] 239ns 100mb |L2.1380| " + - "L2.1381[138981,139960] 239ns 100mb |L2.1381| " + - "L2.1382[139961,139999] 239ns 
4mb |L2.1382| " + - "L2.1383[140000,140981] 244ns 100mb |L2.1383| " + - "L2.1384[140982,141962] 244ns 100mb |L2.1384| " + - "L2.1385[141963,141999] 244ns 4mb |L2.1385| " + - "L2.1386[142000,142981] 244ns 100mb |L2.1386| " + - "L2.1387[142982,143962] 244ns 100mb |L2.1387| " + - "L2.1388[143963,143999] 244ns 4mb |L2.1388| " + - "L2.1389[144000,144981] 249ns 100mb |L2.1389| " + - "L2.1390[144982,145962] 249ns 100mb |L2.1390| " + - "L2.1391[145963,145999] 249ns 4mb |L2.1391| " + - "L2.1392[146000,146983] 249ns 100mb |L2.1392| " + - "L2.1393[146984,147966] 249ns 100mb |L2.1393| " + - "L2.1394[147967,147999] 249ns 3mb |L2.1394| " + - "L2.1395[148000,148980] 253ns 100mb |L2.1395| " + - "L2.1396[148981,149960] 253ns 100mb |L2.1396| " + - "L2.1397[149961,149999] 253ns 4mb |L2.1397| " + - "L2.1398[150000,150979] 253ns 100mb |L2.1398| " + - "L2.1399[150980,151958] 253ns 100mb |L2.1399| " + - "L2.1400[151959,151999] 253ns 4mb |L2.1400| " + - "L2.1401[152000,152978] 253ns 100mb |L2.1401| " + - "L2.1402[152979,153956] 253ns 100mb |L2.1402| " + - "L2.1403[153957,153999] 253ns 5mb |L2.1403| " + - "L2.1404[154000,154981] 259ns 100mb |L2.1404| " + - "L2.1405[154982,155962] 259ns 100mb |L2.1405| " + - "L2.1406[155963,155999] 259ns 4mb |L2.1406| " + - "L2.1407[156000,156981] 259ns 100mb |L2.1407| " + - "L2.1408[156982,157962] 259ns 100mb |L2.1408| " + - "L2.1409[157963,157999] 259ns 4mb |L2.1409| " + - "L2.1410[158000,158981] 263ns 100mb |L2.1410| " + - "L2.1411[158982,159962] 263ns 100mb |L2.1411| " + - "L2.1412[159963,159999] 263ns 4mb |L2.1412| " + - "L2.1413[160000,160985] 263ns 100mb |L2.1413| " + - "L2.1414[160986,161970] 263ns 100mb |L2.1414| " + - "L2.1415[161971,161999] 263ns 3mb |L2.1415| " + - "L2.1416[162000,162982] 268ns 100mb |L2.1416| " + - "L2.1417[162983,163964] 268ns 100mb |L2.1417| " + - "L2.1418[163965,163999] 268ns 4mb |L2.1418| " + - "L2.1419[164000,164977] 268ns 100mb |L2.1419| " + - "L2.1420[164978,165954] 268ns 100mb |L2.1420| " + - "L2.1421[165955,165999] 268ns 5mb |L2.1421| " + - "L2.1422[166000,166977] 268ns 100mb |L2.1422| " + - "L2.1423[166978,167954] 268ns 100mb |L2.1423| " + - "L2.1424[167955,167999] 268ns 5mb |L2.1424| " + - "L2.1425[168000,168983] 272ns 100mb |L2.1425| " + - "L2.1426[168984,169966] 272ns 100mb |L2.1426| " + - "L2.1427[169967,169999] 272ns 3mb |L2.1427| " + - "L2.1428[170000,170984] 276ns 100mb |L2.1428| " + - "L2.1429[170985,171968] 276ns 100mb |L2.1429| " + - "L2.1430[171969,171999] 276ns 3mb |L2.1430| " + - "L2.1431[172000,172978] 276ns 100mb |L2.1431| " + - "L2.1432[172979,173956] 276ns 100mb |L2.1432| " + - "L2.1433[173957,173999] 276ns 5mb |L2.1433| " + - "L2.1434[174000,174978] 276ns 100mb |L2.1434| " + - "L2.1435[174979,175956] 276ns 100mb |L2.1435| " + - "L2.1436[175957,175999] 276ns 5mb |L2.1436| " + - "L2.1437[176000,176979] 281ns 100mb |L2.1437| " + - "L2.1438[176980,177958] 281ns 100mb |L2.1438| " + - "L2.1439[177959,177999] 281ns 4mb |L2.1439| " + - "L2.1440[178000,178980] 281ns 100mb |L2.1440| " + - "L2.1441[178981,179960] 281ns 100mb |L2.1441| " + - "L2.1442[179961,179999] 281ns 4mb |L2.1442| " + - "L2.1443[180000,180980] 281ns 100mb |L2.1443|" + - "L2.1444[180981,181960] 281ns 100mb |L2.1444|" + - "L2.1445[181961,181999] 281ns 4mb |L2.1445|" + - "L2.1446[182000,182981] 285ns 100mb |L2.1446|" + - "L2.1447[182982,183962] 285ns 100mb |L2.1447|" + - "L2.1448[183963,183999] 285ns 4mb |L2.1448|" + - "L2.1449[184000,184980] 285ns 100mb |L2.1449|" + - "L2.1450[184981,185960] 285ns 100mb |L2.1450|" + - "L2.1451[185961,185999] 285ns 4mb |L2.1451|" + - 
"L2.1452[186000,186981] 291ns 100mb |L2.1452|" + - "L2.1453[186982,187962] 291ns 100mb |L2.1453|" + - "L2.1454[187963,187999] 291ns 4mb |L2.1454|" + - "L2.1455[188000,188981] 291ns 100mb |L2.1455|" + - "L2.1456[188982,189962] 291ns 100mb |L2.1456|" + - "L2.1457[189963,189999] 291ns 4mb |L2.1457|" + - "L2.1458[190000,190980] 295ns 100mb |L2.1458|" + - "L2.1459[190981,191960] 295ns 100mb |L2.1459|" + - "L2.1460[191961,191999] 295ns 4mb |L2.1460|" + - "L2.1461[192000,192981] 295ns 100mb |L2.1461|" + - "L2.1462[192982,193962] 295ns 100mb |L2.1462|" + - "L2.1463[193963,193999] 295ns 4mb |L2.1463|" + - "L2.1464[194000,194980] 295ns 100mb |L2.1464|" + - "L2.1465[194981,195960] 295ns 100mb |L2.1465|" + - "L2.1466[195961,195999] 295ns 4mb |L2.1466|" + - "L2.1467[196000,196981] 299ns 100mb |L2.1467|" + - "L2.1468[196982,197962] 299ns 100mb |L2.1468|" + - "L2.1470[197963,198943] 299ns 100mb |L2.1470|" + - "L2.1471[198944,199923] 299ns 100mb |L2.1471|" + - "L2.1472[199924,200000] 299ns 8mb |L2.1472|" "### ); }
adc5c2bf069fdcf56e3a84213df147758f15f4d8
Carol (Nichols || Goulding)
2023-01-09 13:39:44
Add a gRPC API to the catalog service to get Parquet files by namespace
Tests that write line protocol (which may contain writes to multiple tables) need to be able to see when new Parquet files are saved. A hedged client-usage sketch follows the diff below.
null
feat: Add a gRPC API to the catalog service to get Parquet files by namespace Tests that write line protocol (that may contain writes to multiple tables) need to be able to see when new Parquet files are saved.
diff --git a/generated_types/protos/influxdata/iox/catalog/v1/service.proto b/generated_types/protos/influxdata/iox/catalog/v1/service.proto index 4cd65b466b..c232479b80 100644 --- a/generated_types/protos/influxdata/iox/catalog/v1/service.proto +++ b/generated_types/protos/influxdata/iox/catalog/v1/service.proto @@ -13,6 +13,9 @@ service CatalogService { // Get the parquet_file catalog records in the given namespace and table name rpc GetParquetFilesByNamespaceTable(GetParquetFilesByNamespaceTableRequest) returns (GetParquetFilesByNamespaceTableResponse); + + // Get the parquet_file catalog records in the given namespace + rpc GetParquetFilesByNamespace(GetParquetFilesByNamespaceRequest) returns (GetParquetFilesByNamespaceResponse); } message GetParquetFilesByPartitionIdRequest { @@ -63,3 +66,13 @@ message GetParquetFilesByNamespaceTableResponse { // the parquet_file records in the table in the namespace repeated ParquetFile parquet_files = 1; } + +message GetParquetFilesByNamespaceRequest { + // the namespace name + string namespace_name = 1; +} + +message GetParquetFilesByNamespaceResponse { + // the parquet_file records in the namespace + repeated ParquetFile parquet_files = 1; +} diff --git a/influxdb_iox_client/src/client/catalog.rs b/influxdb_iox_client/src/client/catalog.rs index dece931e1c..d4ea865c70 100644 --- a/influxdb_iox_client/src/client/catalog.rs +++ b/influxdb_iox_client/src/client/catalog.rs @@ -66,4 +66,17 @@ impl Client { Ok(response.into_inner().parquet_files) } + + /// Get the Parquet file records by their namespace + pub async fn get_parquet_files_by_namespace( + &mut self, + namespace_name: String, + ) -> Result<Vec<ParquetFile>, Error> { + let response = self + .inner + .get_parquet_files_by_namespace(GetParquetFilesByNamespaceRequest { namespace_name }) + .await?; + + Ok(response.into_inner().parquet_files) + } } diff --git a/service_grpc_catalog/src/lib.rs b/service_grpc_catalog/src/lib.rs index 9ba75c563d..d8a2e44aac 100644 --- a/service_grpc_catalog/src/lib.rs +++ b/service_grpc_catalog/src/lib.rs @@ -126,6 +126,42 @@ impl catalog_service_server::CatalogService for CatalogService { Ok(Response::new(response)) } + + async fn get_parquet_files_by_namespace( + &self, + request: Request<GetParquetFilesByNamespaceRequest>, + ) -> Result<Response<GetParquetFilesByNamespaceResponse>, Status> { + let mut repos = self.catalog.repositories().await; + let req = request.into_inner(); + + let namespace = repos + .namespaces() + .get_by_name(&req.namespace_name) + .await + .map_err(|e| Status::unknown(e.to_string()))? + .ok_or_else(|| { + Status::not_found(format!("Namespace {} not found", req.namespace_name)) + })?; + + let parquet_files = repos + .parquet_files() + .list_by_namespace_not_to_delete(namespace.id) + .await + .map_err(|e| { + warn!( + error=%e, + %req.namespace_name, + "failed to get parquet_files for namespace" + ); + Status::not_found(e.to_string()) + })?; + + let parquet_files: Vec<_> = parquet_files.into_iter().map(to_parquet_file).collect(); + + let response = GetParquetFilesByNamespaceResponse { parquet_files }; + + Ok(Response::new(response)) + } } // converts the catalog ParquetFile to protobuf
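To make the new RPC concrete, here is a hedged sketch of how a test or tool might call it through the catalog client extended in this diff. Only `get_parquet_files_by_namespace` comes from the commit; the `catalog::Client` module path, the `connection::Builder`, and the endpoint URL are assumptions based on how other IOx client commands connect.

```rust
// Hypothetical caller; connection details and module paths are assumptions,
// only the get_parquet_files_by_namespace call is taken from this commit.
use influxdb_iox_client::{catalog::Client, connection::Builder};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Illustrative address; point this at a running IOx catalog gRPC service.
    let connection = Builder::default().build("http://127.0.0.1:8082").await?;
    let mut client = Client::new(connection);

    // New in this commit: list every non-deleted parquet_file record in the
    // namespace, across all of its tables.
    let files = client
        .get_parquet_files_by_namespace("my_namespace".to_string())
        .await?;

    println!("namespace has {} parquet files", files.len());
    Ok(())
}
```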
d10ad87f2c7aa4f34e0da3cd351a9b0afa9a8fdb
Trevor Hilton
2024-12-20 11:02:36
write metrics (#25692)
Added Prometheus metrics to track lines written and bytes written per database. The write buffer does the tracking after validation of the incoming line protocol. Tests added to verify. A minimal metric-registration sketch follows the diff below.
null
feat: write metrics (#25692) Added prometheus metrics to track lines written and bytes written per database. The write buffer does the tracking after validation of incoming line protocol. Tests added to verify.
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index 2f0511716d..bc1a433b6f 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -484,6 +484,7 @@ pub async fn command(config: Config) -> Result<()> { executor: Arc::clone(&exec), wal_config, parquet_cache, + metric_registry: Arc::clone(&metrics), }) .await .map_err(|e| Error::WriteBufferInit(e.into()))?; diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index 9f3f6a2946..bf99517402 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -815,6 +815,7 @@ mod tests { executor: Arc::clone(&exec), wal_config: WalConfig::test_config(), parquet_cache: Some(parquet_cache), + metric_registry: Arc::clone(&metrics), }, ) .await diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index ae12b1358c..1973685272 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -700,6 +700,7 @@ mod tests { snapshot_size: 1, }, parquet_cache: Some(parquet_cache), + metric_registry: Default::default(), }) .await .unwrap(); diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml index 6c7fe29728..6d0fa9b45c 100644 --- a/influxdb3_write/Cargo.toml +++ b/influxdb3_write/Cargo.toml @@ -18,6 +18,7 @@ iox_catalog.workspace = true iox_http.workspace = true iox_query.workspace = true iox_time.workspace = true +metric.workspace = true parquet_file.workspace = true observability_deps.workspace = true schema.workspace = true @@ -71,7 +72,6 @@ optional = true # Core Crates arrow_util.workspace = true insta.workspace = true -metric.workspace = true pretty_assertions.workspace = true test_helpers.workspace = true test-log.workspace = true diff --git a/influxdb3_write/src/write_buffer/metrics.rs b/influxdb3_write/src/write_buffer/metrics.rs new file mode 100644 index 0000000000..a2d1863c1e --- /dev/null +++ b/influxdb3_write/src/write_buffer/metrics.rs @@ -0,0 +1,94 @@ +use std::borrow::Cow; + +use metric::{Metric, Registry, U64Counter}; + +#[derive(Debug)] +pub(super) struct WriteMetrics { + write_lines_total: Metric<U64Counter>, + write_bytes_total: Metric<U64Counter>, +} + +pub(super) const WRITE_LINES_TOTAL_NAME: &str = "influxdb3_write_lines_total"; +pub(super) const WRITE_BYTES_TOTAL_NAME: &str = "influxdb3_write_bytes_total"; + +impl WriteMetrics { + pub(super) fn new(metric_registry: &Registry) -> Self { + let write_lines_total = metric_registry.register_metric::<U64Counter>( + WRITE_LINES_TOTAL_NAME, + "track total number of lines written to the database", + ); + let write_bytes_total = metric_registry.register_metric::<U64Counter>( + WRITE_BYTES_TOTAL_NAME, + "track total number of bytes written to the database", + ); + Self { + write_lines_total, + write_bytes_total, + } + } + + pub(super) fn record_lines<D: Into<String>>(&self, db: D, lines: u64) { + let db: Cow<'static, str> = Cow::from(db.into()); + self.write_lines_total.recorder([("db", db)]).inc(lines); + } + + pub(super) fn record_bytes<D: Into<String>>(&self, db: D, bytes: u64) { + let db: Cow<'static, str> = Cow::from(db.into()); + self.write_bytes_total.recorder([("db", db)]).inc(bytes); + } +} + +#[cfg(test)] +mod tests { + use metric::{Attributes, Registry}; + + use super::WriteMetrics; + + #[test] + fn record_lines() { + let metric_registry = Registry::new(); + let metrics = WriteMetrics::new(&metric_registry); + metrics.record_lines("foo", 64); + metrics.record_lines(String::from("bar"), 256); + assert_eq!( 
+ 64, + metrics + .write_lines_total + .get_observer(&Attributes::from(&[("db", "foo")])) + .unwrap() + .fetch() + ); + assert_eq!( + 256, + metrics + .write_lines_total + .get_observer(&Attributes::from(&[("db", "bar")])) + .unwrap() + .fetch() + ); + } + + #[test] + fn record_bytes() { + let metric_registry = Registry::new(); + let metrics = WriteMetrics::new(&metric_registry); + metrics.record_bytes("foo", 64); + metrics.record_bytes(String::from("bar"), 256); + assert_eq!( + 64, + metrics + .write_bytes_total + .get_observer(&Attributes::from(&[("db", "foo")])) + .unwrap() + .fetch() + ); + assert_eq!( + 256, + metrics + .write_bytes_total + .get_observer(&Attributes::from(&[("db", "bar")])) + .unwrap() + .fetch() + ); + } +} diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index b7223fedca..7fcc64e414 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -1,5 +1,6 @@ //! Implementation of an in-memory buffer for writes that persists data into a wal if it is configured. +mod metrics; pub mod persisted_files; #[allow(dead_code)] pub mod plugins; @@ -46,6 +47,8 @@ use influxdb3_wal::{DatabaseDefinition, FieldDefinition}; use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges}; use iox_query::QueryChunk; use iox_time::{Time, TimeProvider}; +use metric::Registry; +use metrics::WriteMetrics; use object_store::path::Path as ObjPath; use object_store::{ObjectMeta, ObjectStore}; use observability_deps::tracing::{debug, error}; @@ -154,6 +157,7 @@ pub struct WriteBufferImpl { wal_config: WalConfig, wal: Arc<dyn Wal>, time_provider: Arc<dyn TimeProvider>, + metrics: WriteMetrics, meta_cache: Arc<MetaCacheProvider>, last_cache: Arc<LastCacheProvider>, } @@ -171,6 +175,7 @@ pub struct WriteBufferImplArgs { pub executor: Arc<iox_query::exec::Executor>, pub wal_config: WalConfig, pub parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, + pub metric_registry: Arc<Registry>, } impl WriteBufferImpl { @@ -184,6 +189,7 @@ impl WriteBufferImpl { executor, wal_config, parquet_cache, + metric_registry, }: WriteBufferImplArgs, ) -> Result<Arc<Self>> { // load snapshots and replay the wal into the in memory buffer @@ -241,6 +247,7 @@ impl WriteBufferImpl { last_cache, persisted_files, buffer: queryable_buffer, + metrics: WriteMetrics::new(&metric_registry), }); let write_buffer: Arc<dyn WriteBuffer> = result.clone(); let triggers = result.catalog().triggers(); @@ -295,6 +302,12 @@ impl WriteBufferImpl { // Thus, after this returns, the data is both durable and queryable. 
self.wal.write_ops(ops).await?; + // record metrics for lines written and bytes written, for valid lines + self.metrics + .record_lines(&db_name, result.line_count as u64); + self.metrics + .record_bytes(&db_name, result.valid_bytes_count); + Ok(BufferedWriteRequest { db_name, invalid_lines: result.errors, @@ -970,6 +983,8 @@ mod tests { use influxdb3_wal::{Gen1Duration, SnapshotSequenceNumber, WalFileSequenceNumber}; use iox_query::exec::IOxSessionContext; use iox_time::{MockProvider, Time}; + use metric::{Attributes, Metric, U64Counter}; + use metrics::{WRITE_BYTES_TOTAL_NAME, WRITE_LINES_TOTAL_NAME}; use object_store::local::LocalFileSystem; use object_store::memory::InMemory; use object_store::{ObjectStore, PutPayload}; @@ -1026,6 +1041,7 @@ mod tests { executor: crate::test_help::make_exec(), wal_config: WalConfig::test_config(), parquet_cache: Some(Arc::clone(&parquet_cache)), + metric_registry: Default::default(), }) .await .unwrap(); @@ -1109,6 +1125,7 @@ mod tests { snapshot_size: 100, }, parquet_cache: Some(Arc::clone(&parquet_cache)), + metric_registry: Default::default(), }) .await .unwrap(); @@ -1176,6 +1193,7 @@ mod tests { snapshot_size: 1, }, parquet_cache: wbuf.parquet_cache.clone(), + metric_registry: Default::default(), }) .await .unwrap() @@ -1402,6 +1420,7 @@ mod tests { snapshot_size: 2, }, parquet_cache: write_buffer.parquet_cache.clone(), + metric_registry: Default::default(), }) .await .unwrap(); @@ -2350,6 +2369,125 @@ mod tests { assert!(result.is_ok()); } + #[tokio::test] + async fn write_metrics() { + let object_store = Arc::new(InMemory::new()); + let (buf, metrics) = setup_with_metrics( + Time::from_timestamp_nanos(0), + object_store, + WalConfig::test_config(), + ) + .await; + let lines_observer = metrics + .get_instrument::<Metric<U64Counter>>(WRITE_LINES_TOTAL_NAME) + .unwrap(); + let bytes_observer = metrics + .get_instrument::<Metric<U64Counter>>(WRITE_BYTES_TOTAL_NAME) + .unwrap(); + + let db_1 = "foo"; + let db_2 = "bar"; + + // do a write and check the metrics: + let lp = "\ + cpu,region=us,host=a usage=10\n\ + cpu,region=eu,host=b usage=10\n\ + cpu,region=ca,host=c usage=10\n\ + "; + do_writes( + db_1, + buf.as_ref(), + &[TestWrite { + lp, + time_seconds: 1, + }], + ) + .await; + assert_eq!( + 3, + lines_observer + .get_observer(&Attributes::from(&[("db", db_1)])) + .unwrap() + .fetch() + ); + let mut bytes: usize = lp.lines().map(|l| l.len()).sum(); + assert_eq!( + bytes as u64, + bytes_observer + .get_observer(&Attributes::from(&[("db", db_1)])) + .unwrap() + .fetch() + ); + + // do another write to that db and check again for updates: + let lp = "\ + mem,region=us,host=a used=1,swap=4\n\ + mem,region=eu,host=b used=1,swap=4\n\ + mem,region=ca,host=c used=1,swap=4\n\ + "; + do_writes( + db_1, + buf.as_ref(), + &[TestWrite { + lp, + time_seconds: 1, + }], + ) + .await; + assert_eq!( + 6, + lines_observer + .get_observer(&Attributes::from(&[("db", db_1)])) + .unwrap() + .fetch() + ); + bytes += lp.lines().map(|l| l.len()).sum::<usize>(); + assert_eq!( + bytes as u64, + bytes_observer + .get_observer(&Attributes::from(&[("db", db_1)])) + .unwrap() + .fetch() + ); + + // now do a write that will only be partially accepted to ensure that + // the metrics are only calculated for writes that get accepted: + + // the legume will not be accepted, because it contains a new tag, + // so should not be included in metric calculations: + let lp = "\ + produce,type=fruit,name=banana price=1.50\n\ + produce,type=fruit,name=papaya price=5.50\n\ + 
produce,type=vegetable,name=lettuce price=1.00\n\ + produce,type=fruit,name=lentils,family=legume price=2.00\n\ + "; + do_writes_partial( + db_2, + buf.as_ref(), + &[TestWrite { + lp, + time_seconds: 1, + }], + ) + .await; + assert_eq!( + 3, + lines_observer + .get_observer(&Attributes::from(&[("db", db_2)])) + .unwrap() + .fetch() + ); + // only take first three (valid) lines to get expected bytes: + let bytes: usize = lp.lines().take(3).map(|l| l.len()).sum(); + assert_eq!( + bytes as u64, + bytes_observer + .get_observer(&Attributes::from(&[("db", db_2)])) + .unwrap() + .fetch() + ); + } + struct TestWrite<LP> { lp: LP, time_seconds: i64, @@ -2374,6 +2512,25 @@ mod tests { } } + async fn do_writes_partial<W: WriteBuffer, LP: AsRef<str>>( + db: &'static str, + buffer: &W, + writes: &[TestWrite<LP>], + ) { + for w in writes { + buffer + .write_lp( + NamespaceName::new(db).unwrap(), + w.lp.as_ref(), + Time::from_timestamp_nanos(w.time_seconds * 1_000_000_000), + true, + Precision::Nanosecond, + ) + .await + .unwrap(); + } + } + async fn verify_catalog_count(n: usize, object_store: Arc<dyn ObjectStore>) { let mut checks = 0; loop { @@ -2435,7 +2592,9 @@ mod tests { IOxSessionContext, Arc<dyn TimeProvider>, ) { - setup_cache_optional(start, object_store, wal_config, true).await + let (buf, ctx, time_provider, _metrics) = + setup_inner(start, object_store, wal_config, true).await; + (buf, ctx, time_provider) } async fn setup_cache_optional( @@ -2447,8 +2606,35 @@ mod tests { Arc<WriteBufferImpl>, IOxSessionContext, Arc<dyn TimeProvider>, + ) { + let (buf, ctx, time_provider, _metrics) = + setup_inner(start, object_store, wal_config, use_cache).await; + (buf, ctx, time_provider) + } + + async fn setup_with_metrics( + start: Time, + object_store: Arc<dyn ObjectStore>, + wal_config: WalConfig, + ) -> (Arc<WriteBufferImpl>, Arc<Registry>) { + let (buf, _ctx, _time_provider, metrics) = + setup_inner(start, object_store, wal_config, false).await; + (buf, metrics) + } + + async fn setup_inner( + start: Time, + object_store: Arc<dyn ObjectStore>, + wal_config: WalConfig, + use_cache: bool, + ) -> ( + Arc<WriteBufferImpl>, + IOxSessionContext, + Arc<dyn TimeProvider>, + Arc<Registry>, ) { let time_provider: Arc<dyn TimeProvider> = Arc::new(MockProvider::new(start)); + let metric_registry = Arc::new(Registry::new()); let (object_store, parquet_cache) = if use_cache { let (object_store, parquet_cache) = test_cached_obj_store_and_oracle( object_store, @@ -2474,13 +2660,14 @@ mod tests { executor: crate::test_help::make_exec(), wal_config, parquet_cache, + metric_registry: Arc::clone(&metric_registry), }) .await .unwrap(); let ctx = IOxSessionContext::with_testing(); let runtime_env = ctx.inner().runtime_env(); register_iox_object_store(runtime_env, "influxdb3", Arc::clone(&object_store)); - (wbuf, ctx, time_provider) + (wbuf, ctx, time_provider, metric_registry) } async fn get_table_batches( diff --git a/influxdb3_write/src/write_buffer/validator.rs b/influxdb3_write/src/write_buffer/validator.rs index 68c86559b9..9f331627c2 100644 --- a/influxdb3_write/src/write_buffer/validator.rs +++ b/influxdb3_write/src/write_buffer/validator.rs @@ -31,6 +31,7 @@ pub struct WithCatalog { pub struct LinesParsed { catalog: WithCatalog, lines: Vec<QualifiedLine>, + bytes: u64, catalog_batch: Option<CatalogBatch>, errors: Vec<WriteLineError>, } @@ -89,6 +90,7 @@ impl WriteValidator<WithCatalog> { let mut errors = vec![]; let mut lp_lines = lp.lines(); let mut lines = vec![]; + let mut bytes = 0; let mut catalog_updates 
= vec![]; let mut schema = Cow::Borrowed(self.state.db_schema.as_ref()); @@ -100,14 +102,16 @@ impl WriteValidator<WithCatalog> { error_message: e.to_string(), }) .and_then(|line| { + let raw_line = lp_lines.next().unwrap(); validate_and_qualify_v3_line( &mut schema, line_idx, line, - lp_lines.next().unwrap(), + raw_line, ingest_time, precision, ) + .inspect(|_| bytes += raw_line.len() as u64) }) { Ok((qualified_line, catalog_ops)) => (qualified_line, catalog_ops), Err(error) => { @@ -144,6 +148,7 @@ impl WriteValidator<WithCatalog> { state: LinesParsed { catalog: self.state, lines, + bytes, catalog_batch, errors, }, @@ -170,6 +175,7 @@ impl WriteValidator<WithCatalog> { let mut errors = vec![]; let mut lp_lines = lp.lines(); let mut lines = vec![]; + let mut bytes = 0; let mut catalog_updates = vec![]; let mut schema = Cow::Borrowed(self.state.db_schema.as_ref()); @@ -183,14 +189,9 @@ impl WriteValidator<WithCatalog> { error_message: e.to_string(), }) .and_then(|l| { - validate_and_qualify_v1_line( - &mut schema, - line_idx, - l, - lp_lines.next().unwrap(), - ingest_time, - precision, - ) + let raw_line = lp_lines.next().unwrap(); + validate_and_qualify_v1_line(&mut schema, line_idx, l, ingest_time, precision) + .inspect(|_| bytes += raw_line.len() as u64) }) { Ok((qualified_line, catalog_op)) => (qualified_line, catalog_op), Err(e) => { @@ -231,6 +232,7 @@ impl WriteValidator<WithCatalog> { catalog: self.state, lines, errors, + bytes, catalog_batch, }, }) @@ -504,7 +506,6 @@ fn validate_and_qualify_v1_line( db_schema: &mut Cow<'_, DatabaseSchema>, line_number: usize, line: ParsedLine, - _raw_line: &str, ingest_time: Time, precision: Precision, ) -> Result<(QualifiedLine, Option<CatalogOp>), WriteLineError> { @@ -710,6 +711,8 @@ fn validate_and_qualify_v1_line( pub struct ValidatedLines { /// Number of lines passed in pub(crate) line_count: usize, + /// Number of bytes of all valid lines written + pub(crate) valid_bytes_count: u64, /// Number of fields passed in pub(crate) field_count: usize, /// Number of index columns passed in, whether tags (v1) or series keys (v3) @@ -763,6 +766,7 @@ impl WriteValidator<LinesParsed> { ValidatedLines { line_count, + valid_bytes_count: self.state.bytes, field_count, index_count, errors: self.state.errors,
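For readers skimming the diff above, this is a minimal sketch of the per-database counter pattern the new `WriteMetrics` struct wraps, using only `metric` crate calls that appear in the commit (`Registry::new`, `register_metric::<U64Counter>`, `recorder`, `get_observer`, `fetch`); the database name and counts are illustrative.

```rust
// Minimal sketch of a per-database write counter; the metric name matches the
// one registered by the new WriteMetrics struct, values are illustrative.
use std::borrow::Cow;

use metric::{Attributes, Registry, U64Counter};

fn main() {
    let registry = Registry::new();

    // Registered once per process, as WriteMetrics::new does.
    let write_lines_total = registry.register_metric::<U64Counter>(
        "influxdb3_write_lines_total",
        "track total number of lines written to the database",
    );

    // Each database becomes its own labelled series via a "db" attribute.
    let db: Cow<'static, str> = Cow::from("foo");
    write_lines_total.recorder([("db", db)]).inc(3);

    // Read the counter back the same way the new tests assert on it.
    let observed = write_lines_total
        .get_observer(&Attributes::from(&[("db", "foo")]))
        .unwrap()
        .fetch();
    assert_eq!(observed, 3);
}
```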
b1790760119b99f3f785201985d6a262ee1f8f2c
Fraser Savage
2023-09-18 14:11:26
Use separate attribute for ingester WAL replay failure reason
Separating the failure reason from the result attribute of the WAL file replay metric keeps the flexibility to add other failure modes in the future. A short sketch of the attribute scheme follows the diff below.
null
refactor(ingester): Use separate attribute for ingester WAL replay failure reason By separating the failure reason from the result of the WAL file replay metric, it keeps flexibility to include other failure modes in future.
diff --git a/ingester/src/init/wal_replay.rs b/ingester/src/init/wal_replay.rs index e45c39be74..125f582b61 100644 --- a/ingester/src/init/wal_replay.rs +++ b/ingester/src/init/wal_replay.rs @@ -138,9 +138,9 @@ where "ingester_wal_replay_files_finished", "Number of WAL files that have been replayed", ); - let whole_file_count_metric = replayed_file_count_metric.recorder(&[("outcome", "whole")]); - let truncated_file_count_metric = - replayed_file_count_metric.recorder(&[("outcome", "truncated")]); + let file_count_success_metric = replayed_file_count_metric.recorder(&[("result", "success")]); + let file_count_error_truncated_metric = + replayed_file_count_metric.recorder(&[("result", "error"), ("reason", "truncated")]); let op_count_metric = metrics.register_metric::<U64Counter>( "ingester_wal_replay_ops", @@ -183,7 +183,7 @@ where let replay_result = replay_file(reader, sink, &ok_op_count_metric, &empty_op_count_metric) .await .map(|v| { - whole_file_count_metric.inc(1); + file_count_success_metric.inc(1); v }) .map_err(|e| { @@ -193,7 +193,7 @@ where { if io_err.kind() == std::io::ErrorKind::UnexpectedEof && file_number == n_files { max_sequence = max_sequence.max(*seq); - truncated_file_count_metric.inc(1); + file_count_error_truncated_metric.inc(1); warn!(%e, %file_id, "detected truncated WAL write, ending replay for file early"); return Ok(None); } @@ -620,14 +620,17 @@ mod tests { let whole_files = metrics .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_files_finished") .expect("file counter not found") - .get_observer(&Attributes::from(&[("outcome", "whole")])) + .get_observer(&Attributes::from(&[("result", "success")])) .expect("attributes not found") .fetch(); assert_eq!(whole_files, 3); let truncated_files = metrics .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_files_finished") .expect("file counter not found") - .get_observer(&Attributes::from(&[("outcome", "truncated")])) + .get_observer(&Attributes::from(&[ + ("result", "error"), + ("reason", "truncated"), + ])) .expect("attributes not found") .fetch(); assert_eq!(truncated_files, 0); @@ -800,14 +803,17 @@ mod tests { let whole_files = metrics .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_files_finished") .expect("file counter not found") - .get_observer(&Attributes::from(&[("outcome", "whole")])) + .get_observer(&Attributes::from(&[("result", "success")])) .expect("attributes not found") .fetch(); assert_eq!(whole_files, 2); let truncated_files = metrics .get_instrument::<Metric<U64Counter>>("ingester_wal_replay_files_finished") .expect("file counter not found") - .get_observer(&Attributes::from(&[("outcome", "truncated")])) + .get_observer(&Attributes::from(&[ + ("result", "error"), + ("reason", "truncated"), + ])) .expect("attributes not found") .fetch(); assert_eq!(truncated_files, 1);
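As a quick illustration of the attribute split, here is a hedged sketch using the same `metric` crate calls the diff touches; the metric name and the ("result", "reason") attributes come from the commit, while the counts are invented.

```rust
// Sketch of the new attribute scheme: "result" stays a stable success/error
// dimension and "reason" qualifies errors, so future failure modes can be
// added without disturbing the success series. Counts are illustrative.
use metric::{Attributes, Registry, U64Counter};

fn main() {
    let metrics = Registry::new();

    let replayed_files = metrics.register_metric::<U64Counter>(
        "ingester_wal_replay_files_finished",
        "Number of WAL files that have been replayed",
    );

    let file_count_success = replayed_files.recorder(&[("result", "success")]);
    let file_count_error_truncated =
        replayed_files.recorder(&[("result", "error"), ("reason", "truncated")]);

    // Pretend replay finished two whole files and hit one truncated tail.
    file_count_success.inc(2);
    file_count_error_truncated.inc(1);

    let truncated = replayed_files
        .get_observer(&Attributes::from(&[
            ("result", "error"),
            ("reason", "truncated"),
        ]))
        .unwrap()
        .fetch();
    assert_eq!(truncated, 1);
}
```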
6aa6d924c65ba1ad88e7a40a3f4093f27991e2c3
Paul Dix
2024-08-05 18:08:11
wal skip persist and notify if empty buffer (#25211)
* fix: wal skip persist and notify if empty buffer. This fixes the WAL so that it will skip persisting a file and notifying the file notifier if the WAL buffer is empty. * fix: fix last cache persist test. A minimal sketch of the early-return guard follows the diff below.
null
fix: wal skip persist and notify if empty buffer (#25211) * fix: wal skip persist and notify if empty buffer This fixes the WAL so that it will skip persisting a file and notifying the file notifier if the wal buffer is empty. * fix: fix last cache persist test
diff --git a/influxdb3_wal/src/object_store.rs b/influxdb3_wal/src/object_store.rs index 0fad1fb01a..3a01331c23 100644 --- a/influxdb3_wal/src/object_store.rs +++ b/influxdb3_wal/src/object_store.rs @@ -10,7 +10,7 @@ use futures_util::stream::StreamExt; use hashbrown::HashMap; use object_store::path::Path; use object_store::{ObjectStore, PutPayload}; -use observability_deps::tracing::error; +use observability_deps::tracing::{debug, error, info}; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; @@ -170,17 +170,15 @@ impl WalObjectStore { SnapshotInfo, OwnedSemaphorePermit, )> { - let (wal_contents, responses, snapshot) = self - .flush_buffer - .lock() - .await - .flush_buffer_into_contents_and_responses() - .await; - - // don't persist a wal file if there's nothing there - if wal_contents.is_empty() { - return None; - } + let (wal_contents, responses, snapshot) = { + let mut flush_buffer = self.flush_buffer.lock().await; + if flush_buffer.wal_buffer.is_empty() { + return None; + } + flush_buffer + .flush_buffer_into_contents_and_responses() + .await + }; let wal_path = wal_path(wal_contents.wal_file_number); let data = crate::serialize::serialize_to_file_bytes(&wal_contents) @@ -226,6 +224,7 @@ impl WalObjectStore { // now that we've persisted this latest notify and start the snapshot, if set let snapshot_response = match wal_contents.snapshot { Some(snapshot_details) => { + info!(?snapshot_details, "snapshotting wal"); let snapshot_done = self .file_notifier .notify_and_snapshot(wal_contents, snapshot_details) @@ -235,6 +234,10 @@ impl WalObjectStore { Some((snapshot_done, snapshot_info, snapshot_permit)) } None => { + debug!( + "notify sent to buffer for wal file {}", + wal_contents.wal_file_number.get() + ); self.file_notifier.notify(wal_contents); None } @@ -437,6 +440,12 @@ struct WalBuffer { write_op_responses: Vec<oneshot::Sender<WriteResult>>, } +impl WalBuffer { + fn is_empty(&self) -> bool { + self.database_to_write_batch.is_empty() + } +} + // Writes should only fail if the underlying WAL throws an error. They are validated before they // are buffered. The WAL should continuously retry the write until it succeeds. But if a timeout // passes, we can use this to pass the object store error back to the client. 
@@ -912,6 +921,30 @@ mod tests { assert_eq!(*snapshot_details, file_3_contents.snapshot); } + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn flush_for_empty_buffer_skips_notify() { + let object_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); + let notifier: Arc<dyn WalFileNotifier> = Arc::new(TestNotfiier::default()); + let wal_config = WalConfig { + max_write_buffer_size: 100, + flush_interval: Duration::from_secs(1), + snapshot_size: 2, + level_0_duration: Duration::from_nanos(10), + }; + let wal = WalObjectStore::new_without_replay( + Arc::clone(&object_store), + Arc::clone(&notifier), + wal_config, + ); + + assert!(wal.flush_buffer().await.is_none()); + let notifier = notifier.as_any().downcast_ref::<TestNotfiier>().unwrap(); + assert!(notifier.notified_writes.lock().is_empty()); + + // make sure no wal file was written + assert!(object_store.list(None).next().await.is_none()); + } + #[derive(Debug, Default)] struct TestNotfiier { notified_writes: parking_lot::Mutex<Vec<WalContents>>, diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 08e4e1c84f..dff54620b1 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -717,19 +717,18 @@ mod tests { ) .await .unwrap(); - // Advance time to allow for persistence of segment data: - wbuf.time_provider - .set(Time::from_timestamp(800, 0).unwrap()); - let mut count = 0; - loop { - count += 1; - tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; - if !wbuf.persisted_files.get_files(db_name, tbl_name).is_empty() { - break; - } else if count > 9 { - panic!("not persisting"); - } - } + + // do another write, which will force a snapshot of the WAL and thus the persistence of + // the catalog + wbuf.write_lp( + NamespaceName::new(db_name).unwrap(), + format!("{tbl_name},t1=b f1=false").as_str(), + Time::from_timestamp(40, 0).unwrap(), + false, + Precision::Nanosecond, + ) + .await + .unwrap(); // Check the catalog again, to make sure it still has the last cache with the correct // configuration: let catalog_json = fetch_catalog_as_json(Arc::clone(&object_store)).await;
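The behavioural change is easiest to see in isolation. Below is a minimal, self-contained sketch of the early-return guard this fix adds, using a stand-in `FlushBuffer` type rather than the real `WalObjectStore` internals.

```rust
// Not the real WalObjectStore: a stand-in showing the early-return guard that
// skips writing a WAL file and notifying the file notifier on an empty buffer.
use tokio::sync::Mutex;

#[derive(Default)]
struct FlushBuffer {
    // Stand-in for the real database_to_write_batch map.
    buffered_writes: Vec<String>,
}

impl FlushBuffer {
    fn is_empty(&self) -> bool {
        self.buffered_writes.is_empty()
    }
}

async fn flush_buffer(flush_buffer: &Mutex<FlushBuffer>) -> Option<Vec<String>> {
    let contents = {
        let mut guard = flush_buffer.lock().await;
        // The fix: bail out before persisting or notifying when nothing is buffered.
        if guard.is_empty() {
            return None;
        }
        std::mem::take(&mut guard.buffered_writes)
    };

    // ... persist `contents` to object storage and notify the file notifier here ...
    Some(contents)
}

#[tokio::main]
async fn main() {
    let buf = Mutex::new(FlushBuffer::default());

    // Empty buffer: no WAL file is written and no notification is sent.
    assert!(flush_buffer(&buf).await.is_none());

    // Buffer a write and the flush proceeds as before.
    buf.lock().await.buffered_writes.push("cpu f=1".to_string());
    assert!(flush_buffer(&buf).await.is_some());
}
```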
eba7eb748667333249e78ddca5119bf604621fba
Fraser Savage
2023-03-30 15:38:35
DRY error definitions for namespace commands
Unused redefinitions of Error were made in the namespace commands. This commit removes those and consolidates the error definitions into the main namespace CLI module. A sketch of the likely consolidated definitions follows the diff below.
null
refactor(cli): DRY error definitions for namespace commands Unused redefinitions of Error were made in namespace commands. This commit removes those and consolidates error definition to the main namespace CLI module.
diff --git a/influxdb_iox/src/commands/namespace/create.rs b/influxdb_iox/src/commands/namespace/create.rs index 0046dde448..1e2111e082 100644 --- a/influxdb_iox/src/commands/namespace/create.rs +++ b/influxdb_iox/src/commands/namespace/create.rs @@ -1,15 +1,6 @@ use influxdb_iox_client::connection::Connection; -use thiserror::Error; -#[allow(clippy::enum_variant_names)] -#[derive(Debug, Error)] -pub enum Error { - #[error("JSON Serialization error: {0}")] - Serde(#[from] serde_json::Error), - - #[error("Client error: {0}")] - ClientError(#[from] influxdb_iox_client::error::Error), -} +use crate::commands::namespace::Result; /// Write data into the specified database #[derive(Debug, clap::Parser)] @@ -30,10 +21,7 @@ pub struct Config { retention_hours: u32, } -pub async fn command( - connection: Connection, - config: Config, -) -> Result<(), crate::commands::namespace::Error> { +pub async fn command(connection: Connection, config: Config) -> Result<()> { let Config { namespace, retention_hours, diff --git a/influxdb_iox/src/commands/namespace/delete.rs b/influxdb_iox/src/commands/namespace/delete.rs index ff1c015f05..93246beec2 100644 --- a/influxdb_iox/src/commands/namespace/delete.rs +++ b/influxdb_iox/src/commands/namespace/delete.rs @@ -1,15 +1,6 @@ use influxdb_iox_client::connection::Connection; -use thiserror::Error; -#[allow(clippy::enum_variant_names)] -#[derive(Debug, Error)] -pub enum Error { - #[error("JSON Serialization error: {0}")] - Serde(#[from] serde_json::Error), - - #[error("Client error: {0}")] - ClientError(#[from] influxdb_iox_client::error::Error), -} +use crate::commands::namespace::Result; #[derive(Debug, clap::Parser)] pub struct Config { @@ -18,10 +9,7 @@ pub struct Config { namespace: String, } -pub async fn command( - connection: Connection, - config: Config, -) -> Result<(), crate::commands::namespace::Error> { +pub async fn command(connection: Connection, config: Config) -> Result<()> { let Config { namespace } = config; let mut client = influxdb_iox_client::namespace::Client::new(connection); diff --git a/influxdb_iox/src/commands/namespace/retention.rs b/influxdb_iox/src/commands/namespace/retention.rs index a4a10ee2c2..d896d79bc6 100644 --- a/influxdb_iox/src/commands/namespace/retention.rs +++ b/influxdb_iox/src/commands/namespace/retention.rs @@ -1,15 +1,6 @@ use influxdb_iox_client::connection::Connection; -use thiserror::Error; -#[allow(clippy::enum_variant_names)] -#[derive(Debug, Error)] -pub enum Error { - #[error("JSON Serialization error: {0}")] - Serde(#[from] serde_json::Error), - - #[error("Client error: {0}")] - ClientError(#[from] influxdb_iox_client::error::Error), -} +use crate::commands::namespace::Result; /// Update the specified namespace's data retention period #[derive(Debug, clap::Parser)] @@ -24,10 +15,7 @@ pub struct Config { retention_hours: u32, } -pub async fn command( - connection: Connection, - config: Config, -) -> Result<(), crate::commands::namespace::Error> { +pub async fn command(connection: Connection, config: Config) -> Result<()> { let Config { namespace, retention_hours,
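The consolidated definitions the subcommands now import are not shown in this diff; the sketch below is an assumption about their shape, reconstructed from the per-command enums that were deleted and from the `Result<()>` signatures above.

```rust
// Assumed contents of the parent namespace command module (its path is not
// shown in the diff); reconstructed from the deleted per-command Error enums.
use thiserror::Error;

#[allow(clippy::enum_variant_names)]
#[derive(Debug, Error)]
pub enum Error {
    #[error("JSON Serialization error: {0}")]
    Serde(#[from] serde_json::Error),

    #[error("Client error: {0}")]
    ClientError(#[from] influxdb_iox_client::error::Error),
}

// Shared alias so subcommands can return Result<()> without repeating the error type.
pub type Result<T, E = Error> = std::result::Result<T, E>;
```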
082e8db9ef65b645092d6651453207e50bd20f0e
Fraser Savage
2023-04-04 11:21:32
Make NamespaceCache an async_trait
In order to implement a read-through NamespaceCache decorator, the `get_schema()` lookup will need to interact with async catalog methods, so this allows implementations to call await within the `get_schema()` body.
null
refactor(router): Make NamespaceCache an async_trait In order to implement a read-through NamespaceCache decorator the `get_cache()` call will need to interact with async catalog methods, so this allows implementations to call await within the `get_cache()` body.
diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs index 68049799f7..a523208bcb 100644 --- a/ioxd_router/src/lib.rs +++ b/ioxd_router/src/lib.rs @@ -799,7 +799,10 @@ mod tests { .expect("pre-warming failed"); let name = NamespaceName::new("test_ns").unwrap(); - let got = cache.get_schema(&name).expect("should contain a schema"); + let got = cache + .get_schema(&name) + .await + .expect("should contain a schema"); assert!(got.tables.get("name").is_some()); } diff --git a/router/src/dml_handlers/retention_validation.rs b/router/src/dml_handlers/retention_validation.rs index 50f19abeca..48ae26da5b 100644 --- a/router/src/dml_handlers/retention_validation.rs +++ b/router/src/dml_handlers/retention_validation.rs @@ -77,7 +77,7 @@ where // Load the namespace schema from the cache, falling back to pulling it // from the global catalog (if it exists). - let schema = self.cache.get_schema(namespace); + let schema = self.cache.get_schema(namespace).await; let schema = match schema { Some(v) => v, None => { diff --git a/router/src/dml_handlers/schema_validation.rs b/router/src/dml_handlers/schema_validation.rs index 41b72603f4..a9807c5931 100644 --- a/router/src/dml_handlers/schema_validation.rs +++ b/router/src/dml_handlers/schema_validation.rs @@ -180,7 +180,7 @@ where // Load the namespace schema from the cache, falling back to pulling it // from the global catalog (if it exists). - let schema = self.cache.get_schema(namespace); + let schema = self.cache.get_schema(namespace).await; let schema = match schema { Some(v) => v, None => { @@ -785,7 +785,7 @@ mod tests { (catalog, namespace) } - fn assert_cache<C>(handler: &SchemaValidator<C>, table: &str, col: &str, want: ColumnType) + async fn assert_cache<C>(handler: &SchemaValidator<C>, table: &str, col: &str, want: ColumnType) where C: NamespaceCache, { @@ -793,6 +793,7 @@ mod tests { let ns = handler .cache .get_schema(&NAMESPACE) + .await .expect("cache should be populated"); let table = ns.tables.get(table).expect("table should exist in cache"); assert_eq!( @@ -826,10 +827,10 @@ mod tests { .expect("request should succeed"); // The cache should be populated. - assert_cache(&handler, "bananas", "tag1", ColumnType::Tag); - assert_cache(&handler, "bananas", "tag2", ColumnType::Tag); - assert_cache(&handler, "bananas", "val", ColumnType::I64); - assert_cache(&handler, "bananas", "time", ColumnType::Time); + assert_cache(&handler, "bananas", "tag1", ColumnType::Tag).await; + assert_cache(&handler, "bananas", "tag2", ColumnType::Tag).await; + assert_cache(&handler, "bananas", "val", ColumnType::I64).await; + assert_cache(&handler, "bananas", "time", ColumnType::Time).await; // Validate the table ID mapping. let (name, _data) = got.get(&want_id).expect("table not in output"); @@ -857,7 +858,7 @@ mod tests { assert_matches!(err, SchemaError::NamespaceLookup(_)); // The cache should not have retained the schema. - assert!(handler.cache.get_schema(&ns).is_none()); + assert!(handler.cache.get_schema(&ns).await.is_none()); } #[tokio::test] @@ -890,10 +891,10 @@ mod tests { }); // The cache should retain the original schema. 
- assert_cache(&handler, "bananas", "tag1", ColumnType::Tag); - assert_cache(&handler, "bananas", "tag2", ColumnType::Tag); - assert_cache(&handler, "bananas", "val", ColumnType::I64); // original type - assert_cache(&handler, "bananas", "time", ColumnType::Time); + assert_cache(&handler, "bananas", "tag1", ColumnType::Tag).await; + assert_cache(&handler, "bananas", "tag2", ColumnType::Tag).await; + assert_cache(&handler, "bananas", "val", ColumnType::I64).await; // original type + assert_cache(&handler, "bananas", "time", ColumnType::Time).await; assert_eq!(1, handler.schema_conflict.fetch()); } @@ -1031,6 +1032,6 @@ mod tests { .expect("request should succeed"); // Deletes have no effect on the cache. - assert!(handler.cache.get_schema(&ns).is_none()); + assert!(handler.cache.get_schema(&ns).await.is_none()); } } diff --git a/router/src/namespace_cache.rs b/router/src/namespace_cache.rs index bcee97db63..f552a588b5 100644 --- a/router/src/namespace_cache.rs +++ b/router/src/namespace_cache.rs @@ -10,12 +10,14 @@ pub mod metrics; use std::{fmt::Debug, sync::Arc}; +use async_trait::async_trait; use data_types::{NamespaceName, NamespaceSchema}; /// An abstract cache of [`NamespaceSchema`]. +#[async_trait] pub trait NamespaceCache: Debug + Send + Sync { /// Return the [`NamespaceSchema`] for `namespace`. - fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>>; + async fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>>; /// Place `schema` in the cache, unconditionally overwriting any existing /// [`NamespaceSchema`] mapped to `namespace`, returning diff --git a/router/src/namespace_cache/memory.rs b/router/src/namespace_cache/memory.rs index 1baeed6a4d..8671f80f39 100644 --- a/router/src/namespace_cache/memory.rs +++ b/router/src/namespace_cache/memory.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use async_trait::async_trait; use data_types::{NamespaceName, NamespaceSchema}; use hashbrown::HashMap; use parking_lot::RwLock; @@ -13,8 +14,9 @@ pub struct MemoryNamespaceCache { cache: RwLock<HashMap<NamespaceName<'static>, Arc<NamespaceSchema>>>, } +#[async_trait] impl NamespaceCache for Arc<MemoryNamespaceCache> { - fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>> { + async fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>> { self.cache.read().get(namespace).map(Arc::clone) } @@ -33,12 +35,12 @@ mod tests { use super::*; - #[test] - fn test_put_get() { + #[tokio::test] + async fn test_put_get() { let ns = NamespaceName::new("test").expect("namespace name is valid"); let cache = Arc::new(MemoryNamespaceCache::default()); - assert!(cache.get_schema(&ns).is_none()); + assert!(cache.get_schema(&ns).await.is_none()); let schema1 = NamespaceSchema { id: NamespaceId::new(42), @@ -50,7 +52,10 @@ mod tests { retention_period_ns: Some(876), }; assert!(cache.put_schema(ns.clone(), schema1.clone()).is_none()); - assert_eq!(*cache.get_schema(&ns).expect("lookup failure"), schema1); + assert_eq!( + *cache.get_schema(&ns).await.expect("lookup failure"), + schema1 + ); let schema2 = NamespaceSchema { id: NamespaceId::new(2), @@ -68,6 +73,9 @@ mod tests { .expect("should have existing schema"), schema1 ); - assert_eq!(*cache.get_schema(&ns).expect("lookup failure"), schema2); + assert_eq!( + *cache.get_schema(&ns).await.expect("lookup failure"), + schema2 + ); } } diff --git a/router/src/namespace_cache/metrics.rs b/router/src/namespace_cache/metrics.rs index 015a8a34ec..4b8eecc7d1 100644 
--- a/router/src/namespace_cache/metrics.rs +++ b/router/src/namespace_cache/metrics.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use async_trait::async_trait; use data_types::{NamespaceName, NamespaceSchema}; use iox_time::{SystemProvider, TimeProvider}; use metric::{DurationHistogram, Metric, U64Gauge}; @@ -69,14 +70,15 @@ impl<T> InstrumentedCache<T> { } } +#[async_trait] impl<T, P> NamespaceCache for Arc<InstrumentedCache<T, P>> where T: NamespaceCache, P: TimeProvider, { - fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>> { + async fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>> { let t = self.time_provider.now(); - let res = self.inner.get_schema(namespace); + let res = self.inner.get_schema(namespace).await; // Avoid exploding if time goes backwards - simply drop the measurement // if it happens. @@ -224,8 +226,8 @@ mod tests { ); } - #[test] - fn test_put() { + #[tokio::test] + async fn test_put() { let ns = NamespaceName::new("test").expect("namespace name is valid"); let registry = metric::Registry::default(); let cache = Arc::new(MemoryNamespaceCache::default()); @@ -376,7 +378,7 @@ mod tests { assert_eq!(cache.table_count.observe(), Observation::U64Gauge(5)); assert_eq!(cache.column_count.observe(), Observation::U64Gauge(42)); - let _got = cache.get_schema(&ns).expect("should exist"); + let _got = cache.get_schema(&ns).await.expect("should exist"); assert_histogram_hit( &registry, "namespace_cache_get_duration", diff --git a/router/src/namespace_cache/sharded_cache.rs b/router/src/namespace_cache/sharded_cache.rs index 8125e7aa92..e85f2b86f7 100644 --- a/router/src/namespace_cache/sharded_cache.rs +++ b/router/src/namespace_cache/sharded_cache.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use async_trait::async_trait; use data_types::{NamespaceName, NamespaceSchema}; use sharder::JumpHash; @@ -21,12 +22,13 @@ impl<T> ShardedCache<T> { } } +#[async_trait] impl<T> NamespaceCache for Arc<ShardedCache<T>> where T: NamespaceCache, { - fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>> { - self.shards.hash(namespace).get_schema(namespace) + async fn get_schema(&self, namespace: &NamespaceName<'_>) -> Option<Arc<NamespaceSchema>> { + self.shards.hash(namespace).get_schema(namespace).await } fn put_schema( @@ -70,8 +72,8 @@ mod tests { } } - #[test] - fn test_stable_cache_sharding() { + #[tokio::test] + async fn test_stable_cache_sharding() { // The number of namespaces to test with. const N: usize = 100; @@ -92,7 +94,7 @@ mod tests { // The cache should be empty. for name in names.keys() { - assert!(cache.get_schema(name).is_none()); + assert!(cache.get_schema(name).await.is_none()); } // Populate the cache @@ -104,7 +106,7 @@ mod tests { // The mapping should be stable for (name, id) in names { let want = schema_with_id(id as _); - assert_eq!(cache.get_schema(&name), Some(Arc::new(want))); + assert_eq!(cache.get_schema(&name).await, Some(Arc::new(want))); } } } diff --git a/router/src/namespace_resolver.rs b/router/src/namespace_resolver.rs index ebab033ec6..b4c5364987 100644 --- a/router/src/namespace_resolver.rs +++ b/router/src/namespace_resolver.rs @@ -65,7 +65,7 @@ where ) -> Result<NamespaceId, Error> { // Load the namespace schema from the cache, falling back to pulling it // from the global catalog (if it exists). 
- match self.cache.get_schema(namespace) { + match self.cache.get_schema(namespace).await { Some(v) => Ok(v.id), None => { let mut repos = self.catalog.repositories().await; @@ -142,7 +142,7 @@ mod tests { .await .expect("lookup should succeed"); - assert!(cache.get_schema(&ns).is_some()); + assert!(cache.get_schema(&ns).await.is_some()); // The cache hit should mean the catalog SHOULD NOT see a create request // for the namespace. @@ -186,7 +186,7 @@ mod tests { .expect("lookup should succeed"); // The cache should be populated as a result of the lookup. - assert!(cache.get_schema(&ns).is_some()); + assert!(cache.get_schema(&ns).await.is_some()); } #[tokio::test] @@ -226,7 +226,7 @@ mod tests { ); // The cache should NOT be populated as a result of the lookup. - assert!(cache.get_schema(&ns).is_none()); + assert!(cache.get_schema(&ns).await.is_none()); } #[tokio::test] @@ -245,6 +245,6 @@ mod tests { .expect_err("lookup should error"); assert_matches!(err, Error::Lookup(_)); - assert!(cache.get_schema(&ns).is_none()); + assert!(cache.get_schema(&ns).await.is_none()); } } diff --git a/router/src/namespace_resolver/ns_autocreation.rs b/router/src/namespace_resolver/ns_autocreation.rs index 4ab0e40e15..8595109242 100644 --- a/router/src/namespace_resolver/ns_autocreation.rs +++ b/router/src/namespace_resolver/ns_autocreation.rs @@ -88,7 +88,7 @@ where &self, namespace: &NamespaceName<'static>, ) -> Result<NamespaceId, super::Error> { - if self.cache.get_schema(namespace).is_none() { + if self.cache.get_schema(namespace).await.is_none() { trace!(%namespace, "namespace not found in cache"); match self.action {
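The diff above converts `NamespaceCache::get_schema` into an async trait method using the `#[async_trait]` macro. A minimal sketch of that pattern, not the IOx code, with invented names (`Cache`, `MemoryCache`) and assuming the `async-trait`, `parking_lot`, and `tokio` crates are available:

```rust
use std::{collections::HashMap, sync::Arc};

use async_trait::async_trait;
use parking_lot::RwLock;

// Stable Rust of this era does not support `async fn` in traits directly,
// so the macro rewrites these methods to return boxed futures.
#[async_trait]
trait Cache: Send + Sync {
    async fn get(&self, key: &str) -> Option<Arc<String>>;
    fn put(&self, key: String, value: String);
}

#[derive(Default)]
struct MemoryCache {
    inner: RwLock<HashMap<String, Arc<String>>>,
}

#[async_trait]
impl Cache for MemoryCache {
    async fn get(&self, key: &str) -> Option<Arc<String>> {
        // The in-memory lookup itself is synchronous; the async signature
        // exists so other implementations can await I/O (e.g. a catalog).
        self.inner.read().get(key).map(Arc::clone)
    }

    fn put(&self, key: String, value: String) {
        self.inner.write().insert(key, Arc::new(value));
    }
}

#[tokio::main]
async fn main() {
    let cache = MemoryCache::default();
    cache.put("ns".to_string(), "schema".to_string());
    assert!(cache.get("ns").await.is_some());
}
```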
7c7e5d77df5dfc5d4be8cd3a3adf409b5877d6eb
Trevor Hilton
2025-01-02 16:59:32
remove table_name clause requirement for parquet files system table (#25733)
Updated the tests to check for correct behaviour on queries to the system.parquet_files table.
null
feat: remove table_name clause requirement for parquet files system table (#25733) Updated the tests to check for correct behaviour on queries to the system.parquet_files table.
diff --git a/influxdb3_server/src/query_executor/mod.rs b/influxdb3_server/src/query_executor/mod.rs index ee30f8f96b..cc53c10c2e 100644 --- a/influxdb3_server/src/query_executor/mod.rs +++ b/influxdb3_server/src/query_executor/mod.rs @@ -620,7 +620,7 @@ mod tests { use arrow::array::RecordBatch; use data_types::NamespaceName; - use datafusion::{assert_batches_sorted_eq, error::DataFusionError}; + use datafusion::assert_batches_sorted_eq; use futures::TryStreamExt; use influxdb3_cache::{ last_cache::LastCacheProvider, meta_cache::MetaCacheProvider, @@ -641,11 +641,7 @@ mod tests { use object_store::{local::LocalFileSystem, ObjectStore}; use parquet_file::storage::{ParquetStorage, StorageId}; - use crate::{ - query_executor::QueryExecutorImpl, - system_tables::{table_name_predicate_error, PARQUET_FILES_TABLE_NAME}, - QueryExecutor, - }; + use crate::{query_executor::QueryExecutorImpl, QueryExecutor}; use super::CreateQueryExecutorArgs; @@ -768,7 +764,10 @@ mod tests { let test_cases = [ TestCase { - query: "SELECT table_name, size_bytes, row_count, min_time, max_time FROM system.parquet_files WHERE table_name = 'cpu'", + query: "\ + SELECT table_name, size_bytes, row_count, min_time, max_time \ + FROM system.parquet_files \ + WHERE table_name = 'cpu'", expected: &[ "+------------+------------+-----------+----------+----------+", "| table_name | size_bytes | row_count | min_time | max_time |", @@ -781,7 +780,10 @@ mod tests { ], }, TestCase { - query: "SELECT table_name, size_bytes, row_count, min_time, max_time FROM system.parquet_files WHERE table_name = 'mem'", + query: "\ + SELECT table_name, size_bytes, row_count, min_time, max_time \ + FROM system.parquet_files \ + WHERE table_name = 'mem'", expected: &[ "+------------+------------+-----------+----------+----------+", "| table_name | size_bytes | row_count | min_time | max_time |", @@ -793,6 +795,43 @@ mod tests { "+------------+------------+-----------+----------+----------+", ], }, + TestCase { + query: "\ + SELECT table_name, size_bytes, row_count, min_time, max_time \ + FROM system.parquet_files", + expected: &[ + "+------------+------------+-----------+----------+----------+", + "| table_name | size_bytes | row_count | min_time | max_time |", + "+------------+------------+-----------+----------+----------+", + "| cpu | 1956 | 2 | 0 | 10 |", + "| cpu | 1956 | 2 | 20 | 30 |", + "| cpu | 1956 | 2 | 40 | 50 |", + "| cpu | 1956 | 2 | 60 | 70 |", + "| mem | 1956 | 2 | 0 | 10 |", + "| mem | 1956 | 2 | 20 | 30 |", + "| mem | 1956 | 2 | 40 | 50 |", + "| mem | 1956 | 2 | 60 | 70 |", + "+------------+------------+-----------+----------+----------+", + ], + }, + TestCase { + query: "\ + SELECT table_name, size_bytes, row_count, min_time, max_time \ + FROM system.parquet_files \ + LIMIT 6", + expected: &[ + "+------------+------------+-----------+----------+----------+", + "| table_name | size_bytes | row_count | min_time | max_time |", + "+------------+------------+-----------+----------+----------+", + "| cpu | 1956 | 2 | 0 | 10 |", + "| cpu | 1956 | 2 | 20 | 30 |", + "| cpu | 1956 | 2 | 40 | 50 |", + "| cpu | 1956 | 2 | 60 | 70 |", + "| mem | 1956 | 2 | 40 | 50 |", + "| mem | 1956 | 2 | 60 | 70 |", + "+------------+------------+-----------+----------+----------+", + ], + }, ]; for t in test_cases { @@ -804,36 +843,4 @@ mod tests { assert_batches_sorted_eq!(t.expected, &batches); } } - - #[tokio::test] - async fn system_parquet_files_predicate_error() { - let (write_buffer, query_executor, time_provider) = setup().await; - // make some 
writes, so that we have a database that we can query against: - let db_name = "test_db"; - let _ = write_buffer - .write_lp( - NamespaceName::new(db_name).unwrap(), - "cpu,host=a,region=us-east usage=0.1 1", - Time::from_timestamp_nanos(0), - false, - influxdb3_write::Precision::Nanosecond, - ) - .await - .unwrap(); - - // Bump time to trick the persister into persisting to parquet: - time_provider.set(Time::from_timestamp(60 * 10, 0).unwrap()); - - // query without the `WHERE table_name =` clause to trigger the error: - let query = "SELECT * FROM system.parquet_files"; - let stream = query_executor - .query(db_name, query, None, crate::QueryKind::Sql, None, None) - .await - .unwrap(); - let error: DataFusionError = stream.try_collect::<Vec<RecordBatch>>().await.unwrap_err(); - assert_eq!( - error.message(), - table_name_predicate_error(PARQUET_FILES_TABLE_NAME).message() - ); - } } diff --git a/influxdb3_server/src/system_tables/mod.rs b/influxdb3_server/src/system_tables/mod.rs index 37ca1af576..3b59bf6d29 100644 --- a/influxdb3_server/src/system_tables/mod.rs +++ b/influxdb3_server/src/system_tables/mod.rs @@ -171,7 +171,7 @@ impl SchemaProvider for AllSystemSchemaTablesProvider { /// ```sql /// SELECT * FROM system.parquet_files WHERE table_name = 'foo' /// ``` -pub(crate) fn find_table_name_in_filter(filters: Option<Vec<Expr>>) -> Option<String> { +pub(crate) fn find_table_name_in_filter(filters: Option<Vec<Expr>>) -> Option<Arc<str>> { filters.map(|all_filters| { all_filters.iter().find_map(|f| match f { Expr::BinaryExpr(BinaryExpr { left, op, right }) => { @@ -181,7 +181,7 @@ pub(crate) fn find_table_name_in_filter(filters: Option<Vec<Expr>>) -> Option<St ScalarValue::Utf8(Some(s)) | ScalarValue::LargeUtf8(Some(s)) | ScalarValue::Utf8View(Some(s)), - ) => Some(s.to_owned()), + ) => Some(s.as_str().into()), _ => None, } } else { @@ -192,10 +192,3 @@ pub(crate) fn find_table_name_in_filter(filters: Option<Vec<Expr>>) -> Option<St }) })? 
} - -pub(crate) fn table_name_predicate_error(table_name: &str) -> DataFusionError { - DataFusionError::Plan(format!( - "must provide a {TABLE_NAME_PREDICATE} = '<table_name>' predicate in queries to \ - {SYSTEM_SCHEMA_NAME}.{table_name}" - )) -} diff --git a/influxdb3_server/src/system_tables/parquet_files.rs b/influxdb3_server/src/system_tables/parquet_files.rs index d925bde34c..0d210de081 100644 --- a/influxdb3_server/src/system_tables/parquet_files.rs +++ b/influxdb3_server/src/system_tables/parquet_files.rs @@ -8,9 +8,7 @@ use influxdb3_id::DbId; use influxdb3_write::{ParquetFile, WriteBuffer}; use iox_system_tables::IoxSystemTable; -use crate::system_tables::{find_table_name_in_filter, table_name_predicate_error}; - -use super::PARQUET_FILES_TABLE_NAME; +use crate::system_tables::find_table_name_in_filter; #[derive(Debug)] pub(super) struct ParquetFilesTable { @@ -50,68 +48,88 @@ impl IoxSystemTable for ParquetFilesTable { async fn scan( &self, filters: Option<Vec<Expr>>, - _limit: Option<usize>, + limit: Option<usize>, ) -> Result<RecordBatch, DataFusionError> { let schema = self.schema(); + let limit = limit.unwrap_or(usize::MAX); // extract `table_name` from filters - let table_name = find_table_name_in_filter(filters) - .ok_or_else(|| table_name_predicate_error(PARQUET_FILES_TABLE_NAME))?; + let table_name = find_table_name_in_filter(filters); - let parquet_files: Vec<ParquetFile> = self.buffer.parquet_files( - self.db_id, - self.buffer + let parquet_files = if let Some(table_name) = table_name { + let table_id = self + .buffer .catalog() .db_schema_by_id(&self.db_id) .expect("db exists") - .table_name_to_id(table_name.as_str()) - .expect("table exists"), - ); + .table_name_to_id(Arc::clone(&table_name)) + .expect("table exists"); + self.buffer + .parquet_files(self.db_id, table_id) + .into_iter() + .map(|file| (Arc::clone(&table_name), file)) + .collect() + } else { + self.buffer + .catalog() + .list_db_schema() + .iter() + .flat_map(|db| db.tables()) + .flat_map(|table_def| { + self.buffer + .parquet_files(self.db_id, table_def.table_id) + .into_iter() + .map(move |file| (Arc::clone(&table_def.table_name), file)) + }) + .take(limit) + .collect() + }; - from_parquet_files(&table_name, schema, parquet_files) + from_parquet_files(schema, parquet_files) } } +/// Produce a record batch listing parquet file information based on the given `schema` and +/// `parquet_files`, a list of table name and parquet file pairs. fn from_parquet_files( - table_name: &str, schema: SchemaRef, - parquet_files: Vec<ParquetFile>, + parquet_files: Vec<(Arc<str>, ParquetFile)>, ) -> Result<RecordBatch, DataFusionError> { let columns: Vec<ArrayRef> = vec![ Arc::new( - vec![table_name; parquet_files.len()] + parquet_files .iter() - .map(|s| Some(s.to_string())) + .map(|(table_name, _)| Some(table_name)) .collect::<StringArray>(), ), Arc::new( parquet_files .iter() - .map(|f| Some(f.path.to_string())) + .map(|(_, f)| Some(f.path.to_string())) .collect::<StringArray>(), ), Arc::new( parquet_files .iter() - .map(|f| Some(f.size_bytes)) + .map(|(_, f)| Some(f.size_bytes)) .collect::<UInt64Array>(), ), Arc::new( parquet_files .iter() - .map(|f| Some(f.row_count)) + .map(|(_, f)| Some(f.row_count)) .collect::<UInt64Array>(), ), Arc::new( parquet_files .iter() - .map(|f| Some(f.min_time)) + .map(|(_, f)| Some(f.min_time)) .collect::<Int64Array>(), ), Arc::new( parquet_files .iter() - .map(|f| Some(f.max_time)) + .map(|(_, f)| Some(f.max_time)) .collect::<Int64Array>(), ), ];
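The commit above drops the mandatory `WHERE table_name = '<table_name>'` predicate on `system.parquet_files` and applies an optional LIMIT when listing across all tables. A simplified, hypothetical sketch of that scan fallback, using stand-in types rather than the IOx ones:

```rust
use std::sync::Arc;

#[derive(Clone)]
struct ParquetFile {
    size_bytes: u64,
    row_count: u64,
}

struct Table {
    name: Arc<str>,
    files: Vec<ParquetFile>,
}

/// List `(table_name, file)` pairs, scoped to one table when a
/// `table_name = '...'` predicate was supplied, otherwise across all tables.
fn list_files(
    tables: &[Table],
    table_name: Option<Arc<str>>,
    limit: Option<usize>,
) -> Vec<(Arc<str>, ParquetFile)> {
    let limit = limit.unwrap_or(usize::MAX);
    match table_name {
        // Predicate present: only this table's files.
        Some(name) => tables
            .iter()
            .filter(|t| t.name == name)
            .flat_map(|t| t.files.iter().map(move |f| (Arc::clone(&t.name), f.clone())))
            .collect(),
        // No predicate: every table's files, bounded by the optional LIMIT.
        None => tables
            .iter()
            .flat_map(|t| t.files.iter().map(move |f| (Arc::clone(&t.name), f.clone())))
            .take(limit)
            .collect(),
    }
}

fn main() {
    let tables = vec![
        Table {
            name: Arc::from("cpu"),
            files: vec![ParquetFile { size_bytes: 1956, row_count: 2 }],
        },
        Table {
            name: Arc::from("mem"),
            files: vec![ParquetFile { size_bytes: 1956, row_count: 2 }],
        },
    ];

    // Like `SELECT ... FROM system.parquet_files` (no predicate needed now).
    for (name, f) in list_files(&tables, None, None) {
        println!("{name}: {} bytes, {} rows", f.size_bytes, f.row_count);
    }
    // Like `... WHERE table_name = 'cpu'`.
    assert_eq!(list_files(&tables, Some(Arc::from("cpu")), None).len(), 1);
}
```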
fdffa871c33e75b64f9baddb617e60e4c7d2b298
Carol (Nichols || Goulding)
2023-09-06 09:33:25
Optionally specify a table name to get just its schema (#8650)
Rather than always having to request all of a namespace's schema then filtering to the one you want. Will make this more consistent with upserting schema by namespace+table. Fixes #4997.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: Optionally specify a table name to get just its schema (#8650) Rather than always having to request all of a namespace's schema then filtering to the one you want. Will make this more consistent with upserting schema by namespace+table. Fixes #4997. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/generated_types/protos/influxdata/iox/schema/v1/service.proto b/generated_types/protos/influxdata/iox/schema/v1/service.proto index a028132295..127a8aee1d 100644 --- a/generated_types/protos/influxdata/iox/schema/v1/service.proto +++ b/generated_types/protos/influxdata/iox/schema/v1/service.proto @@ -3,13 +3,17 @@ package influxdata.iox.schema.v1; option go_package = "github.com/influxdata/iox/schema/v1"; service SchemaService { - // Get the schema for a namespace + // Get the schema for a namespace and, optionally, a table within that namespace rpc GetSchema(GetSchemaRequest) returns (GetSchemaResponse); } message GetSchemaRequest { // The namespace for which to fetch the schema string namespace = 1; + + // If specified, the table in the namespace for which to fetch the schema. If not specified, the + // schemas for all tables in this namespace will be returned. + optional string table = 2; } message GetSchemaResponse { diff --git a/influxdb_iox/src/commands/debug/schema.rs b/influxdb_iox/src/commands/debug/schema.rs index f1cd112bc6..aeb47d8b42 100644 --- a/influxdb_iox/src/commands/debug/schema.rs +++ b/influxdb_iox/src/commands/debug/schema.rs @@ -39,7 +39,7 @@ pub async fn command(connection: Connection, config: Config) -> Result<(), Error match config.command { Command::Get(command) => { let mut client = schema::Client::new(connection); - let schema = client.get_schema(&command.namespace).await?; + let schema = client.get_schema(&command.namespace, None).await?; println!("{}", serde_json::to_string_pretty(&schema)?); } // Deliberately not adding _ => so the compiler will direct people here to impl new // commands diff --git a/influxdb_iox/src/commands/debug/wal/regenerate_lp.rs b/influxdb_iox/src/commands/debug/wal/regenerate_lp.rs index 8f0f747264..4120843a76 100644 --- a/influxdb_iox/src/commands/debug/wal/regenerate_lp.rs +++ b/influxdb_iox/src/commands/debug/wal/regenerate_lp.rs @@ -65,7 +65,7 @@ impl TableIndexFetcher { let ns_schema = self .schema_client .clone() - .get_schema(namespace_name) + .get_schema(namespace_name, None) .await .map_err(Box::new)?; @@ -74,7 +74,13 @@ impl TableIndexFetcher { .into_iter() .map(|(table_name, table_schema)| { let table_id = TableId::new(table_schema.id); - debug!(%table_name, %table_id, %namespace_id, %namespace_name, "discovered ID to name mapping for table in namespace"); + debug!( + %table_name, + %table_id, + %namespace_id, + %namespace_name, + "discovered ID to name mapping for table in namespace" + ); (table_id, table_name) }) .collect()) diff --git a/influxdb_iox/tests/end_to_end_cases/schema.rs b/influxdb_iox/tests/end_to_end_cases/schema.rs index f89f1d4b5c..9d201b49f3 100644 --- a/influxdb_iox/tests/end_to_end_cases/schema.rs +++ b/influxdb_iox/tests/end_to_end_cases/schema.rs @@ -52,7 +52,7 @@ impl SchemaTest { ); let response = client - .get_schema(state.cluster().namespace()) + .get_schema(state.cluster().namespace(), Some(cloned_self.table_name)) .await .expect("successful response"); diff --git a/influxdb_iox_client/src/client/schema.rs b/influxdb_iox_client/src/client/schema.rs index 8f9e7bcd8b..cdfd040af5 100644 --- a/influxdb_iox_client/src/client/schema.rs +++ b/influxdb_iox_client/src/client/schema.rs @@ -24,12 +24,17 @@ impl Client { } } - /// Get the schema for a namespace. - pub async fn get_schema(&mut self, namespace: &str) -> Result<NamespaceSchema, Error> { + /// Get the schema for a namespace and, optionally, one table within that namespace. 
+ pub async fn get_schema( + &mut self, + namespace: &str, + table: Option<&str>, + ) -> Result<NamespaceSchema, Error> { let response = self .inner .get_schema(GetSchemaRequest { namespace: namespace.to_string(), + table: table.map(ToString::to_string), }) .await?; diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs index ffdc963b3e..24aeb49eed 100644 --- a/iox_catalog/src/interface.rs +++ b/iox_catalog/src/interface.rs @@ -79,6 +79,9 @@ pub enum Error { #[snafu(display("table {} not found", id))] TableNotFound { id: TableId }, + #[snafu(display("table {} not found", name))] + TableNotFoundByName { name: String }, + #[snafu(display("partition {} not found", id))] PartitionNotFound { id: TransitionPartitionId }, @@ -599,6 +602,44 @@ where Ok(namespace) } +/// Gets the schema for one particular table in a namespace. +pub async fn get_schema_by_namespace_and_table<R>( + name: &str, + table_name: &str, + repos: &mut R, + deleted: SoftDeletedRows, +) -> Result<NamespaceSchema> +where + R: RepoCollection + ?Sized, +{ + let namespace = repos + .namespaces() + .get_by_name(name, deleted) + .await? + .context(NamespaceNotFoundByNameSnafu { name })?; + + let table = repos + .tables() + .get_by_namespace_and_name(namespace.id, table_name) + .await? + .context(TableNotFoundByNameSnafu { + name: table_name.to_string(), + })?; + let mut table_schema = TableSchema::new_empty_from(&table); + + let columns = repos.columns().list_by_table_id(table.id).await?; + for c in columns { + table_schema.add_column(c); + } + + let mut namespace = NamespaceSchema::new_empty_from(&namespace); + namespace + .tables + .insert(table_name.to_string(), table_schema); + + Ok(namespace) +} + /// Gets all the table's columns. pub async fn get_table_columns_by_id<R>(id: TableId, repos: &mut R) -> Result<ColumnsByName> where diff --git a/service_grpc_schema/src/lib.rs b/service_grpc_schema/src/lib.rs index 612a6da6a4..89c6b0bac4 100644 --- a/service_grpc_schema/src/lib.rs +++ b/service_grpc_schema/src/lib.rs @@ -19,7 +19,9 @@ use workspace_hack as _; use std::{ops::DerefMut, sync::Arc}; use generated_types::influxdata::iox::schema::v1::*; -use iox_catalog::interface::{get_schema_by_name, Catalog, SoftDeletedRows}; +use iox_catalog::interface::{ + get_schema_by_name, get_schema_by_namespace_and_table, Catalog, SoftDeletedRows, +}; use observability_deps::tracing::warn; use tonic::{Request, Response, Status}; @@ -45,17 +47,32 @@ impl schema_service_server::SchemaService for SchemaService { let mut repos = self.catalog.repositories().await; let req = request.into_inner(); - let schema = get_schema_by_name( - &req.namespace, - repos.deref_mut(), - SoftDeletedRows::ExcludeDeleted, - ) - .await + + let schema = match req.table { + Some(table_name) => { + get_schema_by_namespace_and_table( + &req.namespace, + &table_name, + repos.deref_mut(), + SoftDeletedRows::ExcludeDeleted, + ) + .await + } + None => { + get_schema_by_name( + &req.namespace, + repos.deref_mut(), + SoftDeletedRows::ExcludeDeleted, + ) + .await + } + } .map_err(|e| { warn!(error=%e, %req.namespace, "failed to retrieve namespace schema"); Status::not_found(e.to_string()) }) .map(Arc::new)?; + Ok(Response::new(schema_to_proto(schema))) } } @@ -104,6 +121,7 @@ mod tests { test_helpers::{arbitrary_namespace, arbitrary_table}, }; use std::sync::Arc; + use tonic::Code; #[tokio::test] async fn test_schema() { @@ -113,40 +131,91 @@ mod tests { let catalog = Arc::new(MemCatalog::new(metrics)); let mut repos = catalog.repositories().await; let namespace = 
arbitrary_namespace(&mut *repos, "namespace_schema_test").await; + let table = arbitrary_table(&mut *repos, "schema_test_table", &namespace).await; repos .columns() .create_or_get("schema_test_column", table.id, ColumnType::Tag) .await .unwrap(); + + let another_table = + arbitrary_table(&mut *repos, "another_schema_test_table", &namespace).await; + repos + .columns() + .create_or_get( + "another_schema_test_column", + another_table.id, + ColumnType::Tag, + ) + .await + .unwrap(); Arc::clone(&catalog) }; // create grpc schema service let grpc = super::SchemaService::new(catalog); + + // request all tables for a namespace let request = GetSchemaRequest { namespace: "namespace_schema_test".to_string(), + table: None, }; - let tonic_response = grpc .get_schema(Request::new(request)) .await .expect("rpc request should succeed"); let response = tonic_response.into_inner(); let schema = response.schema.expect("schema should be Some()"); + let mut table_names: Vec<_> = schema.tables.keys().collect(); + table_names.sort(); assert_eq!( - schema.tables.keys().collect::<Vec<&String>>(), - vec![&"schema_test_table".to_string()] + table_names, + ["another_schema_test_table", "schema_test_table"] ); assert_eq!( schema .tables - .get(&"schema_test_table".to_string()) + .get("schema_test_table") .expect("test table should exist") .columns .keys() - .collect::<Vec<&String>>(), - vec![&"schema_test_column".to_string()] + .collect::<Vec<_>>(), + ["schema_test_column"] ); + + // request one table for a namespace + let request = GetSchemaRequest { + namespace: "namespace_schema_test".to_string(), + table: Some("schema_test_table".to_string()), + }; + let tonic_response = grpc + .get_schema(Request::new(request)) + .await + .expect("rpc request should succeed"); + let response = tonic_response.into_inner(); + let schema = response.schema.expect("schema should be Some()"); + let mut table_names: Vec<_> = schema.tables.keys().collect(); + table_names.sort(); + assert_eq!(table_names, ["schema_test_table"]); + assert_eq!( + schema + .tables + .get("schema_test_table") + .expect("test table should exist") + .columns + .keys() + .collect::<Vec<_>>(), + ["schema_test_column"] + ); + + // request a nonexistent table for a namespace, which fails + let request = GetSchemaRequest { + namespace: "namespace_schema_test".to_string(), + table: Some("does_not_exist".to_string()), + }; + let tonic_status = grpc.get_schema(Request::new(request)).await.unwrap_err(); + assert_eq!(tonic_status.code(), Code::NotFound); + assert_eq!(tonic_status.message(), "table does_not_exist not found"); } } diff --git a/test_helpers_end_to_end/src/mini_cluster.rs b/test_helpers_end_to_end/src/mini_cluster.rs index 1863f2c8cd..5ab9833bc2 100644 --- a/test_helpers_end_to_end/src/mini_cluster.rs +++ b/test_helpers_end_to_end/src/mini_cluster.rs @@ -399,6 +399,7 @@ impl MiniCluster { let id = SchemaServiceClient::new(c) .get_schema(GetSchemaRequest { namespace: self.namespace().to_string(), + table: None, }) .await .expect("failed to query for namespace ID") @@ -424,6 +425,7 @@ impl MiniCluster { let id = SchemaServiceClient::new(c) .get_schema(GetSchemaRequest { namespace: self.namespace().to_string(), + table: Some(name.to_string()), }) .await .expect("failed to query for namespace ID") @@ -456,6 +458,7 @@ impl MiniCluster { let table_id = SchemaServiceClient::new(c.clone()) .get_schema(GetSchemaRequest { namespace: namespace_name.clone(), + table: Some(table_name.to_string()), }) .await .expect("failed to query for namespace ID")
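With the change above, the schema client accepts an optional table name. A hypothetical usage sketch follows; the connection setup is assumed to exist already and `my_namespace`/`cpu` are placeholders, but the `get_schema(namespace, table)` signature matches the diff:

```rust
use influxdb_iox_client::{connection::Connection, schema::Client};

async fn print_schemas(connection: Connection) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = Client::new(connection);

    // Previous behaviour: every table in the namespace.
    let all = client.get_schema("my_namespace", None).await?;
    println!("tables: {:?}", all.tables.keys().collect::<Vec<_>>());

    // New: just one table's schema.
    let one = client.get_schema("my_namespace", Some("cpu")).await?;
    println!(
        "cpu columns: {:?}",
        one.tables["cpu"].columns.keys().collect::<Vec<_>>()
    );

    Ok(())
}
```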
dcba47ab5833adeb70ef0e5637d24e3a88fbdb70
Marco Neumann
2023-02-08 10:32:21
allow the compactor to process all known partitions (#6887)
* feat: `PartitionRepo::list_ids`
* refactor: `CatalogPartitionsSource` => `CatalogToCompactPartitionsSource`
* feat: allow the compactor to process all known partitions

  Closes #6648.
* docs: improve

Co-authored-by: Andrew Lamb <[email protected]>
---------
Co-authored-by: Andrew Lamb <[email protected]>
feat: allow the compactor to process all known partitions (#6887) * feat: `PartitionRepo::list_ids` * refactor: `CatalogPartitionsSource` => `CatalogToCompactPartitionsSource` * feat: allow the compactor to process all known partitions Closes #6648. * docs: improve Co-authored-by: Andrew Lamb <[email protected]> --------- Co-authored-by: Andrew Lamb <[email protected]>
diff --git a/clap_blocks/src/compactor2.rs b/clap_blocks/src/compactor2.rs index 5e1df449dd..be62aefe87 100644 --- a/clap_blocks/src/compactor2.rs +++ b/clap_blocks/src/compactor2.rs @@ -236,4 +236,12 @@ pub struct Compactor2Config { action )] pub process_once: bool, + + /// Compact all partitions found in the catalog, no matter if/when the received writes. + #[clap( + long = "compaction-process-all-partitions", + env = "INFLUXDB_IOX_COMPACTION_PROCESS_ALL_PARTITIONS", + action + )] + pub process_all_partitions: bool, } diff --git a/compactor2/src/components/combos/unique_partitions.rs b/compactor2/src/components/combos/unique_partitions.rs index fad8b6fdfe..f81dd36d68 100644 --- a/compactor2/src/components/combos/unique_partitions.rs +++ b/compactor2/src/components/combos/unique_partitions.rs @@ -33,7 +33,7 @@ use crate::components::{ /// /// | Step | Name | Type | Description | /// | ---- | --------------------- | ----------------------------------------------------------- | ----------- | -/// | 1 | **Actual source** | `inner_source`/`T1`/[`PartitionsSource`], wrapped | This is the actual source, e.g. a [catalog](crate::components::partitions_source::catalog::CatalogPartitionsSource) | +/// | 1 | **Actual source** | `inner_source`/`T1`/[`PartitionsSource`], wrapped | This is the actual source, e.g. a [catalog](crate::components::partitions_source::catalog_to_compact::CatalogToCompactPartitionsSource) | /// | 2 | **Unique IDs source** | [`UniquePartionsSourceWrapper`], wraps `inner_source`/`T1` | Outputs that [`PartitionId`]s from the `inner_source` but filters out partitions that have not yet reached the uniqueness sink (step 4) | /// | 3 | **Critical section** | -- | Here it is always ensured that a single [`PartitionId`] does NOT occur more than once. | /// | 4 | **Unique IDs sink** | [`UniquePartitionDoneSinkWrapper`], wraps `inner_sink`/`T2` | Observes incoming IDs and removes them from the filter applied in step 2. 
| diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs index b406e18062..b340b8381e 100644 --- a/compactor2/src/components/hardcoded.rs +++ b/compactor2/src/components/hardcoded.rs @@ -12,7 +12,7 @@ use crate::{ namespaces_source::catalog::CatalogNamespacesSource, tables_source::catalog::CatalogTablesSource, }, - config::{AlgoVersion, Config}, + config::{AlgoVersion, Config, PartitionsSourceConfig}, error::ErrorKind, }; @@ -36,8 +36,7 @@ use super::{ target_level_upgrade_split::TargetLevelUpgradeSplit, FilesSplit, }, id_only_partition_filter::{ - and::AndIdOnlyPartitionFilter, by_id::ByIdPartitionFilter, shard::ShardPartitionFilter, - IdOnlyPartitionFilter, + and::AndIdOnlyPartitionFilter, shard::ShardPartitionFilter, IdOnlyPartitionFilter, }, level_exist::one_level::OneLevelExist, parquet_file_sink::{ @@ -66,9 +65,11 @@ use super::{ endless::EndlessPartititionStream, once::OncePartititionStream, PartitionStream, }, partitions_source::{ - catalog::CatalogPartitionsSource, filter::FilterPartitionsSourceWrapper, - logging::LoggingPartitionsSourceWrapper, metrics::MetricsPartitionsSourceWrapper, - mock::MockPartitionsSource, not_empty::NotEmptyPartitionsSourceWrapper, + catalog_all::CatalogAllPartitionsSource, + catalog_to_compact::CatalogToCompactPartitionsSource, + filter::FilterPartitionsSourceWrapper, logging::LoggingPartitionsSourceWrapper, + metrics::MetricsPartitionsSourceWrapper, mock::MockPartitionsSource, + not_empty::NotEmptyPartitionsSourceWrapper, randomize_order::RandomizeOrderPartitionsSourcesWrapper, PartitionsSource, }, round_split::all_now::AllNowRoundSplit, @@ -86,22 +87,25 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> { // TODO: partitions source: Implementing ID-based sharding / hash-partitioning so we can run multiple compactors in // parallel. This should be a wrapper around the existing partions source. 
- let partitions_source: Arc<dyn PartitionsSource> = if let Some(ids) = &config.partition_filter { - Arc::new(MockPartitionsSource::new(ids.iter().cloned().collect())) - } else { - Arc::new(CatalogPartitionsSource::new( + let partitions_source: Arc<dyn PartitionsSource> = match &config.partitions_source { + PartitionsSourceConfig::CatalogRecentWrites => { + Arc::new(CatalogToCompactPartitionsSource::new( + config.backoff_config.clone(), + Arc::clone(&config.catalog), + config.partition_threshold, + Arc::clone(&config.time_provider), + )) + } + PartitionsSourceConfig::CatalogAll => Arc::new(CatalogAllPartitionsSource::new( config.backoff_config.clone(), Arc::clone(&config.catalog), - config.partition_threshold, - Arc::clone(&config.time_provider), - )) + )), + PartitionsSourceConfig::Fixed(ids) => { + Arc::new(MockPartitionsSource::new(ids.iter().cloned().collect())) + } }; let mut id_only_partition_filters: Vec<Arc<dyn IdOnlyPartitionFilter>> = vec![]; - if let Some(ids) = &config.partition_filter { - // filter as early as possible, so we don't need any catalog lookups for the filtered partitions - id_only_partition_filters.push(Arc::new(ByIdPartitionFilter::new(ids.clone()))); - } if let Some(shard_config) = &config.shard_config { // add shard filter before performing any catalog IO id_only_partition_filters.push(Arc::new(ShardPartitionFilter::new( diff --git a/compactor2/src/components/id_only_partition_filter/by_id.rs b/compactor2/src/components/id_only_partition_filter/by_id.rs index 6258cdbc73..5dcffb351d 100644 --- a/compactor2/src/components/id_only_partition_filter/by_id.rs +++ b/compactor2/src/components/id_only_partition_filter/by_id.rs @@ -10,6 +10,7 @@ pub struct ByIdPartitionFilter { } impl ByIdPartitionFilter { + #[allow(dead_code)] // not used anywhere pub fn new(ids: HashSet<PartitionId>) -> Self { Self { ids } } diff --git a/compactor2/src/components/partitions_source/catalog_all.rs b/compactor2/src/components/partitions_source/catalog_all.rs new file mode 100644 index 0000000000..7222169ab8 --- /dev/null +++ b/compactor2/src/components/partitions_source/catalog_all.rs @@ -0,0 +1,47 @@ +use std::{fmt::Display, sync::Arc}; + +use async_trait::async_trait; +use backoff::{Backoff, BackoffConfig}; +use data_types::PartitionId; +use iox_catalog::interface::Catalog; + +use super::PartitionsSource; + +#[derive(Debug)] +/// Returns all partitions in the catalog, regardless of any other condition +pub struct CatalogAllPartitionsSource { + backoff_config: BackoffConfig, + catalog: Arc<dyn Catalog>, +} + +impl CatalogAllPartitionsSource { + pub fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self { + Self { + backoff_config, + catalog, + } + } +} + +impl Display for CatalogAllPartitionsSource { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "catalog_all") + } +} + +#[async_trait] +impl PartitionsSource for CatalogAllPartitionsSource { + async fn fetch(&self) -> Vec<PartitionId> { + Backoff::new(&self.backoff_config) + .retry_all_errors("list_ids", || async { + self.catalog + .repositories() + .await + .partitions() + .list_ids() + .await + }) + .await + .expect("retry forever") + } +} diff --git a/compactor2/src/components/partitions_source/catalog.rs b/compactor2/src/components/partitions_source/catalog_to_compact.rs similarity index 80% rename from compactor2/src/components/partitions_source/catalog.rs rename to compactor2/src/components/partitions_source/catalog_to_compact.rs index d22be27481..5ba3401d34 100644 --- 
a/compactor2/src/components/partitions_source/catalog.rs +++ b/compactor2/src/components/partitions_source/catalog_to_compact.rs @@ -9,14 +9,15 @@ use iox_time::TimeProvider; use super::PartitionsSource; #[derive(Debug)] -pub struct CatalogPartitionsSource { +/// Returns all partitions that had a new parquet file written more than `threshold` ago. +pub struct CatalogToCompactPartitionsSource { backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>, threshold: Duration, time_provider: Arc<dyn TimeProvider>, } -impl CatalogPartitionsSource { +impl CatalogToCompactPartitionsSource { pub fn new( backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>, @@ -32,14 +33,14 @@ impl CatalogPartitionsSource { } } -impl Display for CatalogPartitionsSource { +impl Display for CatalogToCompactPartitionsSource { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "catalog") + write!(f, "catalog_to_compact") } } #[async_trait] -impl PartitionsSource for CatalogPartitionsSource { +impl PartitionsSource for CatalogToCompactPartitionsSource { async fn fetch(&self) -> Vec<PartitionId> { let cutoff = self.time_provider.now() - self.threshold; diff --git a/compactor2/src/components/partitions_source/mod.rs b/compactor2/src/components/partitions_source/mod.rs index 0d2b5e20a6..6a9710620d 100644 --- a/compactor2/src/components/partitions_source/mod.rs +++ b/compactor2/src/components/partitions_source/mod.rs @@ -6,7 +6,8 @@ use std::{ use async_trait::async_trait; use data_types::PartitionId; -pub mod catalog; +pub mod catalog_all; +pub mod catalog_to_compact; pub mod filter; pub mod logging; pub mod metrics; diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs index ca19eeac78..7d3caad99b 100644 --- a/compactor2/src/components/report.rs +++ b/compactor2/src/components/report.rs @@ -26,7 +26,7 @@ pub fn log_config(config: &Config) { percentage_max_file_size, split_percentage, partition_timeout, - partition_filter, + partitions_source, shadow_mode, ignore_partition_skip_marker, max_input_files_per_partition, @@ -63,7 +63,7 @@ pub fn log_config(config: &Config) { percentage_max_file_size, split_percentage, partition_timeout_secs=partition_timeout.as_secs_f32(), - partition_filter=?partition_filter.as_ref().map(|ids| ids.iter().map(|id| id.get()).collect::<Vec<_>>()), + %partitions_source, shadow_mode, ignore_partition_skip_marker, max_input_files_per_partition, diff --git a/compactor2/src/config.rs b/compactor2/src/config.rs index 91e0f83a36..d3707870f6 100644 --- a/compactor2/src/config.rs +++ b/compactor2/src/config.rs @@ -1,5 +1,5 @@ //! Config-related stuff. -use std::{collections::HashSet, num::NonZeroUsize, sync::Arc, time::Duration}; +use std::{collections::HashSet, fmt::Display, num::NonZeroUsize, sync::Arc, time::Duration}; use backoff::{Backoff, BackoffConfig}; use data_types::{PartitionId, ShardId, ShardIndex}; @@ -74,10 +74,8 @@ pub struct Config { /// Maximum duration of the per-partition compaction task. pub partition_timeout: Duration, - /// Filter partitions to the given set of IDs. - /// - /// This is mostly useful for debugging. - pub partition_filter: Option<HashSet<PartitionId>>, + /// Source of partitions to consider for comapction. + pub partitions_source: PartitionsSourceConfig, /// Shadow mode. /// @@ -191,3 +189,34 @@ pub enum AlgoVersion { /// NOT yet ready for production. TargetLevel, } + +/// Partitions source config. 
+#[derive(Debug, Clone)] +pub enum PartitionsSourceConfig { + /// Use the catalog to determine which partitions have recently received writes. + CatalogRecentWrites, + + /// Use all partitions from the catalog. + /// + /// This does NOT consider if/when a partition received any writes. + CatalogAll, + + /// Use a fixed set of partitions. + /// + /// This is mostly useful for debugging. + Fixed(HashSet<PartitionId>), +} + +impl Display for PartitionsSourceConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::CatalogRecentWrites => write!(f, "catalog_recent_writes"), + Self::CatalogAll => write!(f, "catalog_all"), + Self::Fixed(p_ids) => { + let mut p_ids = p_ids.iter().copied().collect::<Vec<_>>(); + p_ids.sort(); + write!(f, "fixed({p_ids:?})") + } + } + } +} diff --git a/compactor2/src/test_util.rs b/compactor2/src/test_util.rs index c95095d78c..3de3cf99b0 100644 --- a/compactor2/src/test_util.rs +++ b/compactor2/src/test_util.rs @@ -28,7 +28,7 @@ use uuid::Uuid; use crate::{ components::namespaces_source::mock::NamespaceWrapper, - config::{AlgoVersion, Config}, + config::{AlgoVersion, Config, PartitionsSourceConfig}, partition_info::PartitionInfo, }; @@ -495,7 +495,7 @@ impl TestSetupBuilder { percentage_max_file_size: PERCENTAGE_MAX_FILE_SIZE, split_percentage: SPLIT_PERCENTAGE, partition_timeout: Duration::from_secs(3_600), - partition_filter: None, + partitions_source: PartitionsSourceConfig::CatalogRecentWrites, shadow_mode: self.shadow_mode, ignore_partition_skip_marker: false, max_input_files_per_partition: usize::MAX, diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs index b07256893b..8cc9169de1 100644 --- a/influxdb_iox/src/commands/run/all_in_one.rs +++ b/influxdb_iox/src/commands/run/all_in_one.rs @@ -433,6 +433,7 @@ impl Config { compact_version: CompactorAlgoVersion::AllAtOnce, min_num_l1_files_to_compact: 1, process_once: false, + process_all_partitions: false, }; let querier_config = QuerierConfig { diff --git a/iox_catalog/src/interface.rs b/iox_catalog/src/interface.rs index 9e00ae8b51..edb1e08c98 100644 --- a/iox_catalog/src/interface.rs +++ b/iox_catalog/src/interface.rs @@ -459,6 +459,9 @@ pub trait PartitionRepo: Send + Sync { /// return the partitions by table id async fn list_by_table_id(&mut self, table_id: TableId) -> Result<Vec<Partition>>; + /// return all partitions IDs + async fn list_ids(&mut self) -> Result<Vec<PartitionId>>; + /// Update the sort key for the partition, setting it to `new_sort_key` iff /// the current value matches `old_sort_key`. 
/// @@ -955,6 +958,7 @@ pub(crate) mod test_helpers { }; use metric::{Attributes, DurationHistogram, Metric}; use std::{ + collections::BTreeSet, ops::{Add, DerefMut}, sync::Arc, time::Duration, @@ -1645,6 +1649,16 @@ pub(crate) mod test_helpers { created.insert(other_partition.id, other_partition.clone()); assert_eq!(created, listed); + let listed = repos + .partitions() + .list_ids() + .await + .expect("failed to list partitions") + .into_iter() + .collect::<BTreeSet<_>>(); + + assert_eq!(created.keys().copied().collect::<BTreeSet<_>>(), listed); + // test list_by_namespace let namespace2 = repos .namespaces() diff --git a/iox_catalog/src/mem.rs b/iox_catalog/src/mem.rs index d3aed1fb5c..2fa3b9fe4e 100644 --- a/iox_catalog/src/mem.rs +++ b/iox_catalog/src/mem.rs @@ -873,6 +873,14 @@ impl PartitionRepo for MemTxn { Ok(partitions) } + async fn list_ids(&mut self) -> Result<Vec<PartitionId>> { + let stage = self.stage(); + + let partitions: Vec<_> = stage.partitions.iter().map(|p| p.id).collect(); + + Ok(partitions) + } + async fn cas_sort_key( &mut self, partition_id: PartitionId, diff --git a/iox_catalog/src/metrics.rs b/iox_catalog/src/metrics.rs index f39797467f..462def83d9 100644 --- a/iox_catalog/src/metrics.rs +++ b/iox_catalog/src/metrics.rs @@ -246,6 +246,7 @@ decorate!( "partition_list_by_shard" = list_by_shard(&mut self, shard_id: ShardId) -> Result<Vec<Partition>>; "partition_list_by_namespace" = list_by_namespace(&mut self, namespace_id: NamespaceId) -> Result<Vec<Partition>>; "partition_list_by_table_id" = list_by_table_id(&mut self, table_id: TableId) -> Result<Vec<Partition>>; + "partition_list_ids" = list_ids(&mut self) -> Result<Vec<PartitionId>>; "partition_update_sort_key" = cas_sort_key(&mut self, partition_id: PartitionId, old_sort_key: Option<Vec<String>>, new_sort_key: &[&str]) -> Result<Partition, CasFailure<Vec<String>>>; "partition_record_skipped_compaction" = record_skipped_compaction(&mut self, partition_id: PartitionId, reason: &str, num_files: usize, limit_num_files: usize, limit_num_files_first_in_partition: usize, estimated_bytes: u64, limit_bytes: u64) -> Result<()>; "partition_list_skipped_compactions" = list_skipped_compactions(&mut self) -> Result<Vec<SkippedCompaction>>; diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index 604d1ded92..a35d2a2ae1 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -1287,6 +1287,18 @@ WHERE table_id = $1; .map_err(|e| Error::SqlxError { source: e }) } + async fn list_ids(&mut self) -> Result<Vec<PartitionId>> { + sqlx::query_as( + r#" + SELECT p.id as partition_id + FROM partition p + "#, + ) + .fetch_all(&mut self.inner) + .await + .map_err(|e| Error::SqlxError { source: e }) + } + /// Update the sort key for `partition_id` if and only if `old_sort_key` /// matches the current value in the database. /// diff --git a/iox_catalog/src/sqlite.rs b/iox_catalog/src/sqlite.rs index 3380c810c4..8123a99ed6 100644 --- a/iox_catalog/src/sqlite.rs +++ b/iox_catalog/src/sqlite.rs @@ -1114,6 +1114,18 @@ WHERE table_id = $1; .collect()) } + async fn list_ids(&mut self) -> Result<Vec<PartitionId>> { + sqlx::query_as( + r#" + SELECT p.id as partition_id + FROM partition p + "#, + ) + .fetch_all(self.inner.get_mut()) + .await + .map_err(|e| Error::SqlxError { source: e }) + } + /// Update the sort key for `partition_id` if and only if `old_sort_key` /// matches the current value in the database. 
/// diff --git a/ioxd_compactor2/src/lib.rs b/ioxd_compactor2/src/lib.rs index f5677bed60..0609ecedff 100644 --- a/ioxd_compactor2/src/lib.rs +++ b/ioxd_compactor2/src/lib.rs @@ -3,7 +3,7 @@ use backoff::BackoffConfig; use clap_blocks::compactor2::{Compactor2Config, CompactorAlgoVersion}; use compactor2::{ compactor::Compactor2, - config::{AlgoVersion, Config, ShardConfig}, + config::{AlgoVersion, Config, PartitionsSourceConfig, ShardConfig}, }; use data_types::{PartitionId, TRANSITION_SHARD_NUMBER}; use hyper::{Body, Request, Response}; @@ -163,6 +163,20 @@ pub async fn create_compactor2_server_type( CompactorAlgoVersion::TargetLevel => AlgoVersion::TargetLevel, }; + let partitions_source = match ( + compactor_config.partition_filter, + compactor_config.process_all_partitions, + ) { + (None, false) => PartitionsSourceConfig::CatalogRecentWrites, + (None, true) => PartitionsSourceConfig::CatalogAll, + (Some(ids), false) => { + PartitionsSourceConfig::Fixed(ids.into_iter().map(PartitionId::new).collect()) + } + (Some(_), true) => panic!( + "provided partition ID filter and specific 'process all', this does not make sense" + ), + }; + let compactor = Compactor2::start(Config { shard_id, metric_registry: Arc::clone(&metric_registry), @@ -183,9 +197,7 @@ pub async fn create_compactor2_server_type( percentage_max_file_size: compactor_config.percentage_max_file_size, split_percentage: compactor_config.split_percentage, partition_timeout: Duration::from_secs(compactor_config.partition_timeout_secs), - partition_filter: compactor_config - .partition_filter - .map(|parts| parts.into_iter().map(PartitionId::new).collect()), + partitions_source, shadow_mode: compactor_config.shadow_mode, ignore_partition_skip_marker: compactor_config.ignore_partition_skip_marker, max_input_files_per_partition: compactor_config.max_input_files_per_partition,
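The new `--compaction-process-all-partitions` flag introduced above is mutually exclusive with the existing partition ID filter option. A small stand-alone sketch of the flag-to-config resolution performed in `ioxd_compactor2`, using plain `i64` IDs instead of the `PartitionId` newtype:

```rust
use std::collections::HashSet;

#[derive(Debug)]
enum PartitionsSourceConfig {
    /// Partitions that recently received writes (the default).
    CatalogRecentWrites,
    /// Every partition known to the catalog.
    CatalogAll,
    /// An explicit, fixed set of partition IDs (debugging aid).
    Fixed(HashSet<i64>),
}

fn resolve(partition_filter: Option<Vec<i64>>, process_all: bool) -> PartitionsSourceConfig {
    match (partition_filter, process_all) {
        (None, false) => PartitionsSourceConfig::CatalogRecentWrites,
        (None, true) => PartitionsSourceConfig::CatalogAll,
        (Some(ids), false) => PartitionsSourceConfig::Fixed(ids.into_iter().collect()),
        (Some(_), true) => {
            panic!("provided partition ID filter and 'process all'; this does not make sense")
        }
    }
}

fn main() {
    println!("{:?}", resolve(None, false)); // CatalogRecentWrites
    println!("{:?}", resolve(None, true)); // CatalogAll
    println!("{:?}", resolve(Some(vec![1, 2]), false)); // Fixed({1, 2}) in some order
}
```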
5c146317aab3ea77015959c68fbcb91db6d0e3b8
Michael Gattozzi
2024-06-13 13:56:39
Update Rust to 1.79.0 (#25061)
Fairly quiet update for us. The only change was switching to the numeric constants now built into the primitive types rather than the ones from `std`: https://rust-lang.github.io/rust-clippy/master/index.html#/legacy_numeric_constants Release post: https://blog.rust-lang.org/2024/06/13/Rust-1.79.0.html
null
chore: Update Rust to 1.79.0 (#25061) Fairly quiet update for us. The only change was switching to the numeric constants now built into the primitive types rather than the ones from `std`: https://rust-lang.github.io/rust-clippy/master/index.html#/legacy_numeric_constants Release post: https://blog.rust-lang.org/2024/06/13/Rust-1.79.0.html
diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index ec0ae4b586..e0135792bb 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -41,7 +41,6 @@ use sha2::Digest; use sha2::Sha256; use std::borrow::Cow; use std::collections::HashMap; -use std::i64; use std::sync::{Arc, OnceLock}; use thiserror::Error; use tokio::sync::watch; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 1ec3ae9268..50543c83aa 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78.0" +channel = "1.79.0" components = ["rustfmt", "clippy", "rust-analyzer"]
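The `legacy_numeric_constants` lint referenced above flags the old module-level constants in favour of the associated constants on the primitive types, which is why the `use std::i64;` import in `write_buffer` could simply be dropped. A tiny illustration:

```rust
fn main() {
    // Old style, now flagged by clippy's `legacy_numeric_constants`:
    //     use std::i64;
    //     let max = std::i64::MAX;

    // Preferred style: associated constants on the primitive itself.
    let max = i64::MAX;
    let min = i64::MIN;
    println!("i64 range: {min}..={max}");
}
```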
95216055d8bbca2b36149f225fcf1de71c08b87b
Dom Dwyer
2022-11-25 15:35:39
stream BufferTree partition data
This commit implements the QueryExec trait for the BufferTree, allowing it to be queried for the partition data it contains. With this change, the BufferTree now provides "read your writes" functionality. Notably, the implementation streams the contents of individual partitions to the caller on demand (pull-based execution), deferring acquisition of the partition lock until actually necessary and minimising how long a strong reference to a specific RecordBatch is held, in order to minimise the memory overhead.
During query execution a client sees a consistent snapshot of partitions: once a client begins streaming the query response, incoming writes that create new partitions do not become visible. However, incoming writes to an existing partition that forms part of the snapshot set become visible iff they are ordered before the acquisition of the partition lock when streaming that partition data to the client.
perf(ingester2): stream BufferTree partition data This commit implements the QueryExec trait for the BufferTree, allowing it to be queried for the partition data it contains. With this change, the BufferTree now provides "read your writes" functionality. Notably, the implementation streams the contents of individual partitions to the caller on demand (pull-based execution), deferring acquisition of the partition lock until actually necessary and minimising how long a strong reference to a specific RecordBatch is held, in order to minimise the memory overhead. During query execution a client sees a consistent snapshot of partitions: once a client begins streaming the query response, incoming writes that create new partitions do not become visible. However, incoming writes to an existing partition that forms part of the snapshot set become visible iff they are ordered before the acquisition of the partition lock when streaming that partition data to the client.
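The read-consistency behaviour described in this commit message can be illustrated with a toy model, which is not the IOx implementation: the partition set is snapshotted when the query starts, and each partition's lock is only taken at the moment its data is pulled, so writes racing with the stream are visible only for partitions that have not been read yet. All types below are invented for the example.

```rust
use std::sync::{Arc, Mutex};

struct Partition {
    rows: Vec<String>,
}

struct Query {
    // Snapshot taken at query start; partitions created later are never
    // visible to this query.
    snapshot: Vec<Arc<Mutex<Partition>>>,
    next: usize,
}

impl Query {
    fn new(partitions: &[Arc<Mutex<Partition>>]) -> Self {
        Self {
            snapshot: partitions.to_vec(),
            next: 0,
        }
    }

    /// Pull the next partition's data, acquiring its lock only now
    /// (pull-based execution).
    fn next_batch(&mut self) -> Option<Vec<String>> {
        let p = self.snapshot.get(self.next)?;
        self.next += 1;
        Some(p.lock().unwrap().rows.clone())
    }
}

fn main() {
    let p1 = Arc::new(Mutex::new(Partition { rows: vec!["a".into()] }));
    let p2 = Arc::new(Mutex::new(Partition { rows: vec!["b".into()] }));
    let mut query = Query::new(&[Arc::clone(&p1), Arc::clone(&p2)]);

    assert_eq!(query.next_batch().unwrap(), vec!["a".to_string()]);

    // A write to p2 lands before p2 is pulled, so it is visible...
    p2.lock().unwrap().rows.push("c".into());
    assert_eq!(
        query.next_batch().unwrap(),
        vec!["b".to_string(), "c".to_string()]
    );
    // ...whereas anything written to p1 now would not be observed, since the
    // query has already moved past it.
}
```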
diff --git a/ingester2/Cargo.toml b/ingester2/Cargo.toml index 98b26da5c0..87583b472f 100644 --- a/ingester2/Cargo.toml +++ b/ingester2/Cargo.toml @@ -14,6 +14,7 @@ backoff = { version = "0.1.0", path = "../backoff" } bytes = "1.3.0" data_types = { version = "0.1.0", path = "../data_types" } datafusion.workspace = true +datafusion_util = { path = "../datafusion_util" } dml = { version = "0.1.0", path = "../dml" } flatbuffers = "22" futures = "0.3.25" diff --git a/ingester2/src/buffer_tree/namespace.rs b/ingester2/src/buffer_tree/namespace.rs index c383835ae6..a73766a384 100644 --- a/ingester2/src/buffer_tree/namespace.rs +++ b/ingester2/src/buffer_tree/namespace.rs @@ -9,12 +9,18 @@ use data_types::{NamespaceId, TableId}; use dml::DmlOperation; use metric::U64Counter; use observability_deps::tracing::warn; +use trace::span::Span; use super::{ partition::resolver::PartitionProvider, table::{name_resolver::TableNameProvider, TableData}, }; -use crate::{arcmap::ArcMap, deferred_load::DeferredLoad, dml_sink::DmlSink}; +use crate::{ + arcmap::ArcMap, + deferred_load::DeferredLoad, + dml_sink::DmlSink, + query::{response::QueryResponse, tracing::QueryExecTracing, QueryError, QueryExec}, +}; /// The string name / identifier of a Namespace. /// @@ -106,11 +112,6 @@ impl NamespaceData { self.namespace_id } - #[cfg(test)] - pub(super) fn table_count(&self) -> &U64Counter { - &self.table_count - } - /// Returns the [`NamespaceName`] for this namespace. pub(crate) fn namespace_name(&self) -> &DeferredLoad<NamespaceName> { &self.namespace_name @@ -168,6 +169,37 @@ impl DmlSink for NamespaceData { } } +#[async_trait] +impl QueryExec for NamespaceData { + type Response = QueryResponse; + + async fn query_exec( + &self, + namespace_id: NamespaceId, + table_id: TableId, + columns: Vec<String>, + span: Option<Span>, + ) -> Result<Self::Response, QueryError> { + assert_eq!( + self.namespace_id, namespace_id, + "buffer tree index inconsistency" + ); + + // Extract the table if it exists. + let inner = self + .table(table_id) + .ok_or(QueryError::TableNotFound(namespace_id, table_id))?; + + // Delegate query execution to the namespace, wrapping the execution in + // a tracing delegate to emit a child span. 
+ Ok(QueryResponse::new( + QueryExecTracing::new(inner, "table") + .query_exec(namespace_id, table_id, columns, span) + .await?, + )) + } +} + #[cfg(test)] mod tests { use std::{sync::Arc, time::Duration}; diff --git a/ingester2/src/buffer_tree/root.rs b/ingester2/src/buffer_tree/root.rs index 773a994ca6..076c196f8d 100644 --- a/ingester2/src/buffer_tree/root.rs +++ b/ingester2/src/buffer_tree/root.rs @@ -1,17 +1,71 @@ use std::sync::Arc; use async_trait::async_trait; -use data_types::NamespaceId; +use data_types::{NamespaceId, TableId}; use dml::DmlOperation; use metric::U64Counter; +use trace::span::Span; use super::{ namespace::{name_resolver::NamespaceNameProvider, NamespaceData}, partition::resolver::PartitionProvider, table::name_resolver::TableNameProvider, }; -use crate::{arcmap::ArcMap, dml_sink::DmlSink}; +use crate::{ + arcmap::ArcMap, + dml_sink::DmlSink, + query::{response::QueryResponse, tracing::QueryExecTracing, QueryError, QueryExec}, +}; +/// A [`BufferTree`] is the root of an in-memory tree of many [`NamespaceData`] +/// containing one or more child [`TableData`] nodes, which in turn contain one +/// or more [`PartitionData`] nodes: +/// +/// ```text +/// +/// ╔════════════════╗ +/// ║ BufferTree ║ +/// ╚═══════╦════════╝ +/// ▼ +/// ┌────────────┐ +/// │ Namespace ├┐ +/// └┬───────────┘├┐ +/// └┬───────────┘│ +/// └────┬───────┘ +/// ▼ +/// ┌────────────┐ +/// │ Table ├┐ +/// └┬───────────┘├┐ +/// └┬───────────┘│ +/// └────┬───────┘ +/// ▼ +/// ┌────────────┐ +/// │ Partition ├┐ +/// └┬───────────┘├┐ +/// └┬───────────┘│ +/// └────────────┘ +/// ``` +/// +/// A buffer tree is a mutable data structure that implements [`DmlSink`] to +/// apply successive [`DmlOperation`] to its internal state, and makes the +/// materialised result available through a streaming [`QueryExec`] execution. +/// +/// # Read Consistency +/// +/// When [`BufferTree::query_exec()`] is called for a given table, a snapshot of +/// the table's current set of partitions is created and the data within these +/// partitions will be streamed to the client as they consume the response. New +/// partitions that are created concurrently to the query execution do not ever +/// become visible. +/// +/// Concurrent writes during query execution to a partition that forms part of +/// this snapshot will be visible iff the write has been fully applied to the +/// partition's data buffer before the query stream reads the data from that +/// partition. Once a partition has been read, the data within it is immutable +/// from the caller's perspective, and subsequent writes DO NOT become visible. +/// +/// [`TableData`]: crate::buffer_tree::table::TableData +/// [`PartitionData`]: crate::buffer_tree::partition::PartitionData #[derive(Debug)] pub(crate) struct BufferTree { /// The resolver of `(table_id, partition_key)` to [`PartitionData`]. @@ -95,3 +149,701 @@ impl DmlSink for BufferTree { namespace_data.apply(op).await } } + +#[async_trait] +impl QueryExec for BufferTree { + type Response = QueryResponse; + + async fn query_exec( + &self, + namespace_id: NamespaceId, + table_id: TableId, + columns: Vec<String>, + span: Option<Span>, + ) -> Result<Self::Response, QueryError> { + // Extract the namespace if it exists. + let inner = self + .namespace(namespace_id) + .ok_or(QueryError::NamespaceNotFound(namespace_id))?; + + // Delegate query execution to the namespace, wrapping the execution in + // a tracing delegate to emit a child span. 
+ QueryExecTracing::new(inner, "namespace") + .query_exec(namespace_id, table_id, columns, span) + .await + } +} + +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use data_types::{PartitionId, PartitionKey}; + use datafusion::{assert_batches_eq, assert_batches_sorted_eq}; + use futures::{StreamExt, TryStreamExt}; + use metric::{Attributes, Metric}; + + use super::*; + use crate::{ + buffer_tree::{ + namespace::{name_resolver::mock::MockNamespaceNameProvider, NamespaceData}, + partition::{resolver::mock::MockPartitionProvider, PartitionData, SortKeyState}, + table::{name_resolver::mock::MockTableNameProvider, TableName}, + }, + deferred_load::{self, DeferredLoad}, + query::partition_response::PartitionResponse, + test_util::make_write_op, + }; + + const TABLE_ID: TableId = TableId::new(44); + const TABLE_NAME: &str = "bananas"; + const NAMESPACE_NAME: &str = "platanos"; + const NAMESPACE_ID: NamespaceId = NamespaceId::new(42); + + #[tokio::test] + async fn test_namespace_init_table() { + let metrics = Arc::new(metric::Registry::default()); + + // Configure the mock partition provider to return a partition for this + // table ID. + let partition_provider = Arc::new(MockPartitionProvider::default().with_partition( + PartitionData::new( + PartitionId::new(0), + PartitionKey::from("banana-split"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ), + )); + + // Init the namespace + let ns = NamespaceData::new( + NAMESPACE_ID, + DeferredLoad::new(Duration::from_millis(1), async { NAMESPACE_NAME.into() }), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), + partition_provider, + &metrics, + ); + + // Assert the namespace name was stored + let name = ns.namespace_name().to_string(); + assert!( + (name == NAMESPACE_NAME) || (name == deferred_load::UNRESOLVED_DISPLAY_STRING), + "unexpected namespace name: {name}" + ); + + // Assert the namespace does not contain the test data + assert!(ns.table(TABLE_ID).is_none()); + + // Write some test data + ns.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("banana-split"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,city=Madrid day="sun",temp=55 22"#, + ))) + .await + .expect("buffer op should succeed"); + + // Referencing the table should succeed + assert!(ns.table(TABLE_ID).is_some()); + + // And the table counter metric should increase + let tables = metrics + .get_instrument::<Metric<U64Counter>>("ingester_tables") + .expect("failed to read metric") + .get_observer(&Attributes::from([])) + .expect("failed to get observer") + .fetch(); + assert_eq!(tables, 1); + + // Ensure the deferred namespace name is loaded. + let name = ns.namespace_name().get().await; + assert_eq!(&**name, NAMESPACE_NAME); + assert_eq!(ns.namespace_name().to_string(), NAMESPACE_NAME); + } + + /// Generate a test that performs a set of writes and assert the data within + /// the table with TABLE_ID in the namespace with NAMESPACE_ID. + macro_rules! test_write_query { + ( + $name:ident, + partitions = [$($partition:expr), +], // The set of PartitionData for the mock partition provider + writes = [$($write:expr), *], // The set of DmlWrite to apply() + want = $want:expr // The expected results of querying NAMESPACE_ID and TABLE_ID + ) => { + paste::paste! { + #[tokio::test] + async fn [<test_write_query_ $name>]() { + // Configure the mock partition provider with the provided + // partitions. 
+ let partition_provider = Arc::new(MockPartitionProvider::default() + $( + .with_partition($partition) + )+ + ); + + // Init the buffer tree + let buf = BufferTree::new( + Arc::new(MockNamespaceNameProvider::default()), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), + partition_provider, + Arc::new(metric::Registry::default()), + ); + + // Write the provided DmlWrites + $( + buf.apply(DmlOperation::Write($write)) + .await + .expect("failed to perform write"); + )* + + // Execute the query against NAMESPACE_ID and TABLE_ID + let batches = buf + .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None) + .await + .expect("query should succeed") + .into_record_batches() + .try_collect::<Vec<_>>() + .await + .expect("query failed"); + + // Assert the contents of NAMESPACE_ID and TABLE_ID + assert_batches_sorted_eq!( + $want, + &batches + ); + } + } + }; + } + + // A simple "read your writes" test. + test_write_query!( + read_writes, + partitions = [PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + )], + writes = [make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + )], + want = [ + "+----------+------+-------------------------------+", + "| region | temp | time |", + "+----------+------+-------------------------------+", + "| Asturias | 35 | 1970-01-01T00:00:04.242424242 |", + "+----------+------+-------------------------------+", + ] + ); + + // A query that ensures the data across multiple partitions within a single + // table are returned. + test_write_query!( + multiple_partitions, + partitions = [ + PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ), + PartitionData::new( + PartitionId::new(1), + PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ) + ], + writes = [ + make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Madrid temp=35 4242424242"#, + ), + make_write_op( + &PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Asturias temp=25 4242424242"#, + ) + ], + want = [ + "+----------+------+-------------------------------+", + "| region | temp | time |", + "+----------+------+-------------------------------+", + "| Madrid | 35 | 1970-01-01T00:00:04.242424242 |", + "| Asturias | 25 | 1970-01-01T00:00:04.242424242 |", + "+----------+------+-------------------------------+", + ] + ); + + // A query that ensures the data across multiple namespaces is correctly + // filtered to return only the queried table. + test_write_query!( + filter_multiple_namespaces, + partitions = [ + PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ), + PartitionData::new( + PartitionId::new(1), + PartitionKey::from("p2"), + NamespaceId::new(4321), // A different namespace ID. + TableId::new(1234), // A different table ID. 
+ Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ) + ], + writes = [ + make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Madrid temp=25 4242424242"#, + ), + make_write_op( + &PartitionKey::from("p2"), + NamespaceId::new(4321), // A different namespace ID. + TABLE_NAME, + TableId::new(1234), // A different table ID + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + ) + ], + want = [ + "+--------+------+-------------------------------+", + "| region | temp | time |", + "+--------+------+-------------------------------+", + "| Madrid | 25 | 1970-01-01T00:00:04.242424242 |", + "+--------+------+-------------------------------+", + ] + ); + + // A query that ensures the data across multiple tables (with the same table + // name!) is correctly filtered to return only the queried table. + test_write_query!( + filter_multiple_tabls, + partitions = [ + PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ), + PartitionData::new( + PartitionId::new(1), + PartitionKey::from("p2"), + NAMESPACE_ID, + TableId::new(1234), // A different table ID. + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ) + ], + writes = [ + make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Madrid temp=25 4242424242"#, + ), + make_write_op( + &PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_NAME, + TableId::new(1234), // A different table ID + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + ) + ], + want = [ + "+--------+------+-------------------------------+", + "| region | temp | time |", + "+--------+------+-------------------------------+", + "| Madrid | 25 | 1970-01-01T00:00:04.242424242 |", + "+--------+------+-------------------------------+", + ] + ); + + // Assert that no dedupe operations are performed when querying a partition + // that contains duplicate rows for a single series/primary key, but the + // operations maintain their ordering (later writes appear after earlier + // writes). + test_write_query!( + duplicate_writes, + partitions = [PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + )], + writes = [ + make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + ), + make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 1, + r#"bananas,region=Asturias temp=12 4242424242"#, + ) + ], + want = [ + "+----------+------+-------------------------------+", + "| region | temp | time |", + "+----------+------+-------------------------------+", + "| Asturias | 35 | 1970-01-01T00:00:04.242424242 |", + "| Asturias | 12 | 1970-01-01T00:00:04.242424242 |", + "+----------+------+-------------------------------+", + ] + ); + + /// Assert that multiple writes to a single namespace/table results in a + /// single namespace being created, and matching metrics. 
+ #[tokio::test] + async fn test_metrics() { + // Configure the mock partition provider to return a single partition, named + // p1. + let partition_provider = Arc::new( + MockPartitionProvider::default() + .with_partition(PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + )) + .with_partition(PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + )), + ); + + let metrics = Arc::new(metric::Registry::default()); + + // Init the buffer tree + let buf = BufferTree::new( + Arc::new(MockNamespaceNameProvider::default()), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), + partition_provider, + Arc::clone(&metrics), + ); + + // Write data to partition p1, in table "bananas". + buf.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + ))) + .await + .expect("failed to write initial data"); + + // Write a duplicate record with the same series key & timestamp, but a + // different temp value. + buf.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 1, + r#"bananas,region=Asturias temp=12 4242424242"#, + ))) + .await + .expect("failed to overwrite data"); + + // Validate namespace count + assert_eq!(buf.namespaces.values().len(), 1); + let m = metrics + .get_instrument::<Metric<U64Counter>>("ingester_namespaces") + .expect("failed to read metric") + .get_observer(&Attributes::from(&[])) + .expect("failed to find metric with attributes") + .fetch(); + assert_eq!(m, 1, "namespace counter mismatch"); + + // Validate table count + let m = metrics + .get_instrument::<Metric<U64Counter>>("ingester_tables") + .expect("failed to read metric") + .get_observer(&Attributes::from(&[])) + .expect("failed to find metric with attributes") + .fetch(); + assert_eq!(m, 1, "tables counter mismatch"); + } + + /// Assert the correct "not found" errors are generated for missing + /// table/namespaces, and that querying an entirely empty buffer tree + /// returns no data (as opposed to panicking, etc). + #[tokio::test] + async fn test_not_found() { + let partition_provider = Arc::new(MockPartitionProvider::default().with_partition( + PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + ), + )); + + // Init the BufferTree + let buf = BufferTree::new( + Arc::new(MockNamespaceNameProvider::default()), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), + partition_provider, + Arc::new(metric::Registry::default()), + ); + + // Query the empty tree + let err = buf + .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None) + .await + .expect_err("query should fail"); + assert_matches::assert_matches!(err, QueryError::NamespaceNotFound(ns) => { + assert_eq!(ns, NAMESPACE_ID); + }); + + // Write data to partition p1, in table "bananas". 
+ buf.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + ))) + .await + .expect("failed to write data"); + + // Ensure an unknown table errors + let err = buf + .query_exec(NAMESPACE_ID, TableId::new(1234), vec![], None) + .await + .expect_err("query should fail"); + assert_matches::assert_matches!(err, QueryError::TableNotFound(ns, t) => { + assert_eq!(ns, NAMESPACE_ID); + assert_eq!(t, TableId::new(1234)); + }); + + // Ensure a valid namespace / table does not error + buf.query_exec(NAMESPACE_ID, TABLE_ID, vec![], None) + .await + .expect("namespace / table should exist"); + } + + /// This test asserts the read consistency properties defined in the + /// [`BufferTree`] type docs. + /// + /// Specifically, this test ensures: + /// + /// * A read snapshot of the set of partitions is created during the + /// construction of the query stream. New partitions added (or existing + /// partitions removed) do not change the query results once the stream + /// has been initialised. + /// * Concurrent writes to partitions that form part of the read snapshot + /// become visible if they are ordered/applied before the acquisition of + /// the partition data by the query stream. Writes ordered after the + /// partition lock acquisition do not become readable. + /// + /// All writes use the same write timestamp as it is not a factor in + /// ordering of writes. + #[tokio::test] + async fn test_read_consistency() { + // Configure the mock partition provider to return two partitions, named + // p1 and p2. + let partition_provider = Arc::new( + MockPartitionProvider::default() + .with_partition(PartitionData::new( + PartitionId::new(0), + PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + )) + .with_partition(PartitionData::new( + PartitionId::new(1), + PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_ID, + Arc::new(DeferredLoad::new(Duration::from_secs(1), async { + TableName::from(TABLE_NAME) + })), + SortKeyState::Provided(None), + None, + )), + ); + + // Init the buffer tree + let buf = BufferTree::new( + Arc::new(MockNamespaceNameProvider::default()), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), + partition_provider, + Arc::new(metric::Registry::default()), + ); + + // Write data to partition p1, in table "bananas". + buf.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 0, + r#"bananas,region=Madrid temp=35 4242424242"#, + ))) + .await + .expect("failed to write initial data"); + + // Execute a query of the buffer tree, generating the result stream, but + // DO NOT consume it. + let stream = buf + .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None) + .await + .expect("query should succeed") + .into_partition_stream(); + + // Perform a write concurrent to the consumption of the query stream + // that creates a new partition (p2) in the same table. + buf.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("p2"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 1, + r#"bananas,region=Asturias temp=20 4242424242"#, + ))) + .await + .expect("failed to perform concurrent write to new partition"); + + // Perform another write that hits the partition within the query + // results snapshot (p1) before the partition is read. 
+ buf.apply(DmlOperation::Write(make_write_op( + &PartitionKey::from("p1"), + NAMESPACE_ID, + TABLE_NAME, + TABLE_ID, + 2, + r#"bananas,region=Murcia temp=30 4242424242"#, + ))) + .await + .expect("failed to perform concurrent write to existing partition"); + + // Consume the set of partitions within the query stream. + // + // Under the specified query consistency guarantees, both the first and + // third writes (both to p1) should be visible. The second write to p2 + // should not be visible. + let mut partitions: Vec<PartitionResponse> = stream.collect().await; + assert_eq!(partitions.len(), 1); // only p1, not p2 + let partition = partitions.pop().unwrap(); + + // Perform the partition read + let batches = + datafusion::physical_plan::common::collect(partition.into_record_batch_stream()) + .await + .expect("failed to collate query results"); + + // Assert the contents of p1 contains both the initial write, and the + // 3rd write in a single RecordBatch. + assert_batches_eq!( + [ + "+--------+------+-------------------------------+", + "| region | temp | time |", + "+--------+------+-------------------------------+", + "| Madrid | 35 | 1970-01-01T00:00:04.242424242 |", + "| Murcia | 30 | 1970-01-01T00:00:04.242424242 |", + "+--------+------+-------------------------------+", + ], + &batches + ); + } +} diff --git a/ingester2/src/buffer_tree/table.rs b/ingester2/src/buffer_tree/table.rs index 9e24c27615..244ea7b785 100644 --- a/ingester2/src/buffer_tree/table.rs +++ b/ingester2/src/buffer_tree/table.rs @@ -4,12 +4,22 @@ pub(crate) mod name_resolver; use std::sync::Arc; +use async_trait::async_trait; use data_types::{NamespaceId, PartitionId, PartitionKey, SequenceNumber, TableId}; +use datafusion_util::MemoryStream; use mutable_batch::MutableBatch; use parking_lot::{Mutex, RwLock}; +use schema::Projection; +use trace::span::{Span, SpanRecorder}; use super::partition::{resolver::PartitionProvider, PartitionData}; -use crate::{arcmap::ArcMap, deferred_load::DeferredLoad}; +use crate::{ + arcmap::ArcMap, + deferred_load::DeferredLoad, + query::{ + partition_response::PartitionResponse, response::PartitionStream, QueryError, QueryExec, + }, +}; /// A double-referenced map where [`PartitionData`] can be looked up by /// [`PartitionKey`], or ID. @@ -172,10 +182,7 @@ impl TableData { } /// Return the [`PartitionData`] for the specified ID. - pub(crate) fn get_partition( - &self, - partition_id: PartitionId, - ) -> Option<Arc<Mutex<PartitionData>>> { + pub(crate) fn partition(&self, partition_id: PartitionId) -> Option<Arc<Mutex<PartitionData>>> { self.partition_data.read().by_id(partition_id) } @@ -203,6 +210,57 @@ impl TableData { } } +#[async_trait] +impl QueryExec for TableData { + type Response = PartitionStream; + + async fn query_exec( + &self, + namespace_id: NamespaceId, + table_id: TableId, + columns: Vec<String>, + span: Option<Span>, + ) -> Result<Self::Response, QueryError> { + assert_eq!(self.table_id, table_id, "buffer tree index inconsistency"); + assert_eq!( + self.namespace_id, namespace_id, + "buffer tree index inconsistency" + ); + + // Gather the partition data from all of the partitions in this table. + let partitions = self.partitions().into_iter().filter_map(move |p| { + let mut span = SpanRecorder::new(span.clone().map(|s| s.child("partition read"))); + + let (id, data) = { + let mut p = p.lock(); + (p.partition_id(), p.get_query_data()?) 
+ }; + assert_eq!(id, data.partition_id()); + + // Project the data if necessary + let columns = columns.iter().map(String::as_str).collect::<Vec<_>>(); + let selection = if columns.is_empty() { + Projection::All + } else { + Projection::Some(columns.as_ref()) + }; + + let ret = PartitionResponse::new( + Box::pin(MemoryStream::new( + data.project_selection(selection).into_iter().collect(), + )), + id, + None, + ); + + span.ok("read partition data"); + Some(ret) + }); + + Ok(PartitionStream::new(futures::stream::iter(partitions))) + } +} + #[cfg(test)] mod tests { use std::{sync::Arc, time::Duration};
fad11b524f43a6dd2d723fc49118cc543435a04d
Marco Neumann
2023-06-13 12:51:26
fix JDK for integration tests (#7980)
Broken after the first nightly CI image was generated after #7972. Move JDK installation to image generation and fix version.
null
chore: fix JDK for integration tests (#7980) Broken after the first nightly CI image was generated after #7972. Move JDK installation to image generation and fix version.
diff --git a/.circleci/config.yml b/.circleci/config.yml index f0ee2e8066..bcf1e3319a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -229,11 +229,6 @@ jobs: TEST_INFLUXDB_JDBC: "true" steps: - checkout - - run: - name: Install javac - command: | - sudo apt-get update - sudo apt-get install openjdk-11-jdk -y - rust_components - run: name: Download flight-sql-jdbc-driver-10.0.0.jar diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci index 554a43f6ed..ccb88ffaea 100644 --- a/docker/Dockerfile.ci +++ b/docker/Dockerfile.ci @@ -29,7 +29,7 @@ RUN apt-get update \ git locales sudo openssh-client ca-certificates tar gzip parallel \ unzip zip bzip2 gnupg curl make pkg-config libssl-dev \ jq clang lld g++ shellcheck yamllint protobuf-compiler libprotobuf-dev \ - skopeo \ + skopeo openjdk-17-jdk \ --no-install-recommends \ && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - \ && echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" >> /etc/apt/sources.list.d/google-cloud-sdk.list \
309386b8281dc6fbd7e018c36a547ff65142eac8
Dom Dwyer
2022-12-13 16:57:38
silence spurious lint
This is by design! Clippy just doesn't see the plan.
null
chore: silence spurious lint This is by design! Clippy just doesn't see the plan.
diff --git a/ingester2/src/wal/rotate_task.rs b/ingester2/src/wal/rotate_task.rs index f459587380..55cf814da3 100644 --- a/ingester2/src/wal/rotate_task.rs +++ b/ingester2/src/wal/rotate_task.rs @@ -118,7 +118,13 @@ pub(crate) async fn periodic_rotation( // operation that doesn't benefit from contention at all). .then(|(p, data)| { let persist = persist.clone(); - async move { persist.queue_persist(p, data).await } + + // Enqueue and retain the notification receiver, which will be + // awaited later. + #[allow(clippy::async_yields_async)] + async move { + persist.queue_persist(p, data).await + } }) .collect::<Vec<_>>() .await;
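The `#[allow(clippy::async_yields_async)]` added in the diff above is needed because the async block deliberately yields another future (the persist completion handle) that is awaited later. A compilable sketch of the same pattern with invented names (`queue_persist` here stands in for the persist queue API, which is not shown in this diff):

```rust
use futures::{stream, StreamExt};

// Enqueues "work" and hands back a completion handle, which is itself a future.
async fn queue_persist(id: u32) -> impl std::future::Future<Output = u32> {
    async move { id * 2 }
}

#[tokio::main]
async fn main() {
    // Enqueue everything first, retaining the completion handles. The async
    // block intentionally yields a future, which is what the lint objects to.
    #[allow(clippy::async_yields_async)]
    let handles = stream::iter(1..=3u32)
        .then(|id| async move { queue_persist(id).await })
        .collect::<Vec<_>>()
        .await;

    // ... and only await the completion handles afterwards.
    for handle in handles {
        println!("persisted: {}", handle.await);
    }
}
```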
9a5424693cc8f166107990f7b5147f2b9a289756
Paul Dix
2025-01-27 12:31:49
Update trigger creation to validate plugin file present (#25918)
This updates trigger creation to load the plugin file before creating the trigger. Another small change is to make GitHub references use filenames and paths identical to what they would be in the plugin-dir. This makes it a little easier to develop against a local checkout of the plugins repo and then reference the same file with gh: once it's up on the repo.
null
feat: Update trigger creation to validate plugin file present (#25918) This updates trigger creation to load the plugin file before creating the trigger. Another small change is to make GitHub references use filenames and paths identical to what they would be in the plugin-dir. This makes it a little easier to develop against a local checkout of the plugins repo and then reference the same file with gh: once it's up on the repo.
diff --git a/influxdb3/src/commands/create.rs b/influxdb3/src/commands/create.rs index d9b97cc7d6..31876a980c 100644 --- a/influxdb3/src/commands/create.rs +++ b/influxdb3/src/commands/create.rs @@ -358,7 +358,8 @@ pub async fn command(config: Config) -> Result<(), Box<dyn Error>> { .collect::<HashMap<String, String>>() }); - client + //println!("does this work?"); + match client .api_v3_configure_processing_engine_trigger_create( database_name, &trigger_name, @@ -367,8 +368,14 @@ pub async fn command(config: Config) -> Result<(), Box<dyn Error>> { trigger_arguments, disabled, ) - .await?; - println!("Trigger {} created successfully", trigger_name); + .await + { + Err(e) => { + eprintln!("Failed to create trigger: {}", e); + return Err(e.into()); + } + Ok(_) => println!("Trigger {} created successfully", trigger_name), + } } } Ok(()) diff --git a/influxdb3/tests/server/cli.rs b/influxdb3/tests/server/cli.rs index f772c191ca..df59ab1d25 100644 --- a/influxdb3/tests/server/cli.rs +++ b/influxdb3/tests/server/cli.rs @@ -1374,7 +1374,7 @@ async fn test_load_wal_plugin_from_gh() { let db_name = "foo"; // this will pull from https://github.com/influxdata/influxdb3_plugins/blob/main/examples/wal_plugin/wal_plugin.py - let plugin_name = "gh:examples/wal_plugin"; + let plugin_name = "gh:examples/wal_plugin/wal_plugin.py"; // Run the test to make sure it'll load from GH let result = run_with_confirmation(&[ @@ -1530,6 +1530,42 @@ def process_request(influxdb3_local, query_parameters, request_headers, request_ assert_eq!(body, json!({"status": "updated"})); } +#[cfg(feature = "system-py")] +#[test_log::test(tokio::test)] +async fn test_trigger_create_validates_file_present() { + let plugin_dir = TempDir::new().unwrap(); + + let server = TestServer::configure() + .with_plugin_dir(plugin_dir.path().to_str().unwrap()) + .spawn() + .await; + let server_addr = server.client_addr(); + let db_name = "foo"; + + // Setup: create database and plugin + run_with_confirmation(&["create", "database", "--host", &server_addr, db_name]); + + let trigger_path = "foo"; + // creating the trigger should return a 404 error from github + let result = run_with_confirmation_and_err(&[ + "create", + "trigger", + "--database", + db_name, + "--host", + &server_addr, + "--plugin-filename", + "gh:not_a_file.py", + "--trigger-spec", + "request:foo", + "--trigger-arguments", + "test_arg=hello", + trigger_path, + ]); + debug!(result = ?result, "create trigger"); + assert_contains!(&result, "error reading file from Github: 404 Not Found"); +} + #[test_log::test(tokio::test)] async fn write_and_query_via_stdin() { let server = TestServer::spawn().await; diff --git a/influxdb3_processing_engine/src/lib.rs b/influxdb3_processing_engine/src/lib.rs index 29ded6f200..54fd786c9a 100644 --- a/influxdb3_processing_engine/src/lib.rs +++ b/influxdb3_processing_engine/src/lib.rs @@ -1,7 +1,7 @@ use crate::manager::{ProcessingEngineError, ProcessingEngineManager}; -use crate::plugins::Error; #[cfg(feature = "system-py")] use crate::plugins::PluginContext; +use crate::plugins::PluginError; use anyhow::Context; use bytes::Bytes; use hashbrown::HashMap; @@ -219,22 +219,23 @@ impl ProcessingEngineManagerImpl { } } - pub async fn read_plugin_code(&self, name: &str) -> Result<PluginCode, plugins::Error> { + pub async fn read_plugin_code(&self, name: &str) -> Result<PluginCode, plugins::PluginError> { // if the name starts with gh: then we need to get it from the public github repo at https://github.com/influxdata/influxdb3_plugins/tree/main if 
name.starts_with("gh:") { let plugin_path = name.strip_prefix("gh:").unwrap(); - // the filename should be the last part of the name after the last / - let plugin_name = plugin_path - .split('/') - .last() - .context("plugin name for github plugins must be <dir>/<name>")?; let url = format!( - "https://raw.githubusercontent.com/influxdata/influxdb3_plugins/main/{}/{}.py", - plugin_path, plugin_name + "https://raw.githubusercontent.com/influxdata/influxdb3_plugins/main/{}", + plugin_path ); let resp = reqwest::get(&url) .await .context("error getting plugin from github repo")?; + + // verify the response is a success + if !resp.status().is_success() { + return Err(PluginError::FetchingFromGithub(resp.status(), url)); + } + let resp_body = resp .text() .await @@ -326,6 +327,10 @@ impl ProcessingEngineManager for ProcessingEngineManagerImpl { let Some((db_id, db_schema)) = self.catalog.db_id_and_schema(db_name) else { return Err(ProcessingEngineError::DatabaseNotFound(db_name.to_string())); }; + + // validate that we can actually read the plugin file + self.read_plugin_code(&plugin_filename).await?; + let catalog_op = CatalogOp::CreateTrigger(TriggerDefinition { trigger_name, plugin_filename, @@ -597,7 +602,7 @@ impl ProcessingEngineManager for ProcessingEngineManagerImpl { &self, request: WalPluginTestRequest, query_executor: Arc<dyn QueryExecutor>, - ) -> Result<WalPluginTestResponse, plugins::Error> { + ) -> Result<WalPluginTestResponse, plugins::PluginError> { #[cfg(feature = "system-py")] { // create a copy of the catalog so we don't modify the original @@ -619,7 +624,7 @@ impl ProcessingEngineManager for ProcessingEngineManagerImpl { } #[cfg(not(feature = "system-py"))] - Err(plugins::Error::AnyhowError(anyhow::anyhow!( + Err(plugins::PluginError::AnyhowError(anyhow::anyhow!( "system-py feature not enabled" ))) } @@ -629,7 +634,7 @@ impl ProcessingEngineManager for ProcessingEngineManagerImpl { &self, request: SchedulePluginTestRequest, query_executor: Arc<dyn QueryExecutor>, - ) -> Result<SchedulePluginTestResponse, Error> { + ) -> Result<SchedulePluginTestResponse, PluginError> { #[cfg(feature = "system-py")] { // create a copy of the catalog so we don't modify the original @@ -657,7 +662,7 @@ impl ProcessingEngineManager for ProcessingEngineManagerImpl { } #[cfg(not(feature = "system-py"))] - Err(plugins::Error::AnyhowError(anyhow::anyhow!( + Err(plugins::PluginError::AnyhowError(anyhow::anyhow!( "system-py feature not enabled" ))) } diff --git a/influxdb3_processing_engine/src/manager.rs b/influxdb3_processing_engine/src/manager.rs index a7a0b8cc09..251aae4fd2 100644 --- a/influxdb3_processing_engine/src/manager.rs +++ b/influxdb3_processing_engine/src/manager.rs @@ -33,7 +33,7 @@ pub enum ProcessingEngineError { PluginNotFound(String), #[error("plugin error: {0}")] - PluginError(#[from] crate::plugins::Error), + PluginError(#[from] crate::plugins::PluginError), #[error("failed to shutdown trigger {trigger_name} in database {database}")] TriggerShutdownError { @@ -99,13 +99,13 @@ pub trait ProcessingEngineManager: Debug + Send + Sync + 'static { &self, request: WalPluginTestRequest, query_executor: Arc<dyn QueryExecutor>, - ) -> Result<WalPluginTestResponse, crate::plugins::Error>; + ) -> Result<WalPluginTestResponse, crate::plugins::PluginError>; async fn test_schedule_plugin( &self, request: SchedulePluginTestRequest, query_executor: Arc<dyn QueryExecutor>, - ) -> Result<SchedulePluginTestResponse, crate::plugins::Error>; + ) -> Result<SchedulePluginTestResponse, 
crate::plugins::PluginError>; async fn request_trigger( &self, diff --git a/influxdb3_processing_engine/src/plugins.rs b/influxdb3_processing_engine/src/plugins.rs index 44076cdd9e..a47e0aeffe 100644 --- a/influxdb3_processing_engine/src/plugins.rs +++ b/influxdb3_processing_engine/src/plugins.rs @@ -31,7 +31,7 @@ use thiserror::Error; use tokio::sync::mpsc; #[derive(Debug, Error)] -pub enum Error { +pub enum PluginError { #[error("invalid database {0}")] InvalidDatabase(String), @@ -68,6 +68,9 @@ pub enum Error { #[error("non-schedule plugin with schedule trigger: {0}")] NonSchedulePluginWithScheduleTrigger(String), + + #[error("error reading file from Github: {0} {1}")] + FetchingFromGithub(reqwest::StatusCode, String), } #[cfg(feature = "system-py")] @@ -101,11 +104,11 @@ pub(crate) fn run_schedule_plugin( time_provider: Arc<dyn TimeProvider>, context: PluginContext, plugin_receiver: mpsc::Receiver<ScheduleEvent>, -) -> Result<(), Error> { +) -> Result<(), PluginError> { // Ensure that the plugin is a schedule plugin let plugin_type = trigger_definition.trigger.plugin_type(); if !matches!(plugin_type, influxdb3_wal::PluginType::Schedule) { - return Err(Error::NonSchedulePluginWithScheduleTrigger(format!( + return Err(PluginError::NonSchedulePluginWithScheduleTrigger(format!( "{:?}", trigger_definition ))); @@ -201,7 +204,7 @@ mod python_plugin { pub(crate) async fn run_wal_contents_plugin( &self, mut receiver: Receiver<WalEvent>, - ) -> Result<(), Error> { + ) -> Result<(), PluginError> { info!(?self.trigger_definition.trigger_name, ?self.trigger_definition.database_name, ?self.trigger_definition.plugin_filename, "starting wal contents plugin"); loop { @@ -220,7 +223,7 @@ mod python_plugin { } } WalEvent::Shutdown(sender) => { - sender.send(()).map_err(|_| Error::FailedToShutdown)?; + sender.send(()).map_err(|_| PluginError::FailedToShutdown)?; break; } } @@ -234,7 +237,7 @@ mod python_plugin { mut receiver: Receiver<ScheduleEvent>, mut runner: ScheduleTriggerRunner, time_provider: Arc<dyn TimeProvider>, - ) -> Result<(), Error> { + ) -> Result<(), PluginError> { loop { let Some(next_run_instant) = runner.next_run_time() else { break; @@ -243,7 +246,7 @@ mod python_plugin { tokio::select! 
{ _ = time_provider.sleep_until(next_run_instant) => { let Some(schema) = self.write_buffer.catalog().db_schema(self.db_name.as_str()) else { - return Err(Error::MissingDb); + return Err(PluginError::MissingDb); }; runner.run_at_time(self, schema).await?; } @@ -254,7 +257,7 @@ mod python_plugin { break; } Some(ScheduleEvent::Shutdown(sender)) => { - sender.send(()).map_err(|_| Error::FailedToShutdown)?; + sender.send(()).map_err(|_| PluginError::FailedToShutdown)?; break; } } @@ -268,7 +271,7 @@ mod python_plugin { pub(crate) async fn run_request_plugin( &self, mut receiver: Receiver<RequestEvent>, - ) -> Result<(), Error> { + ) -> Result<(), PluginError> { info!(?self.trigger_definition.trigger_name, ?self.trigger_definition.database_name, ?self.trigger_definition.plugin_filename, "starting request plugin"); loop { @@ -282,7 +285,7 @@ mod python_plugin { self.write_buffer.catalog().db_schema(self.db_name.as_str()) else { error!(?self.trigger_definition, "missing db schema"); - return Err(Error::MissingDb); + return Err(PluginError::MissingDb); }; let result = execute_request_trigger( self.plugin_code.code().as_ref(), @@ -340,7 +343,7 @@ mod python_plugin { } } Some(RequestEvent::Shutdown(sender)) => { - sender.send(()).map_err(|_| Error::FailedToShutdown)?; + sender.send(()).map_err(|_| PluginError::FailedToShutdown)?; break; } } @@ -349,9 +352,12 @@ mod python_plugin { Ok(()) } - async fn process_wal_contents(&self, wal_contents: Arc<WalContents>) -> Result<(), Error> { + async fn process_wal_contents( + &self, + wal_contents: Arc<WalContents>, + ) -> Result<(), PluginError> { let Some(schema) = self.write_buffer.catalog().db_schema(self.db_name.as_str()) else { - return Err(Error::MissingDb); + return Err(PluginError::MissingDb); }; for wal_op in &wal_contents.ops { @@ -481,7 +487,7 @@ mod python_plugin { pub(crate) fn try_new( trigger_spec: &TriggerSpecificationDefinition, time_provider: Arc<dyn TimeProvider>, - ) -> Result<Self, Error> { + ) -> Result<Self, PluginError> { match trigger_spec { TriggerSpecificationDefinition::AllTablesWalWrite | TriggerSpecificationDefinition::SingleTableWalWrite { .. 
} => { @@ -538,7 +544,7 @@ mod python_plugin { &mut self, plugin: &TriggerPlugin, db_schema: Arc<DatabaseSchema>, - ) -> Result<(), Error> { + ) -> Result<(), PluginError> { let Some(trigger_time) = self.next_trigger_time else { return Err(anyhow!("running a cron trigger that is finished.").into()); }; @@ -584,7 +590,7 @@ pub(crate) fn run_test_wal_plugin( query_executor: Arc<dyn QueryExecutor>, code: String, request: WalPluginTestRequest, -) -> Result<WalPluginTestResponse, Error> { +) -> Result<WalPluginTestResponse, PluginError> { use data_types::NamespaceName; use influxdb3_wal::Gen1Duration; use influxdb3_write::write_buffer::validator::WriteValidator; @@ -592,7 +598,7 @@ pub(crate) fn run_test_wal_plugin( let database = request.database; let namespace = NamespaceName::new(database.clone()) - .map_err(|_e| Error::InvalidDatabase(database.clone()))?; + .map_err(|_e| PluginError::InvalidDatabase(database.clone()))?; // parse the lp into a write batch let validator = WriteValidator::initialize( namespace.clone(), @@ -606,7 +612,7 @@ pub(crate) fn run_test_wal_plugin( Precision::Nanosecond, )?; let data = data.convert_lines_to_buffer(Gen1Duration::new_1m()); - let db = catalog.db_schema(&database).ok_or(Error::MissingDb)?; + let db = catalog.db_schema(&database).ok_or(PluginError::MissingDb)?; let plugin_return_state = influxdb3_py_api::system_py::execute_python_with_batch( &code, @@ -722,15 +728,17 @@ pub(crate) fn run_test_schedule_plugin( query_executor: Arc<dyn QueryExecutor>, code: String, request: influxdb3_client::plugin_development::SchedulePluginTestRequest, -) -> Result<influxdb3_client::plugin_development::SchedulePluginTestResponse, Error> { +) -> Result<influxdb3_client::plugin_development::SchedulePluginTestResponse, PluginError> { let database = request.database; - let db = catalog.db_schema(&database).ok_or(Error::MissingDb)?; + let db = catalog.db_schema(&database).ok_or(PluginError::MissingDb)?; let cron_schedule = request.schedule.as_deref().unwrap_or("* * * * * *"); let schedule = cron::Schedule::from_str(cron_schedule)?; let Some(schedule_time) = schedule.after(&now_time.date_time()).next() else { - return Err(Error::CronScheduleNeverTriggers(cron_schedule.to_string())); + return Err(PluginError::CronScheduleNeverTriggers( + cron_schedule.to_string(), + )); }; let plugin_return_state = influxdb3_py_api::system_py::execute_schedule_trigger( diff --git a/influxdb3_server/src/http.rs b/influxdb3_server/src/http.rs index 730ac992d5..e1299aa03f 100644 --- a/influxdb3_server/src/http.rs +++ b/influxdb3_server/src/http.rs @@ -220,7 +220,7 @@ pub enum Error { PythonPluginsNotEnabled, #[error("Plugin error: {0}")] - Plugin(#[from] influxdb3_processing_engine::plugins::Error), + Plugin(#[from] influxdb3_processing_engine::plugins::PluginError), #[error("Processing engine error: {0}")] ProcessingEngine(#[from] influxdb3_processing_engine::manager::ProcessingEngineError),
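The validation added in the commit above boils down to resolving the `gh:` reference against the public plugins repo and refusing to create the trigger when the fetch fails. A rough, self-contained sketch of that check (the function name and error wording are illustrative; only the URL base and the 404 handling come from the diff):

```rust
use anyhow::{bail, Context, Result};

async fn read_gh_plugin(reference: &str) -> Result<String> {
    // "gh:examples/wal_plugin/wal_plugin.py" -> path inside the plugins repo,
    // mirroring the layout expected under --plugin-dir.
    let path = reference
        .strip_prefix("gh:")
        .context("expected a gh:<path> reference")?;
    let url =
        format!("https://raw.githubusercontent.com/influxdata/influxdb3_plugins/main/{path}");

    let resp = reqwest::get(&url)
        .await
        .context("error getting plugin from github repo")?;

    // Fail fast on e.g. 404 so the trigger is never written to the catalog.
    if !resp.status().is_success() {
        bail!("error reading file from Github: {} {}", resp.status(), url);
    }

    resp.text()
        .await
        .context("error reading plugin body from github repo")
}
```

Running a check like this before persisting the `CreateTrigger` catalog op is what turns a missing file into the "error reading file from Github: 404 Not Found" message asserted by the new CLI test.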
d1cbbd27b149fe7800910c4271b4f56a95a73aa5
Dom Dwyer
2023-06-21 15:47:32
config partition query rate limit
Allow the partition fetch queries to be (optionally) rate limited via runtime config.
null
feat(compactor): config partition query rate limit Allow the partition fetch queries to be (optionally) rate limited via runtime config.
diff --git a/clap_blocks/src/compactor.rs b/clap_blocks/src/compactor.rs index 7e69abaaf1..35ce013788 100644 --- a/clap_blocks/src/compactor.rs +++ b/clap_blocks/src/compactor.rs @@ -235,4 +235,15 @@ pub struct CompactorConfig { action )] pub max_num_columns_per_table: usize, + + /// Limit the number of partition fetch queries to at most the specified + /// number of queries per second. + /// + /// Queries are smoothed over the full second. + #[clap( + long = "max-partition-fetch-queries-per-second", + env = "INFLUXDB_IOX_MAX_PARTITION_FETCH_QUERIES_PER_SECOND", + action + )] + pub max_partition_fetch_queries_per_second: Option<usize>, } diff --git a/compactor/src/components/hardcoded.rs b/compactor/src/components/hardcoded.rs index 2c129930b5..4e9278a14a 100644 --- a/compactor/src/components/hardcoded.rs +++ b/compactor/src/components/hardcoded.rs @@ -47,7 +47,9 @@ use super::{ logging::LoggingPartitionDoneSinkWrapper, metrics::MetricsPartitionDoneSinkWrapper, mock::MockPartitionDoneSink, PartitionDoneSink, }, - partition_files_source::{catalog::CatalogPartitionFilesSource, PartitionFilesSource}, + partition_files_source::{ + catalog::CatalogPartitionFilesSource, rate_limit::QueryRateLimit, PartitionFilesSource, + }, partition_filter::{ and::AndPartitionFilter, greater_matching_files::GreaterMatchingFilesPartitionFilter, greater_size_matching_files::GreaterSizeMatchingFilesPartitionFilter, @@ -265,10 +267,16 @@ fn make_partition_info_source(config: &Config) -> Arc<dyn PartitionInfoSource> { } fn make_partition_files_source(config: &Config) -> Arc<dyn PartitionFilesSource> { - Arc::new(CatalogPartitionFilesSource::new( - config.backoff_config.clone(), - Arc::clone(&config.catalog), - )) + match config.max_partition_fetch_queries_per_second { + Some(rps) => Arc::new(CatalogPartitionFilesSource::new( + config.backoff_config.clone(), + QueryRateLimit::new(Arc::clone(&config.catalog), rps), + )), + None => Arc::new(CatalogPartitionFilesSource::new( + config.backoff_config.clone(), + Arc::clone(&config.catalog), + )), + } } fn make_round_info_source(config: &Config) -> Arc<dyn RoundInfoSource> { diff --git a/compactor/src/components/report.rs b/compactor/src/components/report.rs index 3f2e34934f..20f6300d46 100644 --- a/compactor/src/components/report.rs +++ b/compactor/src/components/report.rs @@ -39,6 +39,7 @@ pub fn log_config(config: &Config) { all_errors_are_fatal, max_num_columns_per_table, max_num_files_per_plan, + max_partition_fetch_queries_per_second, } = &config; let (shard_cfg_n_shards, shard_cfg_shard_id) = match shard_config { @@ -85,6 +86,7 @@ pub fn log_config(config: &Config) { all_errors_are_fatal, max_num_columns_per_table, max_num_files_per_plan, + max_partition_fetch_queries_per_second, "config", ); } diff --git a/compactor/src/config.rs b/compactor/src/config.rs index 9561c135e0..f6bbfc3e08 100644 --- a/compactor/src/config.rs +++ b/compactor/src/config.rs @@ -140,6 +140,12 @@ pub struct Config { /// max number of files per compaction plan pub max_num_files_per_plan: usize, + + /// Limit the number of partition fetch queries to at most the specified + /// number of queries per second. + /// + /// Queries are smoothed over the full second. 
+ pub max_partition_fetch_queries_per_second: Option<usize>, } impl Config { diff --git a/compactor_test_utils/src/lib.rs b/compactor_test_utils/src/lib.rs index 54cca70f54..a61d508247 100644 --- a/compactor_test_utils/src/lib.rs +++ b/compactor_test_utils/src/lib.rs @@ -152,6 +152,7 @@ impl TestSetupBuilder<false> { all_errors_are_fatal: true, max_num_columns_per_table: 200, max_num_files_per_plan: 200, + max_partition_fetch_queries_per_second: None, }; let bytes_written = Arc::new(AtomicUsize::new(0)); diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs index 6de669f22e..97e3694a0a 100644 --- a/influxdb_iox/src/commands/run/all_in_one.rs +++ b/influxdb_iox/src/commands/run/all_in_one.rs @@ -502,6 +502,7 @@ impl Config { process_once: false, max_num_columns_per_table: 200, max_num_files_per_plan: 200, + max_partition_fetch_queries_per_second: Some(500), }; let querier_config = QuerierConfig { diff --git a/ioxd_compactor/src/lib.rs b/ioxd_compactor/src/lib.rs index b60fa147b0..71821ce398 100644 --- a/ioxd_compactor/src/lib.rs +++ b/ioxd_compactor/src/lib.rs @@ -192,6 +192,8 @@ pub async fn create_compactor_server_type( all_errors_are_fatal: false, max_num_columns_per_table: compactor_config.max_num_columns_per_table, max_num_files_per_plan: compactor_config.max_num_files_per_plan, + max_partition_fetch_queries_per_second: compactor_config + .max_partition_fetch_queries_per_second, }); Arc::new(CompactorServerType::new(
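The new `--max-partition-fetch-queries-per-second` option feeds a `QueryRateLimit` wrapper around the catalog, whose implementation is not part of this diff. One way to read "smoothed over the full second" is to space successive calls at 1/N-second intervals rather than admitting an N-query burst at the top of each second; a small sketch of that idea with tokio, using invented names (this is not the compactor's `QueryRateLimit`, which also wraps the catalog client):

```rust
use std::time::Duration;
use tokio::{sync::Mutex, time::Instant};

/// Spaces callers 1/N seconds apart instead of admitting bursts.
struct SmoothedRateLimit {
    interval: Duration,
    next_allowed: Mutex<Instant>,
}

impl SmoothedRateLimit {
    fn new(per_second: u32) -> Self {
        Self {
            interval: Duration::from_secs(1) / per_second,
            next_allowed: Mutex::new(Instant::now()),
        }
    }

    /// Waits until this caller's slot within the current second arrives.
    async fn acquire(&self) {
        let mut next = self.next_allowed.lock().await;
        let now = Instant::now();
        if *next > now {
            tokio::time::sleep_until(*next).await;
        }
        *next = (*next).max(now) + self.interval;
    }
}

#[tokio::main]
async fn main() {
    // 2 queries/second -> calls are at least ~500ms apart.
    let limit = SmoothedRateLimit::new(2);
    for i in 0..4 {
        limit.acquire().await;
        println!("partition fetch {i}");
    }
}
```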
c0b99011c8fd06335573814a11405885cb1506ed
Carol (Nichols || Goulding)
2023-03-20 17:22:08
Make some names more consistent and less redundant
I can't ever remember whether it's "compact or split" or "split or compact", so now I think it's always "split or compact". Also remove "FilesTo" from the enum variants because "FilesTo" is in the overall enum name.
null
refactor: Make some names more consistent and less redundant I can't ever remember whether it's "compact or split" or "split or compact", so now I think it's always "split or compact". Also remove "FilesTo" from the enum variants because "FilesTo" is in the overall enum name.
diff --git a/compactor2/src/components/file_classifier/split_based.rs b/compactor2/src/components/file_classifier/split_based.rs index 0895ebff6d..b6bdde39a1 100644 --- a/compactor2/src/components/file_classifier/split_based.rs +++ b/compactor2/src/components/file_classifier/split_based.rs @@ -4,7 +4,7 @@ use data_types::{CompactionLevel, ParquetFile}; use crate::{ components::{files_split::FilesSplit, split_or_compact::SplitOrCompact}, - file_classification::{FileClassification, FilesToCompactOrSplit}, + file_classification::{FileClassification, FilesToSplitOrCompact}, partition_info::PartitionInfo, RoundInfo, }; @@ -151,14 +151,14 @@ where self.upgrade_split.apply(files_to_compact, target_level); // See if we need to split start-level files due to over compaction size limit - let (files_to_compact_or_split, other_files) = + let (files_to_split_or_compact, other_files) = self.split_or_compact .apply(partition_info, files_to_compact, target_level); files_to_keep.extend(other_files); FileClassification { target_level, - files_to_compact_or_split, + files_to_split_or_compact, files_to_upgrade, files_to_keep, } @@ -183,7 +183,7 @@ fn file_classification_for_many_files( FileClassification { target_level, - files_to_compact_or_split: FilesToCompactOrSplit::FilesToCompact(files_to_compact), + files_to_split_or_compact: FilesToSplitOrCompact::Compact(files_to_compact), files_to_upgrade: vec![], files_to_keep: vec![], } diff --git a/compactor2/src/components/split_or_compact/files_to_compact.rs b/compactor2/src/components/split_or_compact/files_to_compact.rs index 30f0f32e88..7869895627 100644 --- a/compactor2/src/components/split_or_compact/files_to_compact.rs +++ b/compactor2/src/components/split_or_compact/files_to_compact.rs @@ -246,12 +246,12 @@ mod tests { #[test] fn test_compact_empty() { let files = vec![]; - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE, files, CompactionLevel::Initial); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert!(files_to_compact.is_empty()); assert!(files_to_further_split.is_empty()); @@ -265,7 +265,7 @@ mod tests { let files = create_l1_files(1); // Target is L0 while all files are in L1 --> panic - let _keep_and_compact_or_split = + let _keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE, files, CompactionLevel::Initial); } @@ -294,7 +294,7 @@ mod tests { ); // panic because it only handle at most 2 levels next to each other - let _keep_and_compact_or_split = + let _keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE, files, CompactionLevel::FileNonOverlapped); } @@ -319,7 +319,7 @@ mod tests { ); // size limit > total size --> files to compact = all L0s and overalapped L1s - let _keep_and_compact_or_split = + let _keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 5 + 1, files, CompactionLevel::FileNonOverlapped); } @@ -342,12 +342,12 @@ mod tests { ); // size limit > total size --> files to compact = all L0s and overalapped L1s - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 5 + 1, files, CompactionLevel::FileNonOverlapped); - let 
files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 5); assert_eq!(files_to_further_split.len(), 0); @@ -390,12 +390,12 @@ mod tests { ); // size limit too small to compact anything - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE, files, CompactionLevel::FileNonOverlapped); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 0); assert_eq!(files_to_further_split.len(), 2); @@ -440,12 +440,12 @@ mod tests { ); // size limit < total size --> only enough to compact L0.1 with L1.12 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 3, files, CompactionLevel::FileNonOverlapped); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 2); assert_eq!(files_to_further_split.len(), 0); @@ -490,12 +490,12 @@ mod tests { ); // size limit < total size --> only enough to compact L0.1, L0.2 with L1.12 and L1.13 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 4, files, CompactionLevel::FileNonOverlapped); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 4); assert_eq!(files_to_further_split.len(), 0); @@ -547,12 +547,12 @@ mod tests { // -------------------- // size limit = MAX_SIZE to force the first choice: splitting L0.1 with L1.11 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE, files.clone(), CompactionLevel::FileNonOverlapped); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = 
keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 0); assert_eq!(files_to_further_split.len(), 2); @@ -580,15 +580,15 @@ mod tests { // -------------------- // size limit = MAX_SIZE * 3 to force the second choice, L0.1 with L1.11 - let keep_and_compact_or_split = limit_files_to_compact( + let keep_and_split_or_compact = limit_files_to_compact( MAX_SIZE * 3, files.clone(), CompactionLevel::FileNonOverlapped, ); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 2); assert_eq!(files_to_further_split.len(), 0); @@ -617,15 +617,15 @@ mod tests { // -------------------- // size limit = MAX_SIZE * 4 to force the second choice, L0.1 with L1.11, because it still not enough to for second choice - let keep_and_compact_or_split = limit_files_to_compact( + let keep_and_split_or_compact = limit_files_to_compact( MAX_SIZE * 4, files.clone(), CompactionLevel::FileNonOverlapped, ); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 2); assert_eq!(files_to_further_split.len(), 0); @@ -653,15 +653,15 @@ mod tests { // -------------------- // size limit = MAX_SIZE * 5 to force the third choice, L0.1, L0.2 with L1.11, L1.12, L1.13 - let keep_and_compact_or_split = limit_files_to_compact( + let keep_and_split_or_compact = limit_files_to_compact( MAX_SIZE * 5, files.clone(), CompactionLevel::FileNonOverlapped, ); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 5); assert_eq!(files_to_further_split.len(), 0); @@ -688,12 +688,12 @@ mod tests { // -------------------- // size limit >= total size to force the forth choice compacting everything: L0.1, L0.2, L0.3 with L1.11, L1.12, L1.13 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 6, files, CompactionLevel::FileNonOverlapped); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 6); assert_eq!(files_to_further_split.len(), 0); 
@@ -748,12 +748,12 @@ mod tests { // -------------------- // size limit = MAX_SIZE to force the first choice: splitting L1.1 & L2.11 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE, files.clone(), CompactionLevel::Final); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 0); assert_eq!(files_to_further_split.len(), 2); @@ -781,12 +781,12 @@ mod tests { // -------------------- // size limit = MAX_SIZE * 3 to force the second choice,: compact L1.1 with L2.11 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 3, files.clone(), CompactionLevel::Final); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 2); assert_eq!(files_to_further_split.len(), 0); @@ -814,12 +814,12 @@ mod tests { // -------------------- // size limit = MAX_SIZE * 3 to force the second choice, compact L1.1 with L1.12, because it still not enough to for third choice - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 3, files.clone(), CompactionLevel::Final); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 2); assert_eq!(files_to_further_split.len(), 0); @@ -847,12 +847,12 @@ mod tests { // -------------------- // size limit = MAX_SIZE * 5 to force the third choice, L1.1, L1.2 with L2.11, L2.12, L2.13 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 5, files.clone(), CompactionLevel::Final); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 4); assert_eq!(files_to_further_split.len(), 0); @@ -880,12 +880,12 @@ mod tests { // -------------------- // size limit >= total size to force the forth choice compacting everything: L1.1, L1.2, L1.3 with L2.11, L2.12, L2.13 - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(MAX_SIZE * 
6, files, CompactionLevel::Final); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let files_to_keep = keep_and_split_or_compact.files_to_keep(); assert_eq!(files_to_compact.len(), 6); assert_eq!(files_to_further_split.len(), 0); diff --git a/compactor2/src/components/split_or_compact/logging.rs b/compactor2/src/components/split_or_compact/logging.rs index 1048b50c1d..ca56d7defe 100644 --- a/compactor2/src/components/split_or_compact/logging.rs +++ b/compactor2/src/components/split_or_compact/logging.rs @@ -3,7 +3,7 @@ use std::fmt::Display; use data_types::{CompactionLevel, ParquetFile}; use observability_deps::tracing::info; -use crate::{file_classification::FilesToCompactOrSplit, partition_info::PartitionInfo}; +use crate::{file_classification::FilesToSplitOrCompact, partition_info::PartitionInfo}; use super::SplitOrCompact; @@ -42,7 +42,7 @@ where partition_info: &PartitionInfo, files: Vec<ParquetFile>, target_level: CompactionLevel, - ) -> (FilesToCompactOrSplit, Vec<ParquetFile>) { + ) -> (FilesToSplitOrCompact, Vec<ParquetFile>) { let (files_to_split_or_compact, files_to_keep) = self.inner.apply(partition_info, files, target_level); diff --git a/compactor2/src/components/split_or_compact/metrics.rs b/compactor2/src/components/split_or_compact/metrics.rs index 22407656d0..22a31c4113 100644 --- a/compactor2/src/components/split_or_compact/metrics.rs +++ b/compactor2/src/components/split_or_compact/metrics.rs @@ -4,7 +4,7 @@ use data_types::{CompactionLevel, ParquetFile}; use metric::{Registry, U64Counter, U64Histogram, U64HistogramOptions}; use super::SplitOrCompact; -use crate::{file_classification::FilesToCompactOrSplit, partition_info::PartitionInfo}; +use crate::{file_classification::FilesToSplitOrCompact, partition_info::PartitionInfo}; const METRIC_NAME_FILES_TO_SPLIT: &str = "iox_compactor_files_to_split"; const METRIC_NAME_SPLIT_DECISION_COUNT: &str = "iox_compactor_split_decision"; @@ -66,26 +66,26 @@ where partition_info: &PartitionInfo, files: Vec<ParquetFile>, target_level: CompactionLevel, - ) -> (FilesToCompactOrSplit, Vec<ParquetFile>) { - let (files_to_compact_or_split, files_not_to_split) = + ) -> (FilesToSplitOrCompact, Vec<ParquetFile>) { + let (files_to_split_or_compact, files_not_to_split) = self.inner.apply(partition_info, files, target_level); - match &files_to_compact_or_split { - FilesToCompactOrSplit::FilesToSplit(inner_files_to_split) => { + match &files_to_split_or_compact { + FilesToSplitOrCompact::Split(inner_files_to_split) => { if !inner_files_to_split.is_empty() { self.files_to_split .record(inner_files_to_split.len() as u64); self.split_decision_count.inc(1); } } - FilesToCompactOrSplit::FilesToCompact(inner_files_to_compact) => { + FilesToSplitOrCompact::Compact(inner_files_to_compact) => { if !inner_files_to_compact.is_empty() { self.compact_decision_count.inc(1); } } } - (files_to_compact_or_split, files_not_to_split) + (files_to_split_or_compact, files_not_to_split) } } @@ -125,10 +125,10 @@ mod tests { SplitCompact::new(MAX_FILE, MAX_FILE as u64), &registry, ); - let (files_to_compact_or_split, _files_to_keep) = + let (files_to_split_or_compact, _files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Initial); 
- assert!(files_to_compact_or_split.is_empty()); + assert!(files_to_split_or_compact.is_empty()); assert_histogram!( registry, @@ -160,10 +160,10 @@ mod tests { SplitCompact::new(MAX_FILE, MAX_FILE as u64), &registry, ); - let (files_to_compact_or_split, _files_to_keep) = + let (files_to_split_or_compact, _files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::FileNonOverlapped); - assert_eq!(files_to_compact_or_split.files_to_split_len(), 1); + assert_eq!(files_to_split_or_compact.files_to_split_len(), 1); assert_histogram!( registry, @@ -196,10 +196,10 @@ mod tests { SplitCompact::new(MAX_FILE * 3, MAX_FILE as u64), &registry, ); - let (files_to_compact_or_split, _files_to_keep) = + let (files_to_split_or_compact, _files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Final); - assert_eq!(files_to_compact_or_split.files_to_compact_len(), 3); + assert_eq!(files_to_split_or_compact.files_to_compact_len(), 3); assert_histogram!( registry, diff --git a/compactor2/src/components/split_or_compact/mod.rs b/compactor2/src/components/split_or_compact/mod.rs index 1cd6675d10..aa4ac014ab 100644 --- a/compactor2/src/components/split_or_compact/mod.rs +++ b/compactor2/src/components/split_or_compact/mod.rs @@ -2,7 +2,7 @@ use std::fmt::{Debug, Display}; use data_types::{CompactionLevel, ParquetFile}; -use crate::{file_classification::FilesToCompactOrSplit, PartitionInfo}; +use crate::{file_classification::FilesToSplitOrCompact, PartitionInfo}; pub mod files_to_compact; pub mod large_files_to_split; @@ -20,5 +20,5 @@ pub trait SplitOrCompact: Debug + Display + Send + Sync { partition_info: &PartitionInfo, files: Vec<ParquetFile>, target_level: CompactionLevel, - ) -> (FilesToCompactOrSplit, Vec<ParquetFile>); + ) -> (FilesToSplitOrCompact, Vec<ParquetFile>); } diff --git a/compactor2/src/components/split_or_compact/split_compact.rs b/compactor2/src/components/split_or_compact/split_compact.rs index 21853191dc..7313683905 100644 --- a/compactor2/src/components/split_or_compact/split_compact.rs +++ b/compactor2/src/components/split_or_compact/split_compact.rs @@ -2,7 +2,7 @@ use std::fmt::Display; use data_types::{CompactionLevel, ParquetFile}; -use crate::{file_classification::FilesToCompactOrSplit, partition_info::PartitionInfo}; +use crate::{file_classification::FilesToSplitOrCompact, partition_info::PartitionInfo}; use super::{ files_to_compact::limit_files_to_compact, @@ -56,11 +56,11 @@ impl SplitOrCompact for SplitCompact { _partition_info: &PartitionInfo, files: Vec<ParquetFile>, target_level: CompactionLevel, - ) -> (FilesToCompactOrSplit, Vec<ParquetFile>) { + ) -> (FilesToSplitOrCompact, Vec<ParquetFile>) { // Compact all in one run if total size is less than max_compact_size let total_size: i64 = files.iter().map(|f| f.file_size_bytes).sum(); if total_size as usize <= self.max_compact_size { - return (FilesToCompactOrSplit::FilesToCompact(files), vec![]); + return (FilesToSplitOrCompact::Compact(files), vec![]); } // (1) This function identifies all start-level files that overlap with more than one target-level files @@ -70,23 +70,23 @@ impl SplitOrCompact for SplitCompact { if !files_to_split.is_empty() { // These files must be split before further compaction return ( - FilesToCompactOrSplit::FilesToSplit(files_to_split), + FilesToSplitOrCompact::Split(files_to_split), files_not_to_split, ); } // (2) No start level split is needed, which means every start-level file overlaps with at most one target-level file // Need to limit number of files to compact to stay 
under compact size limit - let keep_and_compact_or_split = + let keep_and_split_or_compact = limit_files_to_compact(self.max_compact_size, files_not_to_split, target_level); - let files_to_compact = keep_and_compact_or_split.files_to_compact(); - let files_to_further_split = keep_and_compact_or_split.files_to_further_split(); - let mut files_to_keep = keep_and_compact_or_split.files_to_keep(); + let files_to_compact = keep_and_split_or_compact.files_to_compact(); + let files_to_further_split = keep_and_split_or_compact.files_to_further_split(); + let mut files_to_keep = keep_and_split_or_compact.files_to_keep(); if !files_to_compact.is_empty() { return ( - FilesToCompactOrSplit::FilesToCompact(files_to_compact), + FilesToSplitOrCompact::Compact(files_to_compact), files_to_keep, ); } @@ -100,10 +100,7 @@ impl SplitOrCompact for SplitCompact { files_to_keep.extend(files_not_to_split); - ( - FilesToCompactOrSplit::FilesToSplit(files_to_split), - files_to_keep, - ) + (FilesToSplitOrCompact::Split(files_to_split), files_to_keep) } } @@ -132,10 +129,10 @@ mod tests { let files = vec![]; let p_info = Arc::new(PartitionInfoBuilder::new().build()); let split_compact = SplitCompact::new(FILE_SIZE, FILE_SIZE as u64); - let (files_to_compact_or_split, files_to_keep) = + let (files_to_split_or_compact, files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Initial); - assert!(files_to_compact_or_split.is_empty()); + assert!(files_to_split_or_compact.is_empty()); assert!(files_to_keep.is_empty()); } @@ -161,7 +158,7 @@ mod tests { let p_info = Arc::new(PartitionInfoBuilder::new().build()); let split_compact = SplitCompact::new(FILE_SIZE, FILE_SIZE as u64); - let (_files_to_compact_or_split, _files_to_keep) = + let (_files_to_split_or_compact, _files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Final); } @@ -187,13 +184,13 @@ mod tests { FILE_SIZE - (FILE_SIZE as f64 * PERCENTAGE_OF_SOFT_EXCEEDED) as usize - 30; let max_compact_size = 3 * max_desired_file_size; let split_compact = SplitCompact::new(max_compact_size, max_desired_file_size as u64); - let (files_to_compact_or_split, files_to_keep) = + let (files_to_split_or_compact, files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Final); // need to split files - let files_to_compact = files_to_compact_or_split.files_to_compact(); - let files_to_split = files_to_compact_or_split.files_to_split(); - let split_times = files_to_compact_or_split.split_times(); + let files_to_compact = files_to_split_or_compact.files_to_compact(); + let files_to_split = files_to_split_or_compact.files_to_split(); + let split_times = files_to_split_or_compact.split_times(); assert!(files_to_compact.is_empty()); assert_eq!(files_to_split.len(), 2); assert_eq!(files_to_keep.len(), 2); @@ -205,7 +202,7 @@ mod tests { // See layout of 2 set of files insta::assert_yaml_snapshot!( - format_files_split("files to split", &files_to_compact_or_split.files_to_split() , "files to keep:", &files_to_keep), + format_files_split("files to split", &files_to_split_or_compact.files_to_split() , "files to keep:", &files_to_keep), @r###" --- - files to split @@ -242,15 +239,15 @@ mod tests { // size limit > total size --> compact all let p_info = Arc::new(PartitionInfoBuilder::new().build()); let split_compact = SplitCompact::new(FILE_SIZE * 6 + 1, FILE_SIZE as u64); - let (files_to_compact_or_split, files_to_keep) = + let (files_to_split_or_compact, files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::FileNonOverlapped); - 
assert_eq!(files_to_compact_or_split.files_to_compact_len(), 5); + assert_eq!(files_to_split_or_compact.files_to_compact_len(), 5); assert!(files_to_keep.is_empty()); // See layout of 2 set of files insta::assert_yaml_snapshot!( - format_files_split("files to compact", &files_to_compact_or_split.files_to_compact() , "files to keep:", &files_to_keep), + format_files_split("files to compact", &files_to_split_or_compact.files_to_compact() , "files to keep:", &files_to_keep), @r###" --- - files to compact @@ -287,15 +284,15 @@ mod tests { // hit size limit -> split start_level files that overlap with more than 1 target_level files let p_info = Arc::new(PartitionInfoBuilder::new().build()); let split_compact = SplitCompact::new(FILE_SIZE, FILE_SIZE as u64); - let (files_to_compact_or_split, files_to_keep) = + let (files_to_split_or_compact, files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::FileNonOverlapped); - assert_eq!(files_to_compact_or_split.files_to_split_len(), 1); + assert_eq!(files_to_split_or_compact.files_to_split_len(), 1); assert_eq!(files_to_keep.len(), 4); // See layout of 2 set of files insta::assert_yaml_snapshot!( - format_files_split("files to compact or split:", &files_to_compact_or_split.files_to_split(), "files to keep:", &files_to_keep), + format_files_split("files to compact or split:", &files_to_split_or_compact.files_to_split(), "files to keep:", &files_to_keep), @r###" --- - "files to compact or split:" @@ -332,15 +329,15 @@ mod tests { // hit size limit and nthign to split --> limit number if files to compact let p_info = Arc::new(PartitionInfoBuilder::new().build()); let split_compact = SplitCompact::new(FILE_SIZE * 3, FILE_SIZE as u64); - let (files_to_compact_or_split, files_to_keep) = + let (files_to_split_or_compact, files_to_keep) = split_compact.apply(&p_info, files, CompactionLevel::Final); - assert_eq!(files_to_compact_or_split.files_to_compact_len(), 3); + assert_eq!(files_to_split_or_compact.files_to_compact_len(), 3); assert_eq!(files_to_keep.len(), 1); // See layout of 2 set of files insta::assert_yaml_snapshot!( - format_files_split("files to compact or split:", &files_to_compact_or_split.files_to_compact() , "files to keep:", &files_to_keep), + format_files_split("files to compact or split:", &files_to_split_or_compact.files_to_compact() , "files to keep:", &files_to_keep), @r###" --- - "files to compact or split:" diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs index 92082ec4a1..b8a0b6d58f 100644 --- a/compactor2/src/driver.rs +++ b/compactor2/src/driver.rs @@ -14,7 +14,7 @@ use crate::{ Components, }, error::{DynError, ErrorKind, SimpleError}, - file_classification::{FileClassification, FileToSplit, FilesToCompactOrSplit}, + file_classification::{FileClassification, FileToSplit, FilesToSplitOrCompact}, partition_info::PartitionInfo, PlanIR, }; @@ -151,10 +151,10 @@ async fn compact_partition( /// - Split L1s each of which overlaps with more than 1 L2s into many L1s, each overlaps with at most one L2 files /// . Each branch does find non-overlaps and upgragde files to avoid unecessary recompacting. /// The actually split files: -/// 1. files_to _keep: do not compact these files because they are already higher than target level +/// 1. files_to_keep: do not compact these files because they are already higher than target level /// 2. 
files_to_upgrade: upgrade this initial-level files to target level because they are not overlap with /// any target-level and initial-level files and large enough (> desired max size) -/// 3. files_to_compact_or_split.: this is either files to compact or split and will be compacted or split accordingly +/// 3. files_to_split_or_compact: this is either files to split or files to compact and will be handled accordingly /// /// Example: 4 files: two L0s, two L1s and one L2 @@ -227,18 +227,18 @@ async fn try_compact_partition( // compaction let FileClassification { target_level, - files_to_compact_or_split, + files_to_split_or_compact, files_to_upgrade, files_to_keep, } = components .file_classifier .classify(&partition_info, &round_info, branch); - // Skip partition if it has neither files to upgrade nor files to compact or split + // Skip partition if it has neither files to upgrade nor files to split or compact if files_to_upgrade.is_empty() && !components .post_classification_partition_filter - .apply(&partition_info, &files_to_compact_or_split.files()) + .apply(&partition_info, &files_to_split_or_compact.files()) .await? { return Ok(()); @@ -246,7 +246,7 @@ async fn try_compact_partition( // Compact let created_file_params = run_plans( - &files_to_compact_or_split, + &files_to_split_or_compact, &partition_info, &components, target_level, @@ -264,7 +264,7 @@ async fn try_compact_partition( // Update the catalog to reflect the newly created files, soft delete the compacted files and // update the upgraded files - let files_to_delete = files_to_compact_or_split.files(); + let files_to_delete = files_to_split_or_compact.files(); let (created_files, upgraded_files) = update_catalog( Arc::clone(&components), partition_id, @@ -294,7 +294,7 @@ async fn try_compact_partition( /// Compact of split give files async fn run_plans( - files: &FilesToCompactOrSplit, + files: &FilesToSplitOrCompact, partition_info: &Arc<PartitionInfo>, components: &Arc<Components>, target_level: CompactionLevel, @@ -302,7 +302,7 @@ async fn run_plans( scratchpad_ctx: &mut dyn Scratchpad, ) -> Result<Vec<ParquetFileParams>, DynError> { match files { - FilesToCompactOrSplit::FilesToCompact(files) => { + FilesToSplitOrCompact::Compact(files) => { run_compaction_plan( files, partition_info, @@ -313,7 +313,7 @@ async fn run_plans( ) .await } - FilesToCompactOrSplit::FilesToSplit(files) => { + FilesToSplitOrCompact::Split(files) => { run_split_plans( files, partition_info, diff --git a/compactor2/src/file_classification.rs b/compactor2/src/file_classification.rs index 63291014e7..f0a3bab98e 100644 --- a/compactor2/src/file_classification.rs +++ b/compactor2/src/file_classification.rs @@ -12,8 +12,8 @@ pub struct FileClassification { /// The target level of file resulting from compaction pub target_level: CompactionLevel, - /// Decision on what files should be compacted or split. See [`FilesToCompactOrSplit`] for more details. - pub files_to_compact_or_split: FilesToCompactOrSplit, + /// Decision on what files should be split or compacted. See [`FilesToSplitOrCompact`] for more details. 
+ pub files_to_split_or_compact: FilesToSplitOrCompact, /// Non-overlapped files that should be upgraded to the target /// level without rewriting (for example they are of sufficient @@ -27,69 +27,69 @@ pub struct FileClassification { impl FileClassification { pub fn files_to_compact_len(&self) -> usize { - match &self.files_to_compact_or_split { - FilesToCompactOrSplit::FilesToCompact(files) => files.len(), - FilesToCompactOrSplit::FilesToSplit(_) => 0, + match &self.files_to_split_or_compact { + FilesToSplitOrCompact::Compact(files) => files.len(), + FilesToSplitOrCompact::Split(_) => 0, } } pub fn files_to_split_len(&self) -> usize { - match &self.files_to_compact_or_split { - FilesToCompactOrSplit::FilesToCompact(_files) => 0, - FilesToCompactOrSplit::FilesToSplit(files) => files.len(), + match &self.files_to_split_or_compact { + FilesToSplitOrCompact::Compact(_files) => 0, + FilesToSplitOrCompact::Split(files) => files.len(), } } } /// Files to compact or to split #[derive(Debug, PartialEq, Eq)] -pub enum FilesToCompactOrSplit { +pub enum FilesToSplitOrCompact { /// These files should be compacted together, ideally forming a single output file. /// Due to constraints such as the maximum desired output file size and the "leading edge" optimization /// `FilesToCompact` may actually produce multiple output files. - FilesToCompact(Vec<ParquetFile>), + Compact(Vec<ParquetFile>), /// The input files should be split into multiple output files, at the specified times - FilesToSplit(Vec<FileToSplit>), + Split(Vec<FileToSplit>), } -impl FilesToCompactOrSplit { +impl FilesToSplitOrCompact { // Return true if thelist is empty pub fn is_empty(&self) -> bool { match self { - Self::FilesToCompact(files) => files.is_empty(), - Self::FilesToSplit(files) => files.is_empty(), + Self::Compact(files) => files.is_empty(), + Self::Split(files) => files.is_empty(), } } /// Return lentgh of files to compact pub fn files_to_compact_len(&self) -> usize { match self { - Self::FilesToCompact(files) => files.len(), - Self::FilesToSplit(_) => 0, + Self::Compact(files) => files.len(), + Self::Split(_) => 0, } } /// Return lentgh of files to split pub fn files_to_split_len(&self) -> usize { match self { - Self::FilesToCompact(_) => 0, - Self::FilesToSplit(files) => files.len(), + Self::Compact(_) => 0, + Self::Split(files) => files.len(), } } /// Return files to compact pub fn files_to_compact(&self) -> Vec<ParquetFile> { match self { - Self::FilesToCompact(files) => files.clone(), - Self::FilesToSplit(_) => vec![], + Self::Compact(files) => files.clone(), + Self::Split(_) => vec![], } } /// Return files to split pub fn files_to_split(&self) -> Vec<ParquetFile> { match self { - Self::FilesToCompact(_) => vec![], - Self::FilesToSplit(files) => { + Self::Compact(_) => vec![], + Self::Split(files) => { let files: Vec<ParquetFile> = files.iter().map(|f| f.file.clone()).collect(); files } @@ -99,16 +99,16 @@ impl FilesToCompactOrSplit { // return split times of files to split pub fn split_times(&self) -> Vec<Vec<i64>> { match self { - Self::FilesToCompact(_) => vec![], - Self::FilesToSplit(files) => files.iter().map(|f| f.split_times.clone()).collect(), + Self::Compact(_) => vec![], + Self::Split(files) => files.iter().map(|f| f.split_times.clone()).collect(), } } /// Return files of either type pub fn files(&self) -> Vec<ParquetFile> { match self { - Self::FilesToCompact(files) => files.clone(), - Self::FilesToSplit(files) => files.iter().map(|f| f.file.clone()).collect(), + Self::Compact(files) => files.clone(), + 
Self::Split(files) => files.iter().map(|f| f.file.clone()).collect(), } } }
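The rename above reshapes the classification decision into a two-variant enum. The sketch below is a simplified, self-contained version of that shape, not the compactor's real code: `File` and `FileToSplit` stand in for `data_types::ParquetFile` and the compactor's split descriptor, and only two of the helper methods are reproduced.

```rust
// A simplified, self-contained sketch of the renamed decision type.
// `File` and `FileToSplit` are stand-ins for `data_types::ParquetFile`
// and the compactor's split descriptor; only two helpers are reproduced.
#[derive(Debug, Clone)]
struct File {
    id: i64,
}

#[derive(Debug, Clone)]
struct FileToSplit {
    file: File,
    split_times: Vec<i64>,
}

/// Either compact these files together, or split them at the given times first.
#[derive(Debug)]
enum FilesToSplitOrCompact {
    Compact(Vec<File>),
    Split(Vec<FileToSplit>),
}

impl FilesToSplitOrCompact {
    /// True if the decision carries no files at all.
    fn is_empty(&self) -> bool {
        match self {
            Self::Compact(files) => files.is_empty(),
            Self::Split(files) => files.is_empty(),
        }
    }

    /// All files referenced by either variant (in the driver these become the
    /// files that are soft-deleted after a successful round).
    fn files(&self) -> Vec<File> {
        match self {
            Self::Compact(files) => files.clone(),
            Self::Split(files) => files.iter().map(|f| f.file.clone()).collect(),
        }
    }
}

fn main() {
    let decision = FilesToSplitOrCompact::Compact(vec![File { id: 1 }, File { id: 2 }]);
    assert!(!decision.is_empty());
    assert_eq!(decision.files().len(), 2);
    println!("{decision:?}");
}
```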
f97e1765d8ef351a35d8359e58583573745a1493
Marco Neumann
2023-04-11 16:35:29
do not lose schema for empty query responses (#7506)
Within our query tests and our CLI, we used to print out empty query responses as: ```text ++ ++ ``` This is pretty misleading. Why are there no columns?! The reason is that while Flight provides us with schema information, we often have zero record batches (because why would the querier send an empty batch). Now let's fix this by creating an empty batch on the client side based on the schema data we've received. This way, people know that there are columns but no rows: ```text +-------+--------+------+------+ | count | system | time | town | +-------+--------+------+------+ +-------+--------+------+------+ ``` An alternative fix would be to pass the schema in addition to `Vec<RecordBatch>` to the formatting code, but that seemed to be more effort.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
fix: do not lose schema for empty query responses (#7506) Within our query tests and our CLI, we used to print out empty query responses as: ```text ++ ++ ``` This is pretty misleading. Why are there no columns?! The reason is that while Flight provides us with schema information, we often have zero record batches (because why would the querier send an empty batch). Now let's fix this by creating an empty batch on the client side based on the schema data we've received. This way, people know that there are columns but no rows: ```text +-------+--------+------+------+ | count | system | time | town | +-------+--------+------+------+ +-------+--------+------+------+ ``` An alternative fix would be to pass the schema in addition to `Vec<RecordBatch>` to the formatting code, but that seemed to be more effort. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/influxdb_iox/src/commands/query.rs b/influxdb_iox/src/commands/query.rs index 48c9e19ba7..3c081cad70 100644 --- a/influxdb_iox/src/commands/query.rs +++ b/influxdb_iox/src/commands/query.rs @@ -1,3 +1,4 @@ +use arrow::record_batch::RecordBatch; use clap::ValueEnum; use futures::TryStreamExt; use influxdb_iox_client::format::influxql::write_columnar; @@ -83,14 +84,24 @@ pub async fn command(connection: Connection, config: Config) -> Result<()> { query_lang, } = config; - let query_results = match query_lang { + let mut query_results = match query_lang { QueryLanguage::Sql => client.sql(namespace, query).await, QueryLanguage::InfluxQL => client.influxql(namespace, query).await, }?; // It might be nice to do some sort of streaming write // rather than buffering the whole thing. - let batches: Vec<_> = query_results.try_collect().await?; + let mut batches: Vec<_> = (&mut query_results).try_collect().await?; + + // read schema AFTER collection, otherwise the stream does not have the schema data yet + let schema = query_results + .inner() + .schema() + .cloned() + .ok_or(influxdb_iox_client::flight::Error::NoSchema)?; + + // preserve schema so we print table headers even for empty results + batches.push(RecordBatch::new_empty(schema)); match (query_lang, &format) { (QueryLanguage::InfluxQL, OutputFormat::Pretty) => { diff --git a/influxdb_iox/tests/end_to_end_cases/all_in_one.rs b/influxdb_iox/tests/end_to_end_cases/all_in_one.rs index a5842f4be8..1e53b98bb2 100644 --- a/influxdb_iox/tests/end_to_end_cases/all_in_one.rs +++ b/influxdb_iox/tests/end_to_end_cases/all_in_one.rs @@ -33,7 +33,8 @@ async fn smoke() { // run query let sql = format!("select * from {table_name}"); - let batches = run_sql(sql, namespace, all_in_one.querier_grpc_connection(), None).await; + let (batches, _schema) = + run_sql(sql, namespace, all_in_one.querier_grpc_connection(), None).await; let expected = [ "+------+------+--------------------------------+-----+", @@ -79,7 +80,8 @@ async fn ephemeral_mode() { // run query // do not select time becasue it changes every time let sql = format!("select tag1, tag2, val from {table_name}"); - let batches = run_sql(sql, namespace, all_in_one.querier_grpc_connection(), None).await; + let (batches, _schema) = + run_sql(sql, namespace, all_in_one.querier_grpc_connection(), None).await; let expected = [ "+------+------+-----+", diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index ea5fb5b822..5395a75c53 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -15,6 +15,7 @@ -- InfluxQL: SELECT * FROM non_existent; ++ ++ +++ -- InfluxQL: SELECT *::tag, f64 FROM m0; -- Results After Sorting +------------------+----------------------+-------+-------+------+ @@ -359,8 +360,10 @@ Error while planning query: Error during planning: invalid number of arguments f | m0 | 2022-10-31T02:00:10Z | 21.2 | +------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND str = 1; -++ -++ ++------------------+------+-----+ +| iox::measurement | time | f64 | ++------------------+------+-----+ ++------------------+------+-----+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19 + 0.5 OR non_existent = 1; +------------------+----------------------+------+ | iox::measurement | time | f64 | @@ -368,11 +371,15 @@ Error while planning query: Error 
during planning: invalid number of arguments f | m0 | 2022-10-31T02:00:10Z | 21.2 | +------------------+----------------------+------+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND non_existent = 1; -++ -++ ++------------------+------+-----+ +| iox::measurement | time | f64 | ++------------------+------+-----+ ++------------------+------+-----+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 AND f64 =~ /foo/; -++ -++ ++------------------+------+-----+ +| iox::measurement | time | f64 | ++------------------+------+-----+ ++------------------+------+-----+ -- InfluxQL: SELECT f64 FROM m0 WHERE f64 >= 19.5 OR f64 =~ /foo/; +------------------+----------------------+------+ | iox::measurement | time | f64 | @@ -667,6 +674,7 @@ Error while planning query: Error during planning: invalid number of arguments f -- InfluxQL: SELECT MEAN(foo) FROM cpu; ++ ++ +++ -- InfluxQL: SELECT MEAN(usage_idle) + MEAN(foo) FROM cpu GROUP BY cpu; +------------------+----------------------+-----------+--------------------------+ | iox::measurement | time | cpu | mean_usage_idle_mean_foo | @@ -686,6 +694,7 @@ Error while planning query: Error during planning: invalid number of arguments f -- InfluxQL: SELECT MEAN(foo) FROM cpu GROUP BY cpu; ++ ++ +++ -- InfluxQL: SELECT COUNT(f64), SUM(f64) FROM m0 GROUP BY TIME(30s) FILL(none); +------------------+----------------------+-------+------+ | iox::measurement | time | count | sum | @@ -1079,8 +1088,10 @@ Error while planning query: Error during planning: mixing aggregate and non-aggr | disk | 2022-10-31T02:01:30Z | | disk1s5 | | 3 | +------------------+----------------------+-----------+---------+-------+---------+ -- InfluxQL: SELECT COUNT(usage_idle) FROM cpu WHERE time >= now() - 2m GROUP BY TIME(30s) FILL(null); -++ -++ ++------------------+------+-------+ +| iox::measurement | time | count | ++------------------+------+-------+ ++------------------+------+-------+ -- InfluxQL: SELECT f64 FROM m0 WHERE tag0 = 'val00' LIMIT 3; +------------------+----------------------+------+ | iox::measurement | time | f64 | @@ -1240,8 +1251,10 @@ Error while planning query: Error during planning: mixing aggregate and non-aggr | cpu | 1970-01-01T00:00:00Z | cpu1 | 2 | +------------------+----------------------+-----------+-------+ -- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu OFFSET 1; -++ -++ ++------------------+------+-----+-------+ +| iox::measurement | time | cpu | count | ++------------------+------+-----+-------+ ++------------------+------+-----+-------+ -- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu LIMIT 1; +------------------+----------------------+-----------+-------+ | iox::measurement | time | cpu | count | @@ -1251,8 +1264,10 @@ Error while planning query: Error during planning: mixing aggregate and non-aggr | cpu | 1970-01-01T00:00:00Z | cpu1 | 2 | +------------------+----------------------+-----------+-------+ -- InfluxQL: SELECT COUNT(usage_idle) FROM cpu GROUP BY cpu OFFSET 1; -++ -++ ++------------------+------+-----+-------+ +| iox::measurement | time | cpu | count | ++------------------+------+-----+-------+ ++------------------+------+-----+-------+ -- InfluxQL: SELECT COUNT(usage_idle) FROM cpu WHERE time >= '2022-10-31T02:00:00Z' AND time < '2022-10-31T02:05:00Z' GROUP BY TIME(30s) LIMIT 2; +------------------+----------------------+-------+ | iox::measurement | time | count | diff --git a/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected index 
0bcc725724..3432920092 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/pushdown.sql.expected @@ -233,8 +233,10 @@ ---------- -- SQL: SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00'); -- Results After Sorting -++ -++ ++-------+--------+------+------+ +| count | system | time | town | ++-------+--------+------+------+ ++-------+--------+------+------+ -- SQL: EXPLAIN SELECT * from restaurant where 5.0 < system and town != 'tewsbury' and system < 7.0 and (count = 632 or town = 'reading') and time > to_timestamp('1970-01-01T00:00:00.000000130+00:00'); -- Results After Normalizing UUIDs ---------- diff --git a/test_helpers_end_to_end/src/client.rs b/test_helpers_end_to_end/src/client.rs index d431b4fdcc..80f404499f 100644 --- a/test_helpers_end_to_end/src/client.rs +++ b/test_helpers_end_to_end/src/client.rs @@ -1,5 +1,5 @@ //! Client helpers for writing end to end ng tests -use arrow::record_batch::RecordBatch; +use arrow::{datatypes::SchemaRef, record_batch::RecordBatch}; use data_types::{NamespaceId, TableId}; use dml::{DmlMeta, DmlWrite}; use futures::TryStreamExt; @@ -84,7 +84,7 @@ pub async fn try_run_sql( namespace: impl Into<String>, querier_connection: Connection, authorization: Option<&str>, -) -> Result<Vec<RecordBatch>, influxdb_iox_client::flight::Error> { +) -> Result<(Vec<RecordBatch>, SchemaRef), influxdb_iox_client::flight::Error> { let mut client = influxdb_iox_client::flight::Client::new(querier_connection); if let Some(authorization) = authorization { client.add_header("authorization", authorization).unwrap(); @@ -94,11 +94,18 @@ pub async fn try_run_sql( // Normally this would be done one per connection, not per query client.handshake().await?; - client - .sql(namespace.into(), sql_query.into()) - .await? - .try_collect() - .await + let mut stream = client.sql(namespace.into(), sql_query.into()).await?; + + let batches = (&mut stream).try_collect().await?; + + // read schema AFTER collection, otherwise the stream does not have the schema data yet + let schema = stream + .inner() + .schema() + .cloned() + .ok_or(influxdb_iox_client::flight::Error::NoSchema)?; + + Ok((batches, schema)) } /// Runs a InfluxQL query using the flight API on the specified connection. @@ -107,7 +114,7 @@ pub async fn try_run_influxql( namespace: impl Into<String>, querier_connection: Connection, authorization: Option<&str>, -) -> Result<Vec<RecordBatch>, influxdb_iox_client::flight::Error> { +) -> Result<(Vec<RecordBatch>, SchemaRef), influxdb_iox_client::flight::Error> { let mut client = influxdb_iox_client::flight::Client::new(querier_connection); if let Some(authorization) = authorization { client.add_header("authorization", authorization).unwrap(); @@ -117,11 +124,20 @@ pub async fn try_run_influxql( // Normally this would be done one per connection, not per query client.handshake().await?; - client + let mut stream = client .influxql(namespace.into(), influxql_query.into()) - .await? - .try_collect() - .await + .await?; + + let batches = (&mut stream).try_collect().await?; + + // read schema AFTER collection, otherwise the stream does not have the schema data yet + let schema = stream + .inner() + .schema() + .cloned() + .ok_or(influxdb_iox_client::flight::Error::NoSchema)?; + + Ok((batches, schema)) } /// Runs a SQL query using the flight API on the specified connection. 
@@ -132,7 +148,7 @@ pub async fn run_sql( namespace: impl Into<String>, querier_connection: Connection, authorization: Option<&str>, -) -> Vec<RecordBatch> { +) -> (Vec<RecordBatch>, SchemaRef) { try_run_sql(sql, namespace, querier_connection, authorization) .await .expect("Error executing sql query") @@ -146,7 +162,7 @@ pub async fn run_influxql( namespace: impl Into<String>, querier_connection: Connection, authorization: Option<&str>, -) -> Vec<RecordBatch> { +) -> (Vec<RecordBatch>, SchemaRef) { try_run_influxql( influxql.clone(), namespace, diff --git a/test_helpers_end_to_end/src/snapshot_comparison.rs b/test_helpers_end_to_end/src/snapshot_comparison.rs index e18524bfdd..08fa5191c2 100644 --- a/test_helpers_end_to_end/src/snapshot_comparison.rs +++ b/test_helpers_end_to_end/src/snapshot_comparison.rs @@ -1,6 +1,7 @@ mod queries; use crate::{run_sql, snapshot_comparison::queries::TestQueries, try_run_influxql, MiniCluster}; +use arrow::record_batch::RecordBatch; use arrow_flight::error::FlightError; use snafu::{OptionExt, ResultExt, Snafu}; use std::{ @@ -219,7 +220,7 @@ async fn run_query( ) -> Result<Vec<String>> { let query_text = query.text(); - let results = match language { + let (mut batches, schema) = match language { Language::Sql => { run_sql( query_text, @@ -248,6 +249,7 @@ async fn run_query( } } }; + batches.push(RecordBatch::new_empty(schema)); - Ok(query.normalize_results(results)) + Ok(query.normalize_results(batches)) } diff --git a/test_helpers_end_to_end/src/steps.rs b/test_helpers_end_to_end/src/steps.rs index 83fee5ae39..3a1d56952e 100644 --- a/test_helpers_end_to_end/src/steps.rs +++ b/test_helpers_end_to_end/src/steps.rs @@ -382,13 +382,14 @@ where Step::Query { sql, expected } => { info!("====Begin running SQL query: {}", sql); // run query - let batches = run_sql( + let (mut batches, schema) = run_sql( sql, state.cluster.namespace(), state.cluster.querier().querier_grpc_connection(), None, ) .await; + batches.push(RecordBatch::new_empty(schema)); assert_batches_sorted_eq!(expected, &batches); info!("====Done running"); } @@ -439,20 +440,21 @@ where } => { info!("====Begin running SQL query (authenticated): {}", sql); // run query - let batches = run_sql( + let (mut batches, schema) = run_sql( sql, state.cluster.namespace(), state.cluster().querier().querier_grpc_connection(), Some(authorization.as_str()), ) .await; + batches.push(RecordBatch::new_empty(schema)); assert_batches_sorted_eq!(expected, &batches); info!("====Done running"); } Step::VerifiedQuery { sql, verify } => { info!("====Begin running SQL verified query: {}", sql); // run query - let batches = run_sql( + let (batches, _schema) = run_sql( sql, state.cluster.namespace(), state.cluster.querier().querier_grpc_connection(), @@ -465,13 +467,14 @@ where Step::InfluxQLQuery { query, expected } => { info!("====Begin running InfluxQL query: {}", query); // run query - let batches = run_influxql( + let (mut batches, schema) = run_influxql( query, state.cluster.namespace(), state.cluster.querier().querier_grpc_connection(), None, ) .await; + batches.push(RecordBatch::new_empty(schema)); assert_batches_sorted_eq!(expected, &batches); info!("====Done running"); } @@ -525,13 +528,14 @@ where } => { info!("====Begin running InfluxQL query: {}", query); // run query - let batches = run_influxql( + let (mut batches, schema) = run_influxql( query, state.cluster.namespace(), state.cluster.querier().querier_grpc_connection(), Some(authorization), ) .await; + batches.push(RecordBatch::new_empty(schema)); 
assert_batches_sorted_eq!(expected, &batches); info!("====Done running"); }
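The core of the fix is appending a zero-row batch that carries the Flight-reported schema, so table formatters still print column headers. A minimal arrow-only sketch of that idea follows; the `count`/`town` columns are hypothetical, not the real query output.

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;

fn main() {
    // Schema as reported by the Flight stream, even when no batches arrive.
    let schema = Arc::new(Schema::new(vec![
        Field::new("count", DataType::Int64, true),
        Field::new("town", DataType::Utf8, true),
    ]));

    // The querier sent zero record batches for this query.
    let mut batches: Vec<RecordBatch> = vec![];

    // Client-side fix: append a zero-row batch carrying the schema so the
    // table formatter still prints the column headers.
    batches.push(RecordBatch::new_empty(Arc::clone(&schema)));

    assert_eq!(batches.len(), 1);
    assert_eq!(batches[0].num_rows(), 0);
    assert_eq!(batches[0].schema().fields().len(), 2);
}
```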
76a67a2c6562d010ef8410d456101ad50c2a6c94
Marco Neumann
2023-06-23 12:53:04
add counter-example back
Was removed in #8033 but that made @alamb sad.
null
docs: add counter-example back Was removed in #8033 but that made @alamb sad.
diff --git a/docs/query_processing.md b/docs/query_processing.md index 8a6d954451..a482dd5594 100644 --- a/docs/query_processing.md +++ b/docs/query_processing.md @@ -87,6 +87,34 @@ flowchart TB LogicalPlan --> OtherOut ``` +We are trying to avoid ending up with something like this: + +```mermaid +flowchart TB + classDef out color:#020A47,fill:#9394FF,stroke-width:0 + classDef intermediate color:#020A47,fill:#D6F622,stroke-width:0 + classDef in color:#020A47,fill:#5EE4E4,stroke-width:0 + + SQL[SQL]:::in + InfluxQL[InfluxQL]:::in + OtherIn["Other (possibly in the future)"]:::in + + IngesterData[Ingester Data]:::out + ParquetFile[Parquet File]:::out + OtherOut["Other (possibly in the future)"]:::out + + SQL --> IngesterData + SQL --> ParquetFile + SQL --> OtherOut + + InfluxQL --> IngesterData + InfluxQL --> ParquetFile + InfluxQL --> OtherOut + + OtherIn --> IngesterData + OtherIn --> ParquetFile + OtherIn --> OtherOut +``` ## Frontend
0a31afd00d8556d25ee82f4d6528276a17e8cc50
Dom Dwyer
2023-05-16 17:46:20
correct time range buckets
Turns out there are 60 seconds in a minute, not 3,600.
null
fix: correct time range buckets Turns out there are 60 seconds in a minute, not 3,600.
diff --git a/ingester/src/persist/file_metrics.rs b/ingester/src/persist/file_metrics.rs index d12024b43f..3e4926b4fd 100644 --- a/ingester/src/persist/file_metrics.rs +++ b/ingester/src/persist/file_metrics.rs @@ -7,7 +7,7 @@ use metric::{ use super::completion_observer::{CompletedPersist, PersistCompletionObserver}; -const MINUTES: Duration = Duration::from_secs(60 * 60); +const MINUTES: Duration = Duration::from_secs(60); #[derive(Debug)] pub(crate) struct ParquetFileInstrumentation<T> {
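For illustration, a tiny sketch of the constant being fixed, using only `std::time::Duration`; the bucket bounds at the end are hypothetical and are not the ingester's actual histogram configuration.

```rust
use std::time::Duration;

// The buggy constant: 60 * 60 seconds is an hour, not a minute.
const NOT_MINUTES: Duration = Duration::from_secs(60 * 60);

// The fix: one minute really is 60 seconds.
const MINUTES: Duration = Duration::from_secs(60);

fn main() {
    assert_eq!(NOT_MINUTES, Duration::from_secs(3_600)); // actually an hour
    assert_eq!(MINUTES * 30, Duration::from_secs(1_800)); // a 30-minute bucket

    // A hypothetical set of time-range bucket bounds expressed in minutes.
    let buckets: Vec<Duration> = [5u32, 10, 30, 60].iter().map(|&m| MINUTES * m).collect();
    assert_eq!(buckets.last(), Some(&Duration::from_secs(3_600)));
}
```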
b694b9f494a4c04aad7011bbc8bd8e333c105e0a
Dom Dwyer
2023-08-28 18:00:21
Merkle tree content hash for cache
Adds a (currently unused) NamespaceCache decorator that observes the post-merge content of the cache to maintain a content hash. This makes use of a Merkle Search Tree (https://inria.hal.science/hal-02303490) to track the CRDT content of the cache in a deterministic structure that allows for synchronisation between peers (itself a CRDT). The hash of two routers' NamespaceCache will be equal iff their cache contents are equal - this can be used to (very cheaply) identify out-of-sync routers, and trigger convergence. The MST structure used here provides functionality to compare two compact MST representations, and identify subsets of the cache that are out-of-sync, allowing for cheap convergence. Note this content hash only covers the tables, their set of columns, and those column schemas - this reflects the fact that only these values may currently be converged by gossip. Future work will enable full convergence of all fields.
null
feat(router): Merkle tree content hash for cache Adds a (currently unused) NamespaceCache decorator that observes the post-merge content of the cache to maintain a content hash. This makes use of a Merkle Search Tree (https://inria.hal.science/hal-02303490) to track the CRDT content of the cache in a deterministic structure that allows for synchronisation between peers (itself a CRDT). The hash of two routers' NamespaceCache will be equal iff their cache contents are equal - this can be used to (very cheaply) identify out-of-sync routers, and trigger convergence. The MST structure used here provides functionality to compare two compact MST representations, and identify subsets of the cache that are out-of-sync, allowing for cheap convergence. Note this content hash only covers the tables, their set of columns, and those column schemas - this reflects the fact that only these values may currently be converged by gossip. Future work will enable full convergence of all fields.
diff --git a/Cargo.lock b/Cargo.lock index c3fcbe4fe2..73d30440bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3460,6 +3460,17 @@ dependencies = [ "autocfg", ] +[[package]] +name = "merkle-search-tree" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6f5f8e25bd481489f8e0389fd4ced21ec6cbaf1a30e3204102df45dc6b04ce5" +dependencies = [ + "base64 0.21.3", + "siphasher 0.3.11", + "tracing", +] + [[package]] name = "metric" version = "0.1.0" @@ -4787,6 +4798,7 @@ dependencies = [ "iox_catalog", "iox_tests", "iox_time", + "merkle-search-tree", "metric", "mutable_batch", "mutable_batch_lp", diff --git a/router/Cargo.toml b/router/Cargo.toml index 59dd66b668..e45ad05c8a 100644 --- a/router/Cargo.toml +++ b/router/Cargo.toml @@ -20,6 +20,7 @@ hashbrown = { workspace = true } hyper = "0.14" iox_catalog = { path = "../iox_catalog" } iox_time = { path = "../iox_time" } +merkle-search-tree = { version = "0.6.0", features = ["tracing"] } metric = { path = "../metric" } mutable_batch = { path = "../mutable_batch" } mutable_batch_lp = { path = "../mutable_batch_lp" } diff --git a/router/src/gossip/anti_entropy/merkle.rs b/router/src/gossip/anti_entropy/merkle.rs new file mode 100644 index 0000000000..bfb240f6be --- /dev/null +++ b/router/src/gossip/anti_entropy/merkle.rs @@ -0,0 +1,113 @@ +//! Maintain a [Merkle Search Tree] covering the content of a +//! [`NamespaceCache`]. +//! +//! [Merkle Search Tree]: https://inria.hal.science/hal-02303490 + +use std::sync::Arc; + +use async_trait::async_trait; +use data_types::{NamespaceName, NamespaceSchema}; +use merkle_search_tree::MerkleSearchTree; +use parking_lot::Mutex; + +use crate::namespace_cache::{ChangeStats, NamespaceCache}; + +/// A [`NamespaceCache`] decorator that maintains a content hash / consistency +/// proof. +/// +/// This [`MerkleTree`] tracks the content of the underlying [`NamespaceCache`] +/// delegate, maintaining a compact, serialisable representation in a +/// [MerkleSearchTree] that can be used to perform efficient differential +/// convergence (anti-entropy) of peers to provide eventual consistency. +/// +/// # Merge Correctness +/// +/// The inner [`NamespaceCache`] implementation MUST commutatively & +/// deterministically merge two [`NamespaceSchema`] to converge (monotonically) +/// towards the same result (gossip payloads are CmRDTs). +/// +/// # Portability +/// +/// This implementation relies on the rust [`Hash`] implementation, which is +/// specifically defined as being allowed to differ across platforms (for +/// example, with differing endianness) and across different Rust complier +/// versions. +/// +/// If two nodes are producing differing hashes for the same underlying content, +/// they will appear to never converge. +#[derive(Debug)] +pub struct MerkleTree<T> { + inner: T, + + mst: Mutex<MerkleSearchTree<NamespaceName<'static>, NamespaceSchema>>, +} + +impl<T> MerkleTree<T> { + /// Initialise a new [`MerkleTree`] that generates a content hash covering + /// `inner`. + pub fn new(inner: T) -> Self { + Self { + inner, + mst: Mutex::new(MerkleSearchTree::default()), + } + } + + /// Return a 128-bit hash describing the content of the inner `T`. + /// + /// This hash only covers a subset of schema fields (see + /// [`NamespaceContentHash`]). 
+ pub fn content_hash(&self) -> merkle_search_tree::digest::RootHash { + self.mst.lock().root_hash().clone() + } +} + +#[async_trait] +impl<T> NamespaceCache for MerkleTree<T> +where + T: NamespaceCache, +{ + type ReadError = T::ReadError; + + async fn get_schema( + &self, + namespace: &NamespaceName<'static>, + ) -> Result<Arc<NamespaceSchema>, Self::ReadError> { + self.inner.get_schema(namespace).await + } + + fn put_schema( + &self, + namespace: NamespaceName<'static>, + schema: NamespaceSchema, + ) -> (Arc<NamespaceSchema>, ChangeStats) { + // Pass the namespace into the inner storage, and evaluate the merged + // return value (the new content of the cache). + let (schema, diff) = self.inner.put_schema(namespace.clone(), schema); + + // Intercept the the resulting cache entry state and merge it into the + // merkle tree. + self.mst.lock().upsert(namespace, &schema); + + // And pass through the return value to the caller. + (schema, diff) + } +} + +/// A [`NamespaceSchema`] decorator that produces a content hash covering fields +/// that SHOULD be converged across gossip peers. +#[derive(Debug)] +struct NamespaceContentHash<'a>(&'a NamespaceSchema); + +impl<'a> std::hash::Hash for NamespaceContentHash<'a> { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + // Technically the ID does not need to be covered by the content hash + // (the namespace name -> namespace ID is immutable and asserted + // elsewhere) but it's not harmful to include it, and would drive + // detection of a broken mapping invariant. + self.0.id.hash(state); + + // The set of tables, and their schemas MUST form part of the content + // hash as they are part of the content that must be converged. + self.0.tables.hash(state); + } +} diff --git a/router/src/gossip/anti_entropy/mod.rs b/router/src/gossip/anti_entropy/mod.rs new file mode 100644 index 0000000000..8ec7e94688 --- /dev/null +++ b/router/src/gossip/anti_entropy/mod.rs @@ -0,0 +1,3 @@ +//! Anti-entropy primitives providing eventual consistency over gossip. + +pub mod merkle; diff --git a/router/src/gossip/mod.rs b/router/src/gossip/mod.rs index 1a988f083c..96dd72602f 100644 --- a/router/src/gossip/mod.rs +++ b/router/src/gossip/mod.rs @@ -47,6 +47,7 @@ //! [`NamespaceCache`]: crate::namespace_cache::NamespaceCache //! [`SchemaTx`]: gossip_schema::handle::SchemaTx +pub mod anti_entropy; pub mod namespace_cache; pub mod schema_change_observer; pub mod traits;
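A small sketch of the consistency-check idea, mirroring the `upsert`/`root_hash` calls from the diff above. The string keys and integer values are illustrative stand-ins for `NamespaceName`/`NamespaceSchema`; it assumes they satisfy the merkle-search-tree crate's trait bounds and that the returned `RootHash` supports the comparisons shown.

```rust
use merkle_search_tree::MerkleSearchTree;

fn main() {
    // Two peers each maintain an MST over their cache content.
    let mut peer_a = MerkleSearchTree::default();
    let mut peer_b = MerkleSearchTree::default();

    // Both observe the same (key, value) pairs, possibly in different orders;
    // the MST structure is a deterministic function of content only.
    peer_a.upsert("ns_bananas", &42);
    peer_a.upsert("ns_platanos", &7);
    peer_b.upsert("ns_platanos", &7);
    peer_b.upsert("ns_bananas", &42);

    // Equal content yields equal root hashes - a cheap consistency check.
    let hash_a = peer_a.root_hash().clone();
    let hash_b = peer_b.root_hash().clone();
    assert_eq!(hash_a, hash_b);

    // A divergent peer is detected by a differing root hash, which would
    // trigger anti-entropy convergence.
    peer_b.upsert("ns_apples", &1);
    assert_ne!(hash_a, peer_b.root_hash().clone());
}
```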
9358ec74db2ab6c3d7cf6035b4d61962afb24664
Marco Neumann
2023-08-10 18:32:55
remove `Predicate` usage from `QueryNamespace` (#8468)
For #8097.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: remove `Predicate` usage from `QueryNamespace` (#8468) For #8097. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index bb1864ffdb..987b9205e9 100644 --- a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -21,7 +21,11 @@ use arrow::{ }; use async_trait::async_trait; use data_types::{ChunkId, ChunkOrder, TransitionPartitionId}; -use datafusion::{error::DataFusionError, physical_plan::Statistics, prelude::SessionContext}; +use datafusion::{ + error::DataFusionError, + physical_plan::Statistics, + prelude::{Expr, SessionContext}, +}; use exec::IOxSessionContext; use once_cell::sync::Lazy; use parquet_file::storage::ParquetExecInput; @@ -146,9 +150,9 @@ pub type QueryText = Box<dyn std::fmt::Display + Send + Sync>; #[async_trait] pub trait QueryNamespace: QueryNamespaceMeta + Debug + Send + Sync { /// Returns a set of chunks within the partition with data that may match the provided - /// predicate. + /// filter expression. /// - /// If possible, chunks which have no rows that can possibly match the predicate may be omitted. + /// If possible, chunks which have no rows that can possibly match the filter may be omitted. /// /// If projection is `None`, returned chunks will include all columns of its original data. /// Otherwise, returned chunks will include PK columns (tags and time) and columns specified in @@ -157,7 +161,7 @@ pub trait QueryNamespace: QueryNamespaceMeta + Debug + Send + Sync { async fn chunks( &self, table_name: &str, - predicate: &Predicate, + filters: &[Expr], projection: Option<&Vec<usize>>, ctx: IOxSessionContext, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError>; diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index 49a33be39f..92864d842a 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -62,7 +62,7 @@ pub struct TestDatabase { column_names: Arc<Mutex<Option<StringSetRef>>>, /// The predicate passed to the most recent call to `chunks()` - chunks_predicate: Mutex<Predicate>, + chunks_predicate: Mutex<Vec<Expr>>, /// Retention time ns. 
retention_time_ns: Option<i64>, @@ -104,7 +104,7 @@ impl TestDatabase { } /// Return the most recent predicate passed to get_chunks() - pub fn get_chunks_predicate(&self) -> Predicate { + pub fn get_chunks_predicate(&self) -> Vec<Expr> { self.chunks_predicate.lock().clone() } @@ -129,13 +129,14 @@ impl QueryNamespace for TestDatabase { async fn chunks( &self, table_name: &str, - predicate: &Predicate, + filters: &[Expr], _projection: Option<&Vec<usize>>, _ctx: IOxSessionContext, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { // save last predicate - *self.chunks_predicate.lock() = predicate.clone(); + *self.chunks_predicate.lock() = filters.to_vec(); + let predicate = Predicate::default().with_exprs(filters.iter().cloned()); let partitions = self.partitions.lock().clone(); Ok(partitions .values() @@ -147,7 +148,7 @@ impl QueryNamespace for TestDatabase { prune_chunks( c.schema(), &[Arc::clone(*c) as Arc<dyn QueryChunk>], - predicate, + &predicate, ) .ok() .map(|res| res[0]) diff --git a/iox_query_influxrpc/src/lib.rs b/iox_query_influxrpc/src/lib.rs index 849f95f8d9..b70e4bab2a 100644 --- a/iox_query_influxrpc/src/lib.rs +++ b/iox_query_influxrpc/src/lib.rs @@ -1289,13 +1289,14 @@ fn table_chunk_stream<'a>( Some(ret) => predicate.clone().with_retention(ret), None => predicate.clone(), }; + let filters = predicate.filter_expr().into_iter().collect::<Vec<_>>(); let projection = columns_in_predicates(need_fields, &table_schema, table_name, &predicate); - let chunks = namespace + let mut chunks = namespace .chunks( table_name, - &predicate, + &filters, projection.as_ref(), ctx.child_ctx("table chunks"), ) @@ -1304,6 +1305,20 @@ fn table_chunk_stream<'a>( table_name: table_name.as_ref(), })?; + // if there is a field restriction on the predicate, only + // chunks with that field should be returned. 
If the chunk has + // none of the fields specified, then it doesn't match + // TODO: test this branch + if let Some(field_columns) = &predicate.field_columns { + chunks.retain(|chunk| { + let schema = chunk.schema(); + // keep chunk if it has any of the columns requested + field_columns + .iter() + .any(|col| schema.find_index_of(col).is_some()) + }) + } + Ok((table_name, Arc::clone(&table_schema), predicate, chunks)) } }) @@ -2352,19 +2367,6 @@ mod tests { Sort: h2o.foo ASC NULLS FIRST, h2o.time ASC NULLS FIRST [foo:Dictionary(Int32, Utf8);N, foo.bar:Float64;N, time:Timestamp(Nanosecond, None)] TableScan: h2o [foo:Dictionary(Int32, Utf8);N, foo.bar:Float64;N, time:Timestamp(Nanosecond, None)] "###); - - let got_predicate = test_db.get_chunks_predicate(); - let exp_predicate = Predicate::new() - .with_field_columns(vec!["foo.bar"]) - .unwrap() - .with_value_expr( - "_value" - .as_expr() - .eq(lit(1.2)) - .try_into() - .expect("failed to convert _value expression"), - ); - assert_eq!(got_predicate, exp_predicate); } #[tokio::test] @@ -2447,8 +2449,7 @@ mod tests { let silly_predicate = Predicate::new().with_expr(expr.eq(lit("bar"))); // verify that the predicate was rewritten to `foo = 'bar'` - let expr = col("foo").eq(lit_dict("bar")); - let expected_predicate = Predicate::new().with_expr(expr); + let expected_predicate = vec![col("foo").eq(lit_dict("bar"))]; run_test_with_predicate(&func, silly_predicate, expected_predicate).await; @@ -2461,9 +2462,7 @@ mod tests { // verify that the predicate was rewritten to `false` as the // measurement name is `h20` - let expr = lit(false); - - let expected_predicate = Predicate::new().with_expr(expr); + let expected_predicate = vec![lit(false)]; run_test_with_predicate(&func, silly_predicate, expected_predicate).await; // ------------- Test 3 ---------------- @@ -2483,9 +2482,7 @@ mod tests { Box::new(DataType::Int32), Box::new(ScalarValue::Utf8(Some("bar".to_string()))), ); - let expr = col("foo").eq(lit(dict)); - - let expected_predicate = Predicate::new().with_expr(expr); + let expected_predicate = vec![col("foo").eq(lit(dict))]; run_test_with_predicate(&func, silly_predicate, expected_predicate).await; } @@ -2494,7 +2491,7 @@ mod tests { async fn run_test_with_predicate<T>( func: &T, predicate: Predicate, - expected_predicate: Predicate, + expected_predicate: Vec<Expr>, ) where T: Fn(Arc<TestDatabase>, InfluxRpcPredicate) -> BoxFuture<'static, ()> + Send + Sync, { diff --git a/querier/src/namespace/query_access.rs b/querier/src/namespace/query_access.rs index afbe6f3334..4a6549f0de 100644 --- a/querier/src/namespace/query_access.rs +++ b/querier/src/namespace/query_access.rs @@ -12,6 +12,7 @@ use datafusion::{ catalog::{schema::SchemaProvider, CatalogProvider}, datasource::TableProvider, error::DataFusionError, + prelude::Expr, }; use datafusion_util::config::DEFAULT_SCHEMA; use iox_query::{ @@ -19,7 +20,7 @@ use iox_query::{ QueryChunk, QueryCompletedToken, QueryNamespace, QueryText, }; use observability_deps::tracing::{debug, trace}; -use predicate::{rpc_predicate::QueryNamespaceMeta, Predicate}; +use predicate::rpc_predicate::QueryNamespaceMeta; use schema::Schema; use std::{any::Any, collections::HashMap, sync::Arc}; use trace::ctx::SpanContext; @@ -41,11 +42,11 @@ impl QueryNamespace for QuerierNamespace { async fn chunks( &self, table_name: &str, - predicate: &Predicate, + filters: &[Expr], projection: Option<&Vec<usize>>, ctx: IOxSessionContext, ) -> Result<Vec<Arc<dyn QueryChunk>>, DataFusionError> { - debug!(%table_name, %predicate, 
"Finding chunks for table"); + debug!(%table_name, ?filters, "Finding chunks for table"); // get table metadata let table = match self.tables.get(table_name).map(Arc::clone) { Some(table) => table, @@ -56,29 +57,14 @@ impl QueryNamespace for QuerierNamespace { } }; - let filters = predicate.filter_expr().into_iter().collect::<Vec<_>>(); - - let mut chunks = table + let chunks = table .chunks( - &filters, + filters, ctx.child_span("QuerierNamespace chunks"), projection, ) .await?; - // if there is a field restriction on the predicate, only - // chunks with that field should be returned. If the chunk has - // none of the fields specified, then it doesn't match - // TODO: test this branch - if let Some(field_columns) = &predicate.field_columns { - chunks.retain(|chunk| { - let schema = chunk.schema(); - // keep chunk if it has any of the columns requested - field_columns - .iter() - .any(|col| schema.find_index_of(col).is_some()) - }) - } Ok(chunks) }
41c9604e46e4dfe556cda5b127e951c67b18f3a3
Dom Dwyer
2023-07-26 17:20:59
schema gossip skeleton
Adds the supporting types required to integrate the generic gossip crate into a schema-specific broadcast primitive. This commit implements the two "halves": * GossipMessageDispatcher: async processing of incoming gossip msgs * Handle: the send-side handle for async sending of gossip msgs These types are responsible for converting into/from the serialised bytes sent over the gossip primitive into application-level / protobuf types.
null
feat(router): schema gossip skeleton Adds the supporting types required to integrate the generic gossip crate into a schema-specific broadcast primitive. This commit implements the two "halves": * GossipMessageDispatcher: async processing of incoming gossip msgs * Handle: the send-side handle for async sending of gossip msgs These types are responsible for converting into/from the serialised bytes sent over the gossip primitive into application-level / protobuf types.
diff --git a/router/src/gossip/dispatcher.rs b/router/src/gossip/dispatcher.rs new file mode 100644 index 0000000000..72b7948f90 --- /dev/null +++ b/router/src/gossip/dispatcher.rs @@ -0,0 +1,93 @@ +//! A deserialiser and dispatcher of [`gossip`] messages. + +use std::fmt::Debug; + +use async_trait::async_trait; +use bytes::Bytes; +use generated_types::influxdata::iox::gossip::v1::{gossip_message::Msg, GossipMessage}; +use generated_types::prost::Message; +use observability_deps::tracing::{info, warn}; +use tokio::{sync::mpsc, task::JoinHandle}; + +/// A handler of [`Msg`] received via gossip. +#[async_trait] +pub trait GossipMessageHandler: Send + Sync + Debug { + /// Process `message`. + async fn handle(&self, message: Msg); +} + +/// An async gossip message dispatcher. +/// +/// This type is responsible for deserialising incoming gossip payloads and +/// passing them off to the provided [`GossipMessageHandler`] implementation. +/// This decoupling allow the handler to deal strictly in terms of messages, +/// abstracting it from the underlying message transport / format. +/// +/// This type provides a buffer between incoming events, and processing, +/// preventing processing time from blocking the gossip reactor. Once the buffer +/// is full, incoming events are dropped until space is made through processing +/// of outstanding events. +#[derive(Debug)] +pub struct GossipMessageDispatcher { + tx: mpsc::Sender<Bytes>, + task: JoinHandle<()>, +} + +impl GossipMessageDispatcher { + /// Initialise a new dispatcher, buffering up to `buffer` number of events. + /// + /// The provided `handler` does not block the gossip reactor during + /// execution. + pub fn new<T>(handler: T, buffer: usize) -> Self + where + T: GossipMessageHandler + 'static, + { + // Initialise a buffered channel to decouple the two halves. + let (tx, rx) = mpsc::channel(buffer); + + // And run a receiver loop to pull the events from the channel. + let task = tokio::spawn(dispatch_loop(rx, handler)); + + Self { tx, task } + } +} + +#[async_trait] +impl gossip::Dispatcher for GossipMessageDispatcher { + async fn dispatch(&self, payload: Bytes) { + if let Err(e) = self.tx.try_send(payload) { + warn!(error=%e, "failed to buffer gossip event"); + } + } +} + +impl Drop for GossipMessageDispatcher { + fn drop(&mut self) { + self.task.abort(); + } +} + +async fn dispatch_loop<T>(mut rx: mpsc::Receiver<Bytes>, handler: T) +where + T: GossipMessageHandler, +{ + while let Some(payload) = rx.recv().await { + // Deserialise the payload into the appropriate proto type. + let msg = match GossipMessage::decode(payload).map(|v| v.msg) { + Ok(Some(v)) => v, + Ok(None) => { + warn!("valid frame contains no message"); + continue; + } + Err(e) => { + warn!(error=%e, "failed to deserialise gossip message"); + continue; + } + }; + + // Pass this message off to the handler to process. + handler.handle(msg).await; + } + + info!("stopping gossip dispatcher"); +} diff --git a/router/src/gossip/handle.rs b/router/src/gossip/handle.rs new file mode 100644 index 0000000000..be53973fb0 --- /dev/null +++ b/router/src/gossip/handle.rs @@ -0,0 +1,20 @@ +//! A handle to publish gossip messages to other peers. + +use crate::namespace_cache::ChangeStats; + +/// Application-level event notifiers. +pub trait SchemaEventHandle { + /// Observe a new schema diff. + fn observe_diff(&self, c: &ChangeStats); +} + +/// A handle to the [`gossip`] subsystem propagating schema change +/// notifications. 
+#[derive(Debug)] +pub struct Handle(gossip::GossipHandle); + +impl SchemaEventHandle for Handle { + fn observe_diff(&self, _c: &ChangeStats) { + unreachable!() + } +} diff --git a/router/src/gossip/mod.rs b/router/src/gossip/mod.rs new file mode 100644 index 0000000000..aad32e30c1 --- /dev/null +++ b/router/src/gossip/mod.rs @@ -0,0 +1,49 @@ +//! Gossip event dispatcher & handler implementations for routers. +//! +//! This sub-system is composed of the following primary components: +//! +//! * [`gossip`] crate: provides the gossip transport, the [`GossipHandle`], and +//! the [`Dispatcher`]. This crate operates on raw bytes. +//! +//! * The [`Handle`]: a router-specific wrapper over the underlying +//! [`GossipHandle`]. This type translates the application calls into protobuf +//! [`Msg`], and serialises them into bytes for the underlying [`gossip`] +//! impl. +//! +//! * The [`GossipMessageDispatcher`]: deserialises the incoming bytes from the +//! gossip [`Dispatcher`] into [`Msg`] and passes them off to the +//! [`GossipMessageHandler`] implementation for processing. +//! +//! ```text +//! event handler +//! │ ▲ +//! │ │ +//! │ Application types │ +//! │ │ +//! ▼ │ +//! ┌──────────┐ ┌─────────────────────────┐ +//! │ Handle │ │ GossipMessageDispatcher │ +//! └──────────┘ └─────────────────────────┘ +//! │ ▲ +//! │ │ +//! │ Encoded Protobuf bytes │ +//! │ │ +//! │ │ +//! ┌ Gossip ─ ─│─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─│─ ─ ─ ─ ─ ─ ─ +//! ▼ │ │ +//! │ ┌──────────────┐ ┌──────────────────┐ +//! │ GossipHandle │ │ Dispatcher │ │ +//! │ └──────────────┘ └──────────────────┘ +//! │ +//! └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ +//! ``` +//! +//! [`GossipHandle`]: gossip::GossipHandle +//! [`Dispatcher`]: gossip::Dispatcher +//! [`Handle`]: handle::Handle +//! [`Msg`]: generated_types::influxdata::iox::gossip::v1::gossip_message::Msg +//! [`GossipMessageDispatcher`]: dispatcher::GossipMessageDispatcher +//! [`GossipMessageHandler`]: dispatcher::GossipMessageHandler + +pub mod dispatcher; +pub mod handle; diff --git a/router/src/lib.rs b/router/src/lib.rs index 9f82b308c9..2ac928025c 100644 --- a/router/src/lib.rs +++ b/router/src/lib.rs @@ -137,6 +137,7 @@ use criterion as _; use workspace_hack as _; pub mod dml_handlers; +pub mod gossip; pub mod namespace_cache; pub mod namespace_resolver; pub mod server;
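A generic sketch of the buffer-and-dispatch pattern the commit describes: a bounded tokio channel decouples the receive path from handling, and `try_send` drops payloads rather than blocking when the buffer is full. This is not the router's actual types, and it assumes tokio with the `rt` and `macros` features enabled.

```rust
use tokio::sync::mpsc;

/// A toy payload handler standing in for `GossipMessageHandler`.
async fn handle(payload: Vec<u8>) {
    println!("handling {} bytes", payload.len());
}

#[tokio::main]
async fn main() {
    // Bounded buffer decouples the (fast) receive path from (slow) handling.
    let (tx, mut rx) = mpsc::channel::<Vec<u8>>(128);

    // Consumer task: drains the buffer and processes each payload.
    let task = tokio::spawn(async move {
        while let Some(payload) = rx.recv().await {
            handle(payload).await;
        }
    });

    // Producer side: never blocks - if the buffer is full the payload is
    // dropped, keeping the receive loop responsive.
    for i in 0..3u8 {
        if let Err(e) = tx.try_send(vec![i; 8]) {
            eprintln!("failed to buffer payload: {e}");
        }
    }

    drop(tx); // close the channel so the consumer loop ends
    task.await.unwrap();
}
```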
6cf180738be8c8c297e4429e7d9af45de24a8522
Dom Dwyer
2023-05-23 12:04:52
more exacting retention validation tests
The old tests used partial error string matching against what was, at the time, the whole error message. So when I added more to the error message, the fixture tests didn't fail. This changes the tests to match the full string and validate that the timestamps are included.
null
test: more exacting retention validation tests The old tests used partial error string matching against what was, at the time, the whole error message. So when I added more to the error message, the fixture tests didn't fail. This changes the tests to match the full string and validate that the timestamps are included.
diff --git a/router/src/dml_handlers/retention_validation.rs b/router/src/dml_handlers/retention_validation.rs index eb612e2ae5..a4607a6eff 100644 --- a/router/src/dml_handlers/retention_validation.rs +++ b/router/src/dml_handlers/retention_validation.rs @@ -49,7 +49,10 @@ impl RetentionValidator { } #[async_trait] -impl DmlHandler for RetentionValidator { +impl<P> DmlHandler for RetentionValidator<P> +where + P: TimeProvider, +{ type WriteError = RetentionError; type WriteInput = HashMap<String, MutableBatch>; @@ -88,7 +91,9 @@ impl DmlHandler for RetentionValidator { mod tests { use std::sync::Arc; + use assert_matches::assert_matches; use iox_tests::{TestCatalog, TestNamespace}; + use iox_time::MockProvider; use once_cell::sync::Lazy; use super::*; @@ -113,12 +118,10 @@ mod tests { let line = "bananas,tag1=A,tag2=B val=42i ".to_string() + &now; let writes = lp_to_writes(&line); - let result = handler + let _result = handler .write(&NAMESPACE, namespace.schema().await.into(), writes, None) - .await; - - // no error means the time is inside the retention period - assert!(result.is_ok()); + .await + .unwrap(); } #[tokio::test] @@ -128,13 +131,16 @@ mod tests { // Create the table so that there is a known ID that must be returned. let _want_id = namespace.create_table("bananas").await.table.id; - // Create the validator whose retention period is 1 hour - let handler = RetentionValidator::new(); + let mock_now = iox_time::Time::from_rfc3339("2023-05-23T09:59:06+00:00").unwrap(); + let mock_time = MockProvider::new(mock_now); + + // Create the validator whse retention period is 1 hour + let handler = RetentionValidator { + time_provider: mock_time.clone(), + }; // Make time outside the retention period - let two_hours_ago = (SystemProvider::default().now().timestamp_nanos() - - 2 * 3_600 * 1_000_000_000) - .to_string(); + let two_hours_ago = (mock_now.timestamp_nanos() - 2 * 3_600 * 1_000_000_000).to_string(); let line = "bananas,tag1=A,tag2=B val=42i ".to_string() + &two_hours_ago; let writes = lp_to_writes(&line); @@ -143,9 +149,14 @@ mod tests { .await; // error means the time is outside the retention period - assert!(result.is_err()); - let message = result.unwrap_err().to_string(); - assert!(message.contains("data in table bananas is outside of the retention period")); + assert_matches!(result, Err(e) => { + assert_eq!( + e.to_string(), + "data in table bananas is outside of the retention period: \ + minimum acceptable timestamp is 2023-05-23T08:59:06+00:00, but \ + observed timestamp 2023-05-23T07:59:06+00:00 is older." + ) + }); } #[tokio::test] @@ -155,19 +166,19 @@ mod tests { // Create the table so that there is a known ID that must be returned. 
let _want_id = namespace.create_table("bananas").await.table.id; - // Create the validator whose retention period is 1 hour - let handler = RetentionValidator::new(); + let mock_now = iox_time::Time::from_rfc3339("2023-05-23T09:59:06+00:00").unwrap(); + let mock_time = MockProvider::new(mock_now); + + // Create the validator whse retention period is 1 hour + let handler = RetentionValidator { + time_provider: mock_time.clone(), + }; // Make time now to be inside the retention period - let now = SystemProvider::default() - .now() - .timestamp_nanos() - .to_string(); + let now = mock_now.timestamp_nanos().to_string(); let line1 = "bananas,tag1=A,tag2=B val=42i ".to_string() + &now; // Make time outside the retention period - let two_hours_ago = (SystemProvider::default().now().timestamp_nanos() - - 2 * 3_600 * 1_000_000_000) - .to_string(); + let two_hours_ago = (mock_now.timestamp_nanos() - 2 * 3_600 * 1_000_000_000).to_string(); let line2 = "bananas,tag1=AA,tag2=BB val=422i ".to_string() + &two_hours_ago; // a lp with 2 lines, one inside and one outside retention period let lp = format!("{line1}\n{line2}"); @@ -178,9 +189,14 @@ mod tests { .await; // error means the time is outside the retention period - assert!(result.is_err()); - let message = result.unwrap_err().to_string(); - assert!(message.contains("data in table bananas is outside of the retention period")); + assert_matches!(result, Err(e) => { + assert_eq!( + e.to_string(), + "data in table bananas is outside of the retention period: \ + minimum acceptable timestamp is 2023-05-23T08:59:06+00:00, but \ + observed timestamp 2023-05-23T07:59:06+00:00 is older." + ) + }); } #[tokio::test] @@ -190,19 +206,19 @@ mod tests { // Create the table so that there is a known ID that must be returned. let _want_id = namespace.create_table("bananas").await.table.id; + let mock_now = iox_time::Time::from_rfc3339("2023-05-23T09:59:06+00:00").unwrap(); + let mock_time = MockProvider::new(mock_now); + // Create the validator whse retention period is 1 hour - let handler = RetentionValidator::new(); + let handler = RetentionValidator { + time_provider: mock_time.clone(), + }; // Make time now to be inside the retention period - let now = SystemProvider::default() - .now() - .timestamp_nanos() - .to_string(); + let now = mock_now.timestamp_nanos().to_string(); let line1 = "bananas,tag1=A,tag2=B val=42i ".to_string() + &now; // Make time outside the retention period - let two_hours_ago = (SystemProvider::default().now().timestamp_nanos() - - 2 * 3_600 * 1_000_000_000) - .to_string(); + let two_hours_ago = (mock_now.timestamp_nanos() - 2 * 3_600 * 1_000_000_000).to_string(); let line2 = "apple,tag1=AA,tag2=BB val=422i ".to_string() + &two_hours_ago; // a lp with 2 lines, one inside and one outside retention period let lp = format!("{line1}\n{line2}"); @@ -213,9 +229,13 @@ mod tests { .await; // error means the time is outside the retention period - assert!(result.is_err()); - let message = result.unwrap_err().to_string(); - assert!(message.contains("data in table apple is outside of the retention period")); + assert_matches!(result, Err(e) => { + assert_eq!( + e.to_string(), + "data in table apple is outside of the retention period: minimum \ + acceptable timestamp is 2023-05-23T08:59:06+00:00, but observed \ + timestamp 2023-05-23T07:59:06+00:00 is older.") + }); } // Parse `lp` into a table-keyed MutableBatch map.
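The testing change this commit describes is easiest to see side by side: a `contains`-style assertion keeps passing when the error message grows, while a full-string `assert_matches!` check fails. The sketch below is self-contained but hypothetical: it assumes the `assert_matches` crate, the `RetentionError` enum is a made-up stand-in for the router's real error type, and the message text is copied from the fixtures above.

```rust
use assert_matches::assert_matches;

/// Made-up error type standing in for the real retention error.
#[derive(Debug)]
enum RetentionError {
    OutsideRetention(String),
}

impl std::fmt::Display for RetentionError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::OutsideRetention(msg) => write!(f, "{msg}"),
        }
    }
}

fn main() {
    let result: Result<(), RetentionError> = Err(RetentionError::OutsideRetention(
        "data in table bananas is outside of the retention period: \
         minimum acceptable timestamp is 2023-05-23T08:59:06+00:00, but \
         observed timestamp 2023-05-23T07:59:06+00:00 is older."
            .to_string(),
    ));

    // Old style: still passes if the message later gains extra detail.
    assert!(result
        .as_ref()
        .unwrap_err()
        .to_string()
        .contains("data in table bananas is outside of the retention period"));

    // New style: the whole message, timestamps included, must match exactly.
    assert_matches!(result, Err(e) => {
        assert_eq!(
            e.to_string(),
            "data in table bananas is outside of the retention period: \
             minimum acceptable timestamp is 2023-05-23T08:59:06+00:00, but \
             observed timestamp 2023-05-23T07:59:06+00:00 is older."
        );
    });
}
```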
18783f946282ad7f1fd3dd7d88e64ae7f9eeca9f
Marco Neumann
2023-08-22 10:56:19
harden `end_to_end_cases::debug::build_catalog` (#8537)
This seems to fail a lot in CI; try to work around it. A proper fix is tracked under #8287.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
test: harden `end_to_end_cases::debug::build_catalog` (#8537) This seems to fail a lot in CI; try to work around it. A proper fix is tracked under #8287. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/influxdb_iox/tests/end_to_end_cases/debug.rs b/influxdb_iox/tests/end_to_end_cases/debug.rs index 14ff39f1b5..6e50c85362 100644 --- a/influxdb_iox/tests/end_to_end_cases/debug.rs +++ b/influxdb_iox/tests/end_to_end_cases/debug.rs @@ -5,10 +5,11 @@ use arrow::record_batch::RecordBatch; use arrow_util::assert_batches_sorted_eq; use assert_cmd::Command; use futures::FutureExt; +use influxdb_iox_client::flight::Error as FlightError; use predicates::prelude::*; use tempfile::TempDir; use test_helpers_end_to_end::{ - maybe_skip_integration, run_sql, MiniCluster, ServerFixture, Step, StepTest, StepTestState, + maybe_skip_integration, try_run_sql, MiniCluster, ServerFixture, Step, StepTest, StepTestState, TestConfig, }; @@ -129,12 +130,13 @@ async fn rebuild_and_query(table_dir: &Path, namespace: &str, sql: &str, expecte while retries > 0 { println!("** Retries remaining: {retries}"); let restarted = RestartedServer::build_catalog_and_start(table_dir).await; - let batches = restarted.run_sql(sql, namespace).await; - - // if we got results, great, otherwise try again - if !batches.is_empty() { - assert_batches_sorted_eq!(expected, &batches); - return; + match restarted.try_run_sql(sql, namespace).await { + // if we got results, great, otherwise try again + Ok(batches) if !batches.is_empty() => { + assert_batches_sorted_eq!(expected, &batches); + return; + } + _ => {} } retries -= 1; @@ -151,21 +153,21 @@ struct RestartedServer { } impl RestartedServer { - async fn run_sql( + async fn try_run_sql( &self, sql: impl Into<String>, namespace: impl Into<String>, - ) -> Vec<RecordBatch> { - let (batches, _schema) = run_sql( + ) -> Result<Vec<RecordBatch>, FlightError> { + let (batches, _schema) = try_run_sql( sql, namespace, self.all_in_one.querier_grpc_connection(), None, false, ) - .await; + .await?; - batches + Ok(batches) } /// builds a catalog from an export directory and starts a all in
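The hardening here is just a retry loop whose success condition is "non-empty result", with both errors and empty batches treated as retryable. A self-contained sketch of that shape follows, assuming a tokio runtime; `query_once` is a made-up stand-in for `try_run_sql` that simulates two bad attempts before a good one.

```rust
use std::time::Duration;

/// Made-up stand-in for `try_run_sql`: fails once, returns nothing once,
/// then produces data.
async fn query_once(attempt: u32) -> Result<Vec<String>, String> {
    match attempt {
        0 => Err("transient flight error".to_string()),
        1 => Ok(vec![]),
        _ => Ok(vec!["| 42 |".to_string()]),
    }
}

#[tokio::main]
async fn main() {
    let mut retries = 5u32;
    let mut attempt = 0u32;

    while retries > 0 {
        match query_once(attempt).await {
            // Only a non-empty result counts as success.
            Ok(batches) if !batches.is_empty() => {
                println!("got {} batch(es) after {} attempt(s)", batches.len(), attempt + 1);
                return;
            }
            // Errors and empty results both fall through to a retry.
            _ => {}
        }
        retries -= 1;
        attempt += 1;
        tokio::time::sleep(Duration::from_millis(100)).await;
    }

    panic!("query never returned results");
}
```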
cbd747db44c416ced72dd747d58e27ec014702c3
Christopher M. Wolff
2023-04-12 14:51:44
update gap fill planner rule to use `interpolate` (#7494)
* feat: add INTERPOLATE fn and update the gap-fill planner rule * test: add an end-to-end test for interpolate()
null
feat: update gap fill planner rule to use `interpolate` (#7494) * feat: add INTERPOLATE fn and update the gap-fill planner rule * test: add an end-to-end test for interpolate()
diff --git a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql index 09c2427eaf..1cd99776ee 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql +++ b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql @@ -1,11 +1,15 @@ -- Gap-filling tests -- IOX_SETUP: OneMeasurementTwoSeries --- Input data --- region=a 2000-05-05T12:20:00Z --- region=a 2000-05-05T12:40:00Z --- region=b 2000-05-05T12:31:00Z --- region=b 2000-05-05T12:39:00Z +-- Input data (by region, time) +SELECT * +FROM cpu +ORDER BY REGION, TIME; + +-- Input data (by time) +SELECT * +FROM cpu +ORDER BY TIME; -- IOX_COMPARE: uuid EXPLAIN SELECT @@ -75,3 +79,13 @@ from cpu where time between timestamp '2000-05-05T12:19:00Z' and timestamp '2000-05-05T12:40:00Z' group by minute; +-- cpu.idle has a null value at 12:31. Interpolation should still occur, +-- overwriting the null value. +SELECT + date_bin_gapfill(interval '4 minutes', time, timestamp '1970-01-01T00:00:00Z') as four_minute, + interpolate(min(cpu.idle)), + interpolate(min(cpu."user")) +from cpu +where time between timestamp '2000-05-05T12:19:00Z' and timestamp '2000-05-05T12:40:00Z' +group by four_minute; + diff --git a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected index 2c7a3dc6d7..8afd5ee518 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected @@ -1,4 +1,22 @@ -- Test Setup: OneMeasurementTwoSeries +-- SQL: SELECT * FROM cpu ORDER BY REGION, TIME; ++------+--------+----------------------+------+ +| idle | region | time | user | ++------+--------+----------------------+------+ +| 70.0 | a | 2000-05-05T12:20:00Z | 23.2 | +| | a | 2000-05-05T12:40:00Z | 21.0 | +| | b | 2000-05-05T12:31:00Z | 25.2 | +| 60.0 | b | 2000-05-05T12:39:00Z | 28.9 | ++------+--------+----------------------+------+ +-- SQL: SELECT * FROM cpu ORDER BY TIME; ++------+--------+----------------------+------+ +| idle | region | time | user | ++------+--------+----------------------+------+ +| 70.0 | a | 2000-05-05T12:20:00Z | 23.2 | +| | b | 2000-05-05T12:31:00Z | 25.2 | +| 60.0 | b | 2000-05-05T12:39:00Z | 28.9 | +| | a | 2000-05-05T12:40:00Z | 21.0 | ++------+--------+----------------------+------+ -- SQL: EXPLAIN SELECT date_bin_gapfill(interval '10 minute', time, timestamp '1970-01-01T00:00:00Z') as minute, count(cpu.user) from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by minute; -- Results After Normalizing UUIDs ---------- @@ -152,4 +170,16 @@ | 2000-05-05T12:38:00Z | 70.0 | | 2000-05-05T12:39:00Z | 60.0 | | 2000-05-05T12:40:00Z | 60.0 | -+----------------------+---------------+ \ No newline at end of file ++----------------------+---------------+ +-- SQL: SELECT date_bin_gapfill(interval '4 minutes', time, timestamp '1970-01-01T00:00:00Z') as four_minute, interpolate(min(cpu.idle)), interpolate(min(cpu."user")) from cpu where time between timestamp '2000-05-05T12:19:00Z' and timestamp '2000-05-05T12:40:00Z' group by four_minute; ++----------------------+---------------+---------------+ +| four_minute | MIN(cpu.idle) | MIN(cpu.user) | ++----------------------+---------------+---------------+ +| 2000-05-05T12:16:00Z | | | +| 2000-05-05T12:20:00Z | 70.0 | 23.2 | +| 2000-05-05T12:24:00Z | 67.5 | 24.2 | +| 2000-05-05T12:28:00Z | 65.0 | 25.2 | +| 2000-05-05T12:32:00Z | 62.5 | 27.05 | +| 2000-05-05T12:36:00Z | 60.0 
| 28.9 | +| 2000-05-05T12:40:00Z | | 21.0 | ++----------------------+---------------+---------------+ \ No newline at end of file diff --git a/iox_query/src/logical_optimizer/handle_gapfill.rs b/iox_query/src/logical_optimizer/handle_gapfill.rs index e51476ca5a..7aa3a9ed66 100644 --- a/iox_query/src/logical_optimizer/handle_gapfill.rs +++ b/iox_query/src/logical_optimizer/handle_gapfill.rs @@ -14,7 +14,7 @@ use datafusion::{ optimizer::{optimizer::ApplyOrder, OptimizerConfig, OptimizerRule}, prelude::{col, Expr}, }; -use query_functions::gapfill::{DATE_BIN_GAPFILL_UDF_NAME, LOCF_UDF_NAME}; +use query_functions::gapfill::{DATE_BIN_GAPFILL_UDF_NAME, INTERPOLATE_UDF_NAME, LOCF_UDF_NAME}; use std::{ collections::HashSet, ops::{Bound, Range}, @@ -349,6 +349,14 @@ impl TreeNodeRewriter for DateBinGapfillRewriter { } } +fn udf_to_fill_strategy(name: &str) -> Option<FillStrategy> { + match name { + LOCF_UDF_NAME => Some(FillStrategy::PrevNullAsMissing), + INTERPOLATE_UDF_NAME => Some(FillStrategy::LinearInterpolate), + _ => None, + } +} + fn handle_projection(proj: &Projection) -> Result<Option<LogicalPlan>> { let Projection { input, @@ -365,12 +373,16 @@ fn handle_projection(proj: &Projection) -> Result<Option<LogicalPlan>> { return Ok(None) }; - let fill_cols: Vec<(&Expr, FillStrategy)> = proj_exprs + let fill_cols: Vec<(&Expr, FillStrategy, &str)> = proj_exprs .iter() .filter_map(|e| match e { - Expr::ScalarUDF { fun, args } if fun.name == LOCF_UDF_NAME => { - let col = &args[0]; - Some((col, FillStrategy::PrevNullAsMissing)) + Expr::ScalarUDF { fun, args } => { + if let Some(strategy) = udf_to_fill_strategy(&fun.name) { + let col = &args[0]; + Some((col, strategy, fun.name.as_str())) + } else { + None + } } _ => None, }) @@ -383,12 +395,12 @@ fn handle_projection(proj: &Projection) -> Result<Option<LogicalPlan>> { // Clone the existing GapFill node, then modify it in place // to reflect the new fill strategy. let mut new_gapfill = child_gapfill.clone(); - for (e, col) in fill_cols { - if new_gapfill.replace_fill_strategy(e, col).is_none() { - // There was a gap filling function called on an aggregate column. - return Err(DataFusionError::Plan( - "LOCF must be called on an aggregate column in a gap-filling query".to_string(), - )); + for (e, fs, fn_name) in fill_cols { + if new_gapfill.replace_fill_strategy(e, fs).is_none() { + // There was a gap filling function called on a non-aggregate column. + return Err(DataFusionError::Plan(format!( + "{fn_name} must be called on an aggregate column in a gap-filling query" + ))); } } @@ -397,7 +409,9 @@ fn handle_projection(proj: &Projection) -> Result<Option<LogicalPlan>> { .iter() .cloned() .map(|e| match e { - Expr::ScalarUDF { fun, mut args } if fun.name == LOCF_UDF_NAME => args.remove(0), + Expr::ScalarUDF { fun, mut args } if udf_to_fill_strategy(&fun.name).is_some() => { + args.remove(0) + } _ => e, }) .collect(); @@ -433,16 +447,19 @@ fn check_node(node: &LogicalPlan) -> Result<()> { node.expressions().iter().try_for_each(|expr| { let dbg_count = count_udf(expr, DATE_BIN_GAPFILL_UDF_NAME)?; if dbg_count > 0 { - Err(DataFusionError::Plan(format!( + return Err(DataFusionError::Plan(format!( "{DATE_BIN_GAPFILL_UDF_NAME} may only be used as a GROUP BY expression" - ))) - } else if count_udf(expr, LOCF_UDF_NAME)? 
> 0 { - Err(DataFusionError::Plan(format!( - "{LOCF_UDF_NAME} may only be used in the SELECT list of a gap-filling query" - ))) - } else { - Ok(()) + ))); } + + for fn_name in [LOCF_UDF_NAME, INTERPOLATE_UDF_NAME] { + if count_udf(expr, fn_name)? > 0 { + return Err(DataFusionError::Plan(format!( + "{fn_name} may only be used in the SELECT list of a gap-filling query" + ))); + } + } + Ok(()) }) } @@ -459,7 +476,9 @@ mod test { use datafusion::optimizer::OptimizerContext; use datafusion::prelude::{avg, case, col, lit, lit_timestamp_nano, min, Expr}; use datafusion::scalar::ScalarValue; - use query_functions::gapfill::{DATE_BIN_GAPFILL_UDF_NAME, LOCF_UDF_NAME}; + use query_functions::gapfill::{ + DATE_BIN_GAPFILL_UDF_NAME, INTERPOLATE_UDF_NAME, LOCF_UDF_NAME, + }; fn table_scan() -> Result<LogicalPlan> { let schema = Schema::new(vec![ @@ -497,6 +516,13 @@ mod test { }) } + fn interpolate(arg: Expr) -> Result<Expr> { + Ok(Expr::ScalarUDF { + fun: query_functions::registry().udf(INTERPOLATE_UDF_NAME)?, + args: vec![arg], + }) + } + fn optimize(plan: &LogicalPlan) -> Result<Option<LogicalPlan>> { let optimizer = Optimizer::with_rules(vec![Arc::new(HandleGapFill::default())]); optimizer.optimize_recursively( @@ -581,6 +607,20 @@ mod test { Ok(()) } + /// calling INTERPOLATE in a WHERE predicate is not valid + #[test] + fn misplaced_interpolate_err() -> Result<()> { + // date_bin_gapfill used in a filter should produce an error + let scan = table_scan()?; + let plan = LogicalPlanBuilder::from(scan) + .filter(interpolate(col("temp"))?.gt(lit(100.0)))? + .build()?; + assert_optimizer_err( + &plan, + "Error during planning: interpolate may only be used in the SELECT list of a gap-filling query", + ); + Ok(()) + } /// calling LOCF on the SELECT list but not on an aggregate column is not valid. #[test] fn misplaced_locf_non_agg_err() -> Result<()> { @@ -607,7 +647,7 @@ mod test { .build()?; assert_optimizer_err( &plan, - "LOCF must be called on an aggregate column in a gap-filling query", + "locf must be called on an aggregate column in a gap-filling query", ); Ok(()) } @@ -852,4 +892,37 @@ mod test { assert_optimized_plan_eq(&plan, &expected)?; Ok(()) } + + #[test] + fn with_interpolate() -> Result<()> { + let dbg_args = "IntervalDayTime(\"60000\"),temps.time,TimestampNanosecond(0, None)"; + let plan = LogicalPlanBuilder::from(table_scan()?) + .filter( + col("time") + .gt_eq(lit_timestamp_nano(1000)) + .and(col("time").lt(lit_timestamp_nano(2000))), + )? + .aggregate( + vec![date_bin_gapfill( + lit(ScalarValue::IntervalDayTime(Some(60_000))), + col("time"), + )?], + vec![avg(col("temp")), min(col("temp"))], + )? + .project(vec![ + col(format!("date_bin_gapfill({dbg_args})")), + interpolate(col("AVG(temps.temp)"))?, + interpolate(col("MIN(temps.temp)"))?, + ])? 
+ .build()?; + + let expected = format!( + "Projection: date_bin_gapfill({dbg_args}), AVG(temps.temp), MIN(temps.temp)\ + \n GapFill: groupBy=[[date_bin_gapfill({dbg_args})]], aggr=[[INTERPOLATE(AVG(temps.temp)), INTERPOLATE(MIN(temps.temp))]], time_column=date_bin_gapfill({dbg_args}), stride=IntervalDayTime(\"60000\"), range=Included(TimestampNanosecond(1000, None))..Excluded(TimestampNanosecond(2000, None))\ + \n Aggregate: groupBy=[[datebin(IntervalDayTime(\"60000\"), temps.time, TimestampNanosecond(0, None))]], aggr=[[AVG(temps.temp), MIN(temps.temp)]]\ + \n Filter: temps.time >= TimestampNanosecond(1000, None) AND temps.time < TimestampNanosecond(2000, None)\ + \n TableScan: temps"); + assert_optimized_plan_eq(&plan, &expected)?; + Ok(()) + } } diff --git a/query_functions/src/gapfill.rs b/query_functions/src/gapfill.rs index e3f04ae5f4..ff8ec06809 100644 --- a/query_functions/src/gapfill.rs +++ b/query_functions/src/gapfill.rs @@ -8,12 +8,13 @@ //! location, //! DATE_BIN_GAPFILL(INTERVAL '1 minute', time, '1970-01-01T00:00:00Z') AS minute, //! LOCF(AVG(temp)) +//! INTERPOLATE(AVG(humidity)) //! FROM temps //! WHERE time > NOW() - INTERVAL '6 hours' AND time < NOW() //! GROUP BY LOCATION, MINUTE //! ``` //! -//! The functions `DATE_BIN_GAPFILL` and `LOCF` are special, +//! The functions `DATE_BIN_GAPFILL`, `LOCF`, and `INTERPOLATE` are special, //! in that they don't have normal implementations, but instead //! are transformed by logical optimizer rule `HandleGapFill` to //! produce a plan that fills gaps. @@ -23,11 +24,13 @@ use arrow::datatypes::{DataType, IntervalUnit, TimeUnit}; use datafusion::{ error::DataFusionError, logical_expr::{ - ReturnTypeFunction, ScalarFunctionImplementation, ScalarUDF, Signature, Volatility, + ReturnTypeFunction, ScalarFunctionImplementation, ScalarUDF, Signature, TypeSignature, + Volatility, }, prelude::create_udf, }; use once_cell::sync::Lazy; +use schema::InfluxFieldType; /// The name of the date_bin_gapfill UDF given to DataFusion. pub const DATE_BIN_GAPFILL_UDF_NAME: &str = "date_bin_gapfill"; @@ -70,6 +73,34 @@ pub(crate) static LOCF: Lazy<Arc<ScalarUDF>> = Lazy::new(|| { )) }); +/// The name of the interpolate UDF given to DataFusion. +pub const INTERPOLATE_UDF_NAME: &str = "interpolate"; + +/// (Non-)Implementation of interpolate. +/// This function takes a single numeric argument and +/// produces a value of the same type. It is +/// used in the context of gap-filling queries to indicate +/// columns that should be inmterpolated. It does not have +/// an implementation since it will be consumed by the logical optimizer rule +/// `HandleGapFill`. 
+pub(crate) static INTERPOLATE: Lazy<Arc<ScalarUDF>> = Lazy::new(|| { + let return_type_fn: ReturnTypeFunction = Arc::new(|args| Ok(Arc::new(args[0].clone()))); + let signatures = vec![ + InfluxFieldType::Float, + InfluxFieldType::Integer, + InfluxFieldType::UInteger, + ] + .iter() + .map(|&influx_type| TypeSignature::Exact(vec![influx_type.into()])) + .collect(); + Arc::new(ScalarUDF::new( + INTERPOLATE_UDF_NAME, + &Signature::one_of(signatures, Volatility::Volatile), + &return_type_fn, + &unimplemented_scalar_impl(INTERPOLATE_UDF_NAME), + )) +}); + fn unimplemented_scalar_impl(name: &'static str) -> ScalarFunctionImplementation { Arc::new(move |_| { Err(DataFusionError::NotImplemented(format!( @@ -143,4 +174,30 @@ mod test { .to_string() .contains(expected)); } + + fn interpolate(arg: Expr) -> Expr { + crate::registry() + .udf(super::INTERPOLATE_UDF_NAME) + .expect("should be registered") + .call(vec![arg]) + } + + #[tokio::test] + async fn interpolate_errs() { + let arg = Arc::new(Float64Array::from(vec![100.0])); + let rb = RecordBatch::try_from_iter(vec![("f0", arg as ArrayRef)]).unwrap(); + let ctx = context_with_table(rb); + let df = ctx + .table("t") + .await + .unwrap() + .select(vec![interpolate(col("f0"))]) + .unwrap(); + let res = df.collect().await; + let expected = "interpolate is not yet implemented"; + assert!(res + .expect_err("should be an error") + .to_string() + .contains(expected)); + } } diff --git a/query_functions/src/registry.rs b/query_functions/src/registry.rs index 7b763bd4cd..d04b5cf861 100644 --- a/query_functions/src/registry.rs +++ b/query_functions/src/registry.rs @@ -26,6 +26,7 @@ impl FunctionRegistry for IOxFunctionRegistry { [ gapfill::DATE_BIN_GAPFILL_UDF_NAME, gapfill::LOCF_UDF_NAME, + gapfill::INTERPOLATE_UDF_NAME, regex::REGEX_MATCH_UDF_NAME, regex::REGEX_NOT_MATCH_UDF_NAME, window::WINDOW_BOUNDS_UDF_NAME, @@ -39,6 +40,7 @@ impl FunctionRegistry for IOxFunctionRegistry { match name { gapfill::DATE_BIN_GAPFILL_UDF_NAME => Ok(gapfill::DATE_BIN_GAPFILL.clone()), gapfill::LOCF_UDF_NAME => Ok(gapfill::LOCF.clone()), + gapfill::INTERPOLATE_UDF_NAME => Ok(gapfill::INTERPOLATE.clone()), regex::REGEX_MATCH_UDF_NAME => Ok(regex::REGEX_MATCH_UDF.clone()), regex::REGEX_NOT_MATCH_UDF_NAME => Ok(regex::REGEX_NOT_MATCH_UDF.clone()), window::WINDOW_BOUNDS_UDF_NAME => Ok(window::WINDOW_BOUNDS_UDF.clone()),
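For intuition about what `interpolate(min(...))` asks the gap-fill operator to do, here is a minimal, self-contained sketch of linear filling over binned values. It is not the GapFill operator itself, only the arithmetic implied by `FillStrategy::LinearInterpolate`, using the numbers from the expected output above: 70.0 in the 12:20 bin, 60.0 in the 12:36 bin, and three empty 4-minute bins in between.

```rust
/// Fill gaps between known points on a straight line; bins outside any pair
/// of known points stay empty (as in the 12:16 row above).
fn linear_fill(values: &[Option<f64>]) -> Vec<Option<f64>> {
    let mut out = values.to_vec();
    let mut prev: Option<(usize, f64)> = None;

    for i in 0..out.len() {
        if let Some(v) = out[i] {
            if let Some((prev_i, prev_v)) = prev {
                // Evenly spread the difference over the missing bins.
                let step = (v - prev_v) / (i - prev_i) as f64;
                for (k, slot) in out[prev_i + 1..i].iter_mut().enumerate() {
                    *slot = Some(prev_v + step * (k + 1) as f64);
                }
            }
            prev = Some((i, v));
        }
    }
    out
}

fn main() {
    // min(cpu.idle) per 4-minute bin: 12:20, 12:24, 12:28, 12:32, 12:36.
    let bins = [Some(70.0), None, None, None, Some(60.0)];
    assert_eq!(
        linear_fill(&bins),
        vec![Some(70.0), Some(67.5), Some(65.0), Some(62.5), Some(60.0)]
    );
}
```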
d46a5f0c515376a776c239a27d40e7a2d0abef84
Dom Dwyer
2023-07-06 15:36:30
update metrics out of phase
Emit the metrics out-of-phase with the metric scraper.
null
refactor: update metrics out of phase Emit the metrics out-of-phase with the metric scraper.
diff --git a/tracker/src/disk_metric.rs b/tracker/src/disk_metric.rs index 412b1ddfed..fe92597904 100644 --- a/tracker/src/disk_metric.rs +++ b/tracker/src/disk_metric.rs @@ -3,6 +3,12 @@ use std::{borrow::Cow, path::PathBuf, time::Duration}; use metric::{Attributes, U64Gauge}; use sysinfo::{DiskExt, RefreshKind, System, SystemExt}; +/// The interval at which disk metrics are updated. +/// +/// This is purposely chosen to be out-of-phase w.r.t the default metric scrape +/// interval. +const UPDATE_INTERVAL: Duration = Duration::from_secs(13); + /// A periodic reporter of disk capacity / free statistics for a given /// directory. #[derive(Debug)] @@ -68,7 +74,7 @@ impl DiskSpaceMetrics { /// Start the [`DiskSpaceMetrics`] evaluation loop, blocking forever. pub async fn run(mut self) { - let mut interval = tokio::time::interval(Duration::from_secs(10)); + let mut interval = tokio::time::interval(UPDATE_INTERVAL); loop { interval.tick().await;
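A small sketch of why the interval changed: assuming, purely for illustration, a 10-second metric scrape, a 13-second update period means successive scrapes observe the gauge at ever-shifting ages instead of locking into step with the updater (which is what a 10-second update period would do).

```rust
fn main() {
    let scrape_every = 10u64; // assumed scrape interval, in seconds
    let update_every = 13u64; // UPDATE_INTERVAL from the change above

    for n in 1..=6u64 {
        let scrape_at = n * scrape_every;
        // Updates fire at t = 0, 13, 26, ... so the age of the freshest
        // value at scrape time is the remainder modulo the update period.
        let staleness = scrape_at % update_every;
        println!("scrape #{n} at t={scrape_at}s sees a value {staleness}s old");
    }
    // With update_every = 10 the remainder would be 0 on every scrape:
    // the scraper and updater would run in lock-step.
}
```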
ed694d3be4b6635804aef3d76d6ad33a5902ab2e
Marco Neumann
2023-01-26 11:03:08
introduce scratchpad store for compactor (#6706)
* feat: introduce scratchpad store for compactor Use an intermediate in-memory store (can be a disk later if we want) to stage all inputs and outputs of the compaction. The reasons are: - **fewer IO ops:** DataFusion's streaming IO requires slightly more IO requests (at least 2 per file) due to the way it is optimized to read as little as possible. It first reads the metadata and then decides which content to fetch. In the compaction case this is (esp. w/o delete predicates) EVERYTHING. So in contrast to the querier, there is no advantage of this approach. In contrary this easily adds 100ms latency to every single input file. - **less traffic:** For divide&conquer partitions (i.e. when we need to run multiple compaction steps to deal with them) it is kinda pointless to upload an intermediate result just to download it again. The scratchpad avoids that. - **higher throughput:** We want to limit the number of concurrent DataFusion jobs because we don't wanna blow up the whole process by having too much in-flight arrow data at the same time. However while we perform the actual computation, we were waiting for object store IO. This was limiting our throughput substantially. - **shadow mode:** De-coupling the stores in this way makes it easier to implement #6645. Note that we assume here that the input parquet files are WAY SMALLER than the uncompressed Arrow data during compaction itself. Closes #6650. * fix: panic on shutdown * refactor: remove shadow scratchpad (for now) * refactor: make scratchpad safe to use
null
feat: introduce scratchpad store for compactor (#6706) * feat: introduce scratchpad store for compactor Use an intermediate in-memory store (can be a disk later if we want) to stage all inputs and outputs of the compaction. The reasons are: - **fewer IO ops:** DataFusion's streaming IO requires slightly more IO requests (at least 2 per file) due to the way it is optimized to read as little as possible. It first reads the metadata and then decides which content to fetch. In the compaction case this is (esp. w/o delete predicates) EVERYTHING. So in contrast to the querier, there is no advantage of this approach. In contrary this easily adds 100ms latency to every single input file. - **less traffic:** For divide&conquer partitions (i.e. when we need to run multiple compaction steps to deal with them) it is kinda pointless to upload an intermediate result just to download it again. The scratchpad avoids that. - **higher throughput:** We want to limit the number of concurrent DataFusion jobs because we don't wanna blow up the whole process by having too much in-flight arrow data at the same time. However while we perform the actual computation, we were waiting for object store IO. This was limiting our throughput substantially. - **shadow mode:** De-coupling the stores in this way makes it easier to implement #6645. Note that we assume here that the input parquet files are WAY SMALLER than the uncompressed Arrow data during compaction itself. Closes #6650. * fix: panic on shutdown * refactor: remove shadow scratchpad (for now) * refactor: make scratchpad safe to use
diff --git a/clap_blocks/src/compactor2.rs b/clap_blocks/src/compactor2.rs index a3a88a9d96..bc314d2ff5 100644 --- a/clap_blocks/src/compactor2.rs +++ b/clap_blocks/src/compactor2.rs @@ -27,6 +27,15 @@ pub struct Compactor2Config { )] pub compaction_job_concurrency: NonZeroUsize, + /// Number of jobs PER PARTITION that move files in and out of the scratchpad. + #[clap( + long = "compaction-partition-scratchpad-concurrency", + env = "INFLUXDB_IOX_COMPACTION_PARTITION_SCRATCHPAD_CONCURRENCY", + default_value = "10", + action + )] + pub compaction_partition_scratchpad_concurrency: NonZeroUsize, + /// Partitions with recent created files these last minutes are selected for compaction. #[clap( long = "compaction_partition_minute_threshold", diff --git a/compactor2/src/compactor_tests.rs b/compactor2/src/compactor_tests.rs index 5750c3c6cb..0eb348d1fa 100644 --- a/compactor2/src/compactor_tests.rs +++ b/compactor2/src/compactor_tests.rs @@ -4,6 +4,7 @@ mod tests { use arrow_util::assert_batches_sorted_eq; use data_types::CompactionLevel; + use iox_query::exec::ExecutorType; use tracker::AsyncSemaphoreMetrics; use crate::{ @@ -181,6 +182,20 @@ mod tests { let job_semaphore = Arc::new( Arc::new(AsyncSemaphoreMetrics::new(&config.metric_registry, [])).new_semaphore(10), ); + + // register scratchpad store + setup + .catalog + .exec() + .new_context(ExecutorType::Reorg) + .inner() + .runtime_env() + .register_object_store( + "iox", + config.parquet_store_scratchpad.id(), + Arc::clone(config.parquet_store_scratchpad.object_store()), + ); + compact( NonZeroUsize::new(10).unwrap(), Duration::from_secs(3_6000), diff --git a/compactor2/src/components/hardcoded.rs b/compactor2/src/components/hardcoded.rs index 54efbc8fbc..69bcb74613 100644 --- a/compactor2/src/components/hardcoded.rs +++ b/compactor2/src/components/hardcoded.rs @@ -44,6 +44,7 @@ use super::{ randomize_order::RandomizeOrderPartitionsSourcesWrapper, }, round_split::all_now::AllNowRoundSplit, + scratchpad::prod::ProdScratchpadGen, skipped_compactions_source::catalog::CatalogSkippedCompactionsSource, Components, }; @@ -123,7 +124,7 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> { Arc::clone(&config.catalog), )), df_planner: Arc::new(V1DataFusionPlanner::new( - config.parquet_store.clone(), + config.parquet_store_scratchpad.clone(), Arc::clone(&config.exec), config.max_desired_file_size_bytes, config.percentage_max_file_size, @@ -134,7 +135,7 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> { DedicatedExecParquetFileSinkWrapper::new( ObjectStoreParquetFileSink::new( config.shard_id, - config.parquet_store.clone(), + config.parquet_store_scratchpad.clone(), Arc::clone(&config.time_provider), ), Arc::clone(&config.exec), @@ -142,5 +143,11 @@ pub fn hardcoded_components(config: &Config) -> Arc<Components> { )), round_split: Arc::new(AllNowRoundSplit::new()), divide_initial: Arc::new(SingleBranchDivideInitial::new()), + scratchpad_gen: Arc::new(ProdScratchpadGen::new( + config.partition_scratchpad_concurrency, + config.backoff_config.clone(), + Arc::clone(config.parquet_store_real.object_store()), + Arc::clone(config.parquet_store_scratchpad.object_store()), + )), }) } diff --git a/compactor2/src/components/mod.rs b/compactor2/src/components/mod.rs index 61b45c2d30..7f246a1e77 100644 --- a/compactor2/src/components/mod.rs +++ b/compactor2/src/components/mod.rs @@ -5,7 +5,8 @@ use self::{ divide_initial::DivideInitial, files_filter::FilesFilter, namespaces_source::NamespacesSource, 
parquet_file_sink::ParquetFileSink, partition_done_sink::PartitionDoneSink, partition_files_source::PartitionFilesSource, partition_filter::PartitionFilter, - partitions_source::PartitionsSource, round_split::RoundSplit, tables_source::TablesSource, + partitions_source::PartitionsSource, round_split::RoundSplit, scratchpad::ScratchpadGen, + tables_source::TablesSource, }; pub mod commit; @@ -23,6 +24,7 @@ pub mod partition_filter; pub mod partitions_source; pub mod report; pub mod round_split; +pub mod scratchpad; pub mod skipped_compactions_source; pub mod tables_source; @@ -41,4 +43,5 @@ pub struct Components { pub parquet_file_sink: Arc<dyn ParquetFileSink>, pub round_split: Arc<dyn RoundSplit>, pub divide_initial: Arc<dyn DivideInitial>, + pub scratchpad_gen: Arc<dyn ScratchpadGen>, } diff --git a/compactor2/src/components/report.rs b/compactor2/src/components/report.rs index 35fd6e5fee..ad0e9285ae 100644 --- a/compactor2/src/components/report.rs +++ b/compactor2/src/components/report.rs @@ -21,6 +21,7 @@ pub fn log_components(components: &Components) { parquet_file_sink, round_split, divide_initial, + scratchpad_gen, } = components; info!( @@ -37,6 +38,7 @@ pub fn log_components(components: &Components) { %parquet_file_sink, %round_split, %divide_initial, + %scratchpad_gen, "component setup", ); } diff --git a/compactor2/src/components/scratchpad/mod.rs b/compactor2/src/components/scratchpad/mod.rs new file mode 100644 index 0000000000..c5f597c1cb --- /dev/null +++ b/compactor2/src/components/scratchpad/mod.rs @@ -0,0 +1,24 @@ +use std::fmt::{Debug, Display}; + +use async_trait::async_trait; +use parquet_file::ParquetFilePath; +use uuid::Uuid; + +// pub mod context; +pub mod prod; +mod util; + +#[cfg(test)] +mod test_util; + +pub trait ScratchpadGen: Debug + Display + Send + Sync { + fn pad(&self) -> Box<dyn Scratchpad>; +} + +#[async_trait] +pub trait Scratchpad: Debug + Send { + async fn load_to_scratchpad(&mut self, files: &[ParquetFilePath]) -> Vec<Uuid>; + async fn make_public(&mut self, files: &[ParquetFilePath]) -> Vec<Uuid>; + async fn clean_from_scratchpad(&mut self, files: &[ParquetFilePath]); + async fn clean(&mut self); +} diff --git a/compactor2/src/components/scratchpad/prod.rs b/compactor2/src/components/scratchpad/prod.rs new file mode 100644 index 0000000000..212e4c605c --- /dev/null +++ b/compactor2/src/components/scratchpad/prod.rs @@ -0,0 +1,391 @@ +use std::{collections::HashSet, fmt::Display, num::NonZeroUsize, sync::Arc}; + +use async_trait::async_trait; +use backoff::BackoffConfig; +use object_store::DynObjectStore; +use observability_deps::tracing::warn; +use parquet_file::ParquetFilePath; +use uuid::Uuid; + +use super::{ + util::{copy_files, delete_files}, + Scratchpad, ScratchpadGen, +}; + +#[derive(Debug)] +pub struct ProdScratchpadGen { + concurrency: NonZeroUsize, + backoff_config: BackoffConfig, + store_real: Arc<DynObjectStore>, + store_scratchpad: Arc<DynObjectStore>, +} + +impl ProdScratchpadGen { + pub fn new( + concurrency: NonZeroUsize, + backoff_config: BackoffConfig, + store_real: Arc<DynObjectStore>, + store_scratchpad: Arc<DynObjectStore>, + ) -> Self { + Self { + concurrency, + backoff_config, + store_real, + store_scratchpad, + } + } +} + +impl Display for ProdScratchpadGen { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "prod") + } +} + +impl ScratchpadGen for ProdScratchpadGen { + fn pad(&self) -> Box<dyn Scratchpad> { + Box::new(ProdScratchpad { + concurrency: self.concurrency, + backoff_config: 
self.backoff_config.clone(), + store_real: Arc::clone(&self.store_real), + store_scratchpad: Arc::clone(&self.store_scratchpad), + mask: Uuid::new_v4(), + files_unmasked: HashSet::default(), + }) + } +} + +#[derive(Debug)] +struct ProdScratchpad { + concurrency: NonZeroUsize, + backoff_config: BackoffConfig, + store_real: Arc<DynObjectStore>, + store_scratchpad: Arc<DynObjectStore>, + mask: Uuid, + files_unmasked: HashSet<ParquetFilePath>, +} + +impl ProdScratchpad { + fn apply_mask(&self, files: &[ParquetFilePath]) -> (Vec<ParquetFilePath>, Vec<Uuid>) { + files + .iter() + .map(|f| { + let uuid = Self::xor_uuids(f.objest_store_id(), self.mask); + let f = (*f).with_object_store_id(uuid); + (f, uuid) + }) + .unzip() + } + + fn xor_uuids(a: Uuid, b: Uuid) -> Uuid { + Uuid::from_u128(a.as_u128() ^ b.as_u128()) + } + + fn check_known( + &mut self, + files_unmasked: &[ParquetFilePath], + files_masked: &[ParquetFilePath], + ) -> (Vec<ParquetFilePath>, Vec<ParquetFilePath>) { + files_unmasked + .iter() + .zip(files_masked) + .filter(|(f_unmasked, _f_masked)| self.files_unmasked.insert(**f_unmasked)) + .unzip() + } +} + +impl Drop for ProdScratchpad { + fn drop(&mut self) { + if !self.files_unmasked.is_empty() { + warn!("scratchpad context not cleaned, may leak resources"); + + // clean up eventually + // Note: Use manual clean up code and do not create yet-another ProdScratchpad to avoid infinite recursions + // during drop. + let files = self.files_unmasked.drain().collect::<Vec<_>>(); + let (files_masked, _uuids) = self.apply_mask(&files); + let store_scratchpad = Arc::clone(&self.store_scratchpad); + let concurrency = self.concurrency; + let backoff_config = self.backoff_config.clone(); + tokio::spawn(async move { + delete_files( + &files_masked, + Arc::clone(&store_scratchpad), + &backoff_config, + concurrency, + ) + .await; + }); + } + } +} + +#[async_trait] +impl Scratchpad for ProdScratchpad { + async fn load_to_scratchpad(&mut self, files: &[ParquetFilePath]) -> Vec<Uuid> { + let (files_to, uuids) = self.apply_mask(files); + let (files_from, files_to) = self.check_known(files, &files_to); + copy_files( + &files_from, + &files_to, + Arc::clone(&self.store_real), + Arc::clone(&self.store_scratchpad), + &self.backoff_config, + self.concurrency, + ) + .await; + uuids + } + + async fn make_public(&mut self, files: &[ParquetFilePath]) -> Vec<Uuid> { + let (files_to, uuids) = self.apply_mask(files); + + // only keep files that we did not know about, all others we've already synced it between the two stores + let (files_to, files_from) = self.check_known(&files_to, files); + + copy_files( + &files_from, + &files_to, + Arc::clone(&self.store_scratchpad), + Arc::clone(&self.store_real), + &self.backoff_config, + self.concurrency, + ) + .await; + uuids + } + + async fn clean_from_scratchpad(&mut self, files: &[ParquetFilePath]) { + let (files_masked, _uuids) = self.apply_mask(files); + delete_files( + &files_masked, + Arc::clone(&self.store_scratchpad), + &self.backoff_config, + self.concurrency, + ) + .await; + } + + async fn clean(&mut self) { + let files: Vec<_> = self.files_unmasked.drain().collect(); + self.clean_from_scratchpad(&files).await; + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use test_helpers::tracing::TracingCapture; + + use crate::components::scratchpad::test_util::{ + assert_content, file_path, get_content, stores, + }; + + use super::*; + + #[test] + fn test_display() { + let (store_real, store_scratchpad) = stores(); + let gen = ProdScratchpadGen::new( + 
NonZeroUsize::new(1).unwrap(), + BackoffConfig::default(), + store_real, + store_scratchpad, + ); + assert_eq!(gen.to_string(), "prod"); + } + + #[tokio::test] + async fn test_staging() { + let (store_real, store_scratchpad) = stores(); + let gen = ProdScratchpadGen::new( + NonZeroUsize::new(1).unwrap(), + BackoffConfig::default(), + Arc::clone(&store_real), + Arc::clone(&store_scratchpad), + ); + let mut pad = gen.pad(); + + let f1 = file_path(1); + let f2 = file_path(2); + let f3 = file_path(3); + let f4 = file_path(4); + let f5_masked = file_path(5); + let f6_masked = file_path(6); + let f7_masked = file_path(7); + + for f in [&f1, &f2, &f3, &f4] { + store_real + .put(&f.object_store_path(), vec![].into()) + .await + .unwrap(); + } + + assert_content(&store_real, [&f1, &f2, &f3, &f4]).await; + assert_content(&store_scratchpad, []).await; + + let uuids = pad.load_to_scratchpad(&[f1, f2]).await; + assert_eq!(uuids.len(), 2); + let f1_masked = f1.with_object_store_id(uuids[0]); + let f2_masked = f2.with_object_store_id(uuids[1]); + + assert_content(&store_real, [&f1, &f2, &f3, &f4]).await; + assert_content(&store_scratchpad, [&f1_masked, &f2_masked]).await; + + let uuids = pad.load_to_scratchpad(&[f2, f3]).await; + assert_eq!(uuids.len(), 2); + assert_eq!(f2_masked.objest_store_id(), uuids[0]); + let f3_masked = f3.with_object_store_id(uuids[1]); + + assert_content(&store_real, [&f1, &f2, &f3, &f4]).await; + assert_content(&store_scratchpad, [&f1_masked, &f2_masked, &f3_masked]).await; + + for f in [&f5_masked, &f6_masked, &f7_masked] { + store_scratchpad + .put(&f.object_store_path(), vec![].into()) + .await + .unwrap(); + } + + assert_content(&store_real, [&f1, &f2, &f3, &f4]).await; + assert_content( + &store_scratchpad, + [ + &f1_masked, &f2_masked, &f3_masked, &f5_masked, &f6_masked, &f7_masked, + ], + ) + .await; + + let uuids = pad.make_public(&[f5_masked, f5_masked]).await; + assert_eq!(uuids.len(), 2); + let f5 = f5_masked.with_object_store_id(uuids[0]); + let f6 = f6_masked.with_object_store_id(uuids[1]); + + assert_content(&store_real, [&f1, &f2, &f3, &f4, &f5, &f6]).await; + assert_content( + &store_scratchpad, + [ + &f1_masked, &f2_masked, &f3_masked, &f5_masked, &f6_masked, &f7_masked, + ], + ) + .await; + + pad.clean_from_scratchpad(&[f1, f5]).await; + + assert_content(&store_real, [&f1, &f2, &f3, &f4, &f5, &f6]).await; + assert_content( + &store_scratchpad, + [&f2_masked, &f3_masked, &f6_masked, &f7_masked], + ) + .await; + + pad.clean().await; + + assert_content(&store_real, [&f1, &f2, &f3, &f4, &f5, &f6]).await; + assert_content(&store_scratchpad, [&f6_masked, &f7_masked]).await; // pad didn't know about these files + } + + #[tokio::test] + async fn test_collision() { + let (store_real, store_scratchpad) = stores(); + let gen = ProdScratchpadGen::new( + NonZeroUsize::new(1).unwrap(), + BackoffConfig::default(), + Arc::clone(&store_real), + Arc::clone(&store_scratchpad), + ); + + let mut pad1 = gen.pad(); + let mut pad2 = gen.pad(); + + let f = file_path(1); + + store_real + .put(&f.object_store_path(), Default::default()) + .await + .unwrap(); + + let uuids = pad1.load_to_scratchpad(&[f]).await; + assert_eq!(uuids.len(), 1); + let f_masked1 = f.with_object_store_id(uuids[0]); + + let uuids = pad2.load_to_scratchpad(&[f]).await; + assert_eq!(uuids.len(), 1); + let f_masked2 = f.with_object_store_id(uuids[0]); + + assert_content(&store_scratchpad, [&f_masked1, &f_masked2]).await; + + pad2.clean().await; + + assert_content(&store_scratchpad, [&f_masked1]).await; + } + + 
#[tokio::test] + async fn test_clean_on_drop() { + let (store_real, store_scratchpad) = stores(); + let gen = ProdScratchpadGen::new( + NonZeroUsize::new(1).unwrap(), + BackoffConfig::default(), + Arc::clone(&store_real), + Arc::clone(&store_scratchpad), + ); + let mut pad = gen.pad(); + + let f = file_path(1); + + store_real + .put(&f.object_store_path(), Default::default()) + .await + .unwrap(); + + pad.load_to_scratchpad(&[f]).await; + + let capture = TracingCapture::new(); + + drop(pad); + + // warning emitted + assert_eq!( + capture.to_string(), + "level = WARN; message = scratchpad context not cleaned, may leak resources; " + ); + + // eventually cleaned up + tokio::time::timeout(Duration::from_secs(5), async { + loop { + if get_content(&store_scratchpad).await.is_empty() { + return; + } + + tokio::time::sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("no timeout"); + } + + #[tokio::test] + #[should_panic(expected = "foo")] + async fn test_clean_does_not_crash_on_panic() { + let (store_real, store_scratchpad) = stores(); + let gen = ProdScratchpadGen::new( + NonZeroUsize::new(1).unwrap(), + BackoffConfig::default(), + Arc::clone(&store_real), + Arc::clone(&store_scratchpad), + ); + let mut pad = gen.pad(); + + let f = file_path(1); + + store_real + .put(&f.object_store_path(), Default::default()) + .await + .unwrap(); + + pad.load_to_scratchpad(&[f]).await; + + panic!("foo"); + } +} diff --git a/compactor2/src/components/scratchpad/test_util.rs b/compactor2/src/components/scratchpad/test_util.rs new file mode 100644 index 0000000000..a5554aad39 --- /dev/null +++ b/compactor2/src/components/scratchpad/test_util.rs @@ -0,0 +1,44 @@ +use std::{collections::HashSet, sync::Arc}; + +use data_types::{NamespaceId, PartitionId, ShardId, TableId}; +use futures::TryStreamExt; +use object_store::{memory::InMemory, path::Path, DynObjectStore}; +use parquet_file::ParquetFilePath; +use uuid::Uuid; + +pub fn stores() -> (Arc<DynObjectStore>, Arc<DynObjectStore>) { + (Arc::new(InMemory::new()), Arc::new(InMemory::new())) +} + +pub fn file_path(i: u128) -> ParquetFilePath { + ParquetFilePath::new( + NamespaceId::new(1), + TableId::new(1), + ShardId::new(1), + PartitionId::new(1), + Uuid::from_u128(i), + ) +} + +pub async fn get_content(store: &Arc<DynObjectStore>) -> HashSet<Path> { + store + .list(None) + .await + .unwrap() + .map_ok(|f| f.location) + .try_collect::<HashSet<_>>() + .await + .unwrap() +} + +pub async fn assert_content<const N: usize>( + store: &Arc<DynObjectStore>, + files: [&ParquetFilePath; N], +) { + let expected = files + .iter() + .map(|f| f.object_store_path()) + .collect::<HashSet<_>>(); + let actual = get_content(store).await; + assert_eq!(actual, expected); +} diff --git a/compactor2/src/components/scratchpad/util.rs b/compactor2/src/components/scratchpad/util.rs new file mode 100644 index 0000000000..89866fccb5 --- /dev/null +++ b/compactor2/src/components/scratchpad/util.rs @@ -0,0 +1,63 @@ +use std::{num::NonZeroUsize, sync::Arc}; + +use backoff::{Backoff, BackoffConfig}; +use futures::StreamExt; +use object_store::DynObjectStore; +use parquet_file::ParquetFilePath; + +pub async fn copy_files( + files_in: &[ParquetFilePath], + files_out: &[ParquetFilePath], + from: Arc<DynObjectStore>, + to: Arc<DynObjectStore>, + backoff_config: &BackoffConfig, + concurrency: NonZeroUsize, +) { + futures::stream::iter(files_in.iter().copied().zip(files_out.to_vec())) + .map(|(f_in, f_out)| { + let backoff_config = backoff_config.clone(); + let from = Arc::clone(&from); 
+ let to = Arc::clone(&to); + let path_in = f_in.object_store_path(); + let path_out = f_out.object_store_path(); + + async move { + Backoff::new(&backoff_config) + .retry_all_errors("copy file", || async { + let bytes = from.get(&path_in).await?.bytes().await?; + to.put(&path_out, bytes).await?; + Ok::<_, object_store::Error>(()) + }) + .await + .expect("retry forever") + } + }) + .buffer_unordered(concurrency.get()) + .collect::<()>() + .await; +} + +pub async fn delete_files( + files: &[ParquetFilePath], + store: Arc<DynObjectStore>, + backoff_config: &BackoffConfig, + concurrency: NonZeroUsize, +) { + // Note: `files.to_vec()` is required to avoid rustc freaking out about lifetimes + futures::stream::iter(files.to_vec()) + .map(|f| { + let backoff_config = backoff_config.clone(); + let store = Arc::clone(&store); + let path = f.object_store_path(); + + async move { + Backoff::new(&backoff_config) + .retry_all_errors("delete file", || async { store.delete(&path).await }) + .await + .expect("retry forever") + } + }) + .buffer_unordered(concurrency.get()) + .collect::<()>() + .await; +} diff --git a/compactor2/src/config.rs b/compactor2/src/config.rs index 58c423ee76..074af3040c 100644 --- a/compactor2/src/config.rs +++ b/compactor2/src/config.rs @@ -20,8 +20,11 @@ pub struct Config { /// Central catalog. pub catalog: Arc<dyn Catalog>, - /// Store holding the parquet files. - pub parquet_store: ParquetStorage, + /// Store holding the actual parquet files. + pub parquet_store_real: ParquetStorage, + + /// Store holding temporary files. + pub parquet_store_scratchpad: ParquetStorage, /// Executor. pub exec: Arc<Executor>, @@ -42,6 +45,9 @@ pub struct Config { /// This should usually be smaller than the partition concurrency since one partition can spawn multiple compaction jobs. pub job_concurrency: NonZeroUsize, + /// Number of jobs PER PARTITION that move files in and out of the scratchpad. + pub partition_scratchpad_concurrency: NonZeroUsize, + /// Partitions with recent created files these last minutes are selected for compaction. 
pub partition_minute_threshold: u64, diff --git a/compactor2/src/driver.rs b/compactor2/src/driver.rs index c2431a0326..18309f509b 100644 --- a/compactor2/src/driver.rs +++ b/compactor2/src/driver.rs @@ -3,9 +3,13 @@ use std::{future::Future, num::NonZeroUsize, sync::Arc, time::Duration}; use data_types::{CompactionLevel, ParquetFile, ParquetFileParams, PartitionId}; use datafusion::physical_plan::SendableRecordBatchStream; use futures::{stream::FuturesOrdered, StreamExt, TryFutureExt, TryStreamExt}; +use parquet_file::ParquetFilePath; use tracker::InstrumentedAsyncSemaphore; -use crate::{components::Components, partition_info::PartitionInfo}; +use crate::{ + components::{scratchpad::Scratchpad, Components}, + partition_info::PartitionInfo, +}; // TODO: modify this comments accordingly as we go // Currently, we only compact files of level_n with level_n+1 and produce level_n+1 files, @@ -49,9 +53,16 @@ async fn compact_partition( job_semaphore: Arc<InstrumentedAsyncSemaphore>, components: Arc<Components>, ) { + let mut scratchpad = components.scratchpad_gen.pad(); + let res = tokio::time::timeout( partition_timeout, - try_compact_partition(partition_id, job_semaphore, Arc::clone(&components)), + try_compact_partition( + partition_id, + job_semaphore, + Arc::clone(&components), + scratchpad.as_mut(), + ), ) .await; let res = match res { @@ -62,6 +73,8 @@ async fn compact_partition( .partition_done_sink .record(partition_id, res) .await; + + scratchpad.clean().await; } type Error = Box<dyn std::error::Error + Send + Sync>; @@ -70,6 +83,7 @@ async fn try_compact_partition( partition_id: PartitionId, job_semaphore: Arc<InstrumentedAsyncSemaphore>, components: Arc<Components>, + scratchpad_ctx: &mut dyn Scratchpad, ) -> Result<(), Error> { let mut files = components.partition_files_source.fetch(partition_id).await; @@ -101,6 +115,18 @@ async fn try_compact_partition( while let Some(branch) = branches.pop() { let delete_ids = branch.iter().map(|f| f.id).collect::<Vec<_>>(); + // stage files + let input_paths: Vec<ParquetFilePath> = branch.iter().map(|f| f.into()).collect(); + let input_uuids_inpad = scratchpad_ctx.load_to_scratchpad(&input_paths).await; + let branch_inpad: Vec<_> = branch + .into_iter() + .zip(input_uuids_inpad) + .map(|(f, uuid)| ParquetFile { + object_store_id: uuid, + ..f + }) + .collect(); + let create = { // draw semaphore BEFORE creating the DataFusion plan and drop it directly AFTER finishing the // DataFusion computation (but BEFORE doing any additional external IO). @@ -121,7 +147,7 @@ async fn try_compact_partition( let target_level = CompactionLevel::FileNonOverlapped; let plan = components .df_planner - .plan(branch, Arc::clone(partition_info), target_level) + .plan(branch_inpad, Arc::clone(partition_info), target_level) .await?; let streams = components.df_plan_exec.exec(plan); let job = stream_into_file_sink( @@ -135,6 +161,21 @@ async fn try_compact_partition( job.await? 
}; + // upload files to real object store + let output_files: Vec<ParquetFilePath> = create.iter().map(|p| p.into()).collect(); + let output_uuids = scratchpad_ctx.make_public(&output_files).await; + let create: Vec<_> = create + .into_iter() + .zip(output_uuids) + .map(|(f, uuid)| ParquetFileParams { + object_store_id: uuid, + ..f + }) + .collect(); + + // clean scratchpad + scratchpad_ctx.clean_from_scratchpad(&input_paths).await; + let ids = components.commit.commit(&delete_ids, &create).await; files_next.extend( diff --git a/compactor2/src/test_util.rs b/compactor2/src/test_util.rs index 16f7bd199d..12323bccc2 100644 --- a/compactor2/src/test_util.rs +++ b/compactor2/src/test_util.rs @@ -9,6 +9,7 @@ use data_types::{ use datafusion::arrow::record_batch::RecordBatch; use iox_tests::util::{TestCatalog, TestParquetFileBuilder, TestTable}; use iox_time::{SystemProvider, TimeProvider}; +use parquet_file::storage::{ParquetStorage, StorageId}; use schema::sort::SortKey; use uuid::Uuid; @@ -405,12 +406,17 @@ impl TestSetup { shard_id: shard.shard.id, metric_registry: catalog.metric_registry(), catalog: catalog.catalog(), - parquet_store: catalog.parquet_store.clone(), + parquet_store_real: catalog.parquet_store.clone(), + parquet_store_scratchpad: ParquetStorage::new( + Arc::new(object_store::memory::InMemory::new()), + StorageId::from("scratchpad"), + ), time_provider: Arc::<iox_time::MockProvider>::clone(&catalog.time_provider), exec: Arc::clone(&catalog.exec), backoff_config: BackoffConfig::default(), partition_concurrency: NonZeroUsize::new(1).unwrap(), job_concurrency: NonZeroUsize::new(1).unwrap(), + partition_scratchpad_concurrency: NonZeroUsize::new(1).unwrap(), partition_minute_threshold: PARTITION_MINUTE_THRESHOLD, max_desired_file_size_bytes: MAX_DESIRE_FILE_SIZE, percentage_max_file_size: PERCENTAGE_MAX_FILE_SIZE, diff --git a/influxdb_iox/src/commands/run/compactor2.rs b/influxdb_iox/src/commands/run/compactor2.rs index 38b93e3563..c3ce923f46 100644 --- a/influxdb_iox/src/commands/run/compactor2.rs +++ b/influxdb_iox/src/commands/run/compactor2.rs @@ -6,7 +6,6 @@ use object_store::DynObjectStore; use object_store_metrics::ObjectStoreMetrics; use observability_deps::tracing::*; use parquet_file::storage::{ParquetStorage, StorageId}; -use std::collections::HashMap; use std::sync::Arc; use thiserror::Error; @@ -96,15 +95,19 @@ pub async fn command(config: Config) -> Result<(), Error> { &metric_registry, )); - let parquet_store = ParquetStorage::new(object_store, StorageId::from("iox")); + let parquet_store_real = ParquetStorage::new(object_store, StorageId::from("iox")); + let parquet_store_scratchpad = ParquetStorage::new( + Arc::new(object_store::memory::InMemory::new()), + StorageId::from("iox_scratchpad"), + ); let exec = Arc::new(Executor::new_with_config(ExecutorConfig { num_threads: config.compactor_config.query_exec_thread_count, target_query_partitions: config.compactor_config.query_exec_thread_count, - object_stores: HashMap::from([( - parquet_store.id(), - Arc::clone(parquet_store.object_store()), - )]), + object_stores: [&parquet_store_real, &parquet_store_scratchpad] + .into_iter() + .map(|store| (store.id(), Arc::clone(store.object_store()))) + .collect(), mem_pool_size: config.compactor_config.exec_mem_pool_bytes, })); let time_provider = Arc::new(SystemProvider::new()); @@ -113,7 +116,8 @@ pub async fn command(config: Config) -> Result<(), Error> { &common_state, Arc::clone(&metric_registry), catalog, - parquet_store, + parquet_store_real, + parquet_store_scratchpad, 
exec, time_provider, config.compactor_config, diff --git a/ioxd_compactor2/src/lib.rs b/ioxd_compactor2/src/lib.rs index e0ad527e2b..5b69a63917 100644 --- a/ioxd_compactor2/src/lib.rs +++ b/ioxd_compactor2/src/lib.rs @@ -124,11 +124,13 @@ impl HttpApiErrorSource for IoxHttpError { } /// Instantiate a compactor2 server that uses the RPC write path +#[allow(clippy::too_many_arguments)] pub async fn create_compactor2_server_type( common_state: &CommonServerState, metric_registry: Arc<metric::Registry>, catalog: Arc<dyn Catalog>, - parquet_store: ParquetStorage, + parquet_store_real: ParquetStorage, + parquet_store_scratchpad: ParquetStorage, exec: Arc<Executor>, time_provider: Arc<dyn TimeProvider>, compactor_config: Compactor2Config, @@ -145,12 +147,15 @@ pub async fn create_compactor2_server_type( shard_id, metric_registry: Arc::clone(&metric_registry), catalog, - parquet_store, + parquet_store_real, + parquet_store_scratchpad, exec, time_provider, backoff_config, partition_concurrency: compactor_config.compaction_partition_concurrency, job_concurrency: compactor_config.compaction_job_concurrency, + partition_scratchpad_concurrency: compactor_config + .compaction_partition_scratchpad_concurrency, partition_minute_threshold: compactor_config.compaction_partition_minute_threshold, max_desired_file_size_bytes: compactor_config.max_desired_file_size_bytes, percentage_max_file_size: compactor_config.percentage_max_file_size, diff --git a/parquet_file/src/lib.rs b/parquet_file/src/lib.rs index b35d705687..ef37c9d1f1 100644 --- a/parquet_file/src/lib.rs +++ b/parquet_file/src/lib.rs @@ -20,7 +20,7 @@ pub mod metadata; pub mod serialize; pub mod storage; -use data_types::{NamespaceId, ParquetFile, PartitionId, ShardId, TableId}; +use data_types::{NamespaceId, ParquetFile, ParquetFileParams, PartitionId, ShardId, TableId}; use object_store::path::Path; use uuid::Uuid; @@ -71,6 +71,19 @@ impl ParquetFilePath { &format!("{}.parquet", object_store_id), ]) } + + /// Get object store ID. + pub fn objest_store_id(&self) -> Uuid { + self.object_store_id + } + + /// Set new object store ID. + pub fn with_object_store_id(self, object_store_id: Uuid) -> Self { + Self { + object_store_id, + ..self + } + } } impl From<&Self> for ParquetFilePath { @@ -103,6 +116,18 @@ impl From<&ParquetFile> for ParquetFilePath { } } +impl From<&ParquetFileParams> for ParquetFilePath { + fn from(f: &ParquetFileParams) -> Self { + Self { + namespace_id: f.namespace_id, + table_id: f.table_id, + shard_id: f.shard_id, + partition_id: f.partition_id, + object_store_id: f.object_store_id, + } + } +} + #[cfg(test)] mod tests { use super::*;
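One detail worth calling out from the scratchpad implementation is the per-pad UUID mask: XOR-ing a file's object store ID with a random mask gives each concurrent scratchpad its own non-colliding namespace, and because XOR is its own inverse the same operation maps a masked ID back to the original. A minimal, self-contained sketch of just that property (assumes the `uuid` crate with the `v4` feature):

```rust
use uuid::Uuid;

/// Same shape as the scratchpad's mask helper: XOR the 128-bit values.
fn xor_uuids(a: Uuid, b: Uuid) -> Uuid {
    Uuid::from_u128(a.as_u128() ^ b.as_u128())
}

fn main() {
    let real_id = Uuid::new_v4(); // ID of a parquet file in the real store
    let mask = Uuid::new_v4(); // one random mask per scratchpad instance

    // Masking produces a distinct ID (barring an astronomically unlikely
    // all-zero mask), and applying the mask again recovers the original.
    let masked = xor_uuids(real_id, mask);
    assert_ne!(masked, real_id);
    assert_eq!(xor_uuids(masked, mask), real_id);

    // Two scratchpads with different masks map the same file to different
    // scratchpad IDs, so they never trample each other's staged copies.
    let other_mask = Uuid::new_v4();
    assert_ne!(xor_uuids(real_id, mask), xor_uuids(real_id, other_mask));
}
```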
493b26831d8c0642cceaccee61e36f340fc69e85
Christopher M. Wolff
2023-05-01 13:18:05
make influx RPC interface break up series into multiple frames (#7691)
* fix: make influx RPC interface break up series into multiple frames * refactor: code review feedback ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
fix: make influx RPC interface break up series into multiple frames (#7691) * fix: make influx RPC interface break up series into multiple frames * refactor: code review feedback --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs index 3e1e80799f..d654cdc1f4 100644 --- a/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs +++ b/influxdb_iox/tests/end_to_end_cases/querier/influxrpc/read_filter.rs @@ -5,6 +5,7 @@ use generated_types::{ node::Logical, read_response::frame::Data, storage_client::StorageClient, ReadFilterRequest, }; use influxdb_iox_client::connection::GrpcConnection; +use itertools::Itertools; use std::sync::Arc; use test_helpers_end_to_end::{ maybe_skip_integration, DataGenerator, GrpcRequestBuilder, MiniCluster, Step, StepTest, @@ -306,6 +307,55 @@ pub async fn read_filter_periods_multi_field_predicate2() { .await } +// Test for https://github.com/influxdata/influxdb_iox/issues/7663 +#[tokio::test] +pub async fn read_filter_multi_data_frame() { + // there will be 1000 points per frame, so + // this number of points will return 3 frames + // with the last containing just one point. + let num_points: i64 = 2001; + let input_lines: Vec<String> = (0..num_points) + .map(|i| { + format!( + "h2o,tagk0=tagv0 f0={} {}", + i, + 1_000_000_000 + i * 1_000_000_000 + ) + }) + .collect(); + let input_lines: Vec<&str> = input_lines.iter().map(String::as_ref).collect(); + + let mut expected = vec!["SeriesFrame, tags: _field=f0,_measurement=h2o,tagk0=tagv0, type: 0"]; + let ts_vec = (0..num_points) + .map(|i| 1_000_000_000 + i * 1_000_000_000) + .collect::<Vec<_>>(); + let values_vec = (0..num_points).collect::<Vec<_>>(); + let mut data_frames: Vec<String> = vec![]; + for (ts_chunk, v_chunk) in ts_vec.chunks(1000).zip(values_vec.chunks(1000)) { + let ts_str = ts_chunk.iter().map(|ts| ts.to_string()).join(", "); + let v_str = v_chunk.iter().map(|v| v.to_string()).join(","); + data_frames.push(format!( + "FloatPointsFrame, timestamps: [{}], values: \"{}\"", + ts_str, v_str + )); + } + data_frames + .iter() + .for_each(|line| expected.push(line.as_ref())); + + // response should be broken into three frames + assert_eq!(3, data_frames.len()); + + do_read_filter_test( + input_lines, + GrpcRequestBuilder::new() + .field_predicate("f0") + .timestamp_range(0, num_points * 1_000_000_000_000), + expected, + ) + .await +} + /// Sends the specified line protocol to a server with the timestamp/ predicate /// predicate, and compares it against expected frames async fn do_read_filter_test( diff --git a/iox_query/src/exec/context.rs b/iox_query/src/exec/context.rs index 9d62f56a5e..b4c282329c 100644 --- a/iox_query/src/exec/context.rs +++ b/iox_query/src/exec/context.rs @@ -55,7 +55,7 @@ use executor::DedicatedExecutor; use futures::{Stream, StreamExt, TryStreamExt}; use observability_deps::tracing::{debug, warn}; use query_functions::{register_scalar_functions, selectors::register_selector_aggregates}; -use std::{convert::TryInto, fmt, num::NonZeroUsize, sync::Arc}; +use std::{fmt, num::NonZeroUsize, sync::Arc}; use trace::{ ctx::SpanContext, span::{MetaValue, Span, SpanExt, SpanRecorder}, @@ -484,6 +484,7 @@ impl IOxSessionContext { &self, series_set_plans: SeriesSetPlans, memory_pool: Arc<dyn MemoryPool>, + points_per_batch: usize, ) -> Result<impl Stream<Item = Result<Either>>> { let SeriesSetPlans { mut plans, @@ -530,7 +531,7 @@ impl IOxSessionContext { } }) .try_flatten() - .try_filter_map(|series_set: SeriesSet| async move { + .try_filter_map(move |series_set: SeriesSet| async move { // If all timestamps of returned columns are nulls, // there must be no 
data. We need to check this because // aggregate (e.g. count, min, max) returns one row that are @@ -542,7 +543,7 @@ impl IOxSessionContext { } let series: Vec<Series> = series_set - .try_into() + .try_into_series(points_per_batch) .map_err(|e| Error::Execution(format!("Error converting to series: {e}")))?; Ok(Some(futures::stream::iter(series).map(Ok))) }) diff --git a/iox_query/src/exec/seriesset/converter.rs b/iox_query/src/exec/seriesset/converter.rs index 08efe0e7f8..8e3b58502c 100644 --- a/iox_query/src/exec/seriesset/converter.rs +++ b/iox_query/src/exec/seriesset/converter.rs @@ -851,7 +851,7 @@ mod tests { use itertools::Itertools; use test_helpers::str_vec_to_arc_vec; - use crate::exec::seriesset::series::{Data, Tag}; + use crate::exec::seriesset::series::{Batch, Data, Tag}; use super::*; @@ -1612,10 +1612,10 @@ mod tests { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::FloatPoints { + data: Data::FloatPoints(vec![Batch { timestamps: vec![], values: vec![], - }, + }]), })]); let err = match ggen.group(input).await { Ok(stream) => stream.try_collect::<Vec<_>>().await.unwrap_err(), @@ -1635,40 +1635,40 @@ mod tests { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![1], values: vec![1], - }, + }]), }), Ok(Series { tags: vec![Tag { key: Arc::from("g"), value: Arc::from("y"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![2], values: vec![2], - }, + }]), }), Ok(Series { tags: vec![Tag { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![3], values: vec![3], - }, + }]), }), Ok(Series { tags: vec![Tag { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![4], values: vec![4], - }, + }]), }), ]); let actual = ggen @@ -1688,30 +1688,30 @@ mod tests { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![1], values: vec![1], - }, + }]), }), Either::Series(Series { tags: vec![Tag { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![3], values: vec![3], - }, + }]), }), Either::Series(Series { tags: vec![Tag { key: Arc::from("g"), value: Arc::from("x"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![4], values: vec![4], - }, + }]), }), Either::Group(Group { tag_keys: vec![Arc::from("g")], @@ -1722,10 +1722,10 @@ mod tests { key: Arc::from("g"), value: Arc::from("y"), }], - data: Data::IntegerPoints { + data: Data::IntegerPoints(vec![Batch { timestamps: vec![2], values: vec![2], - }, + }]), }), ]; assert_eq!(actual, expected); diff --git a/iox_query/src/exec/seriesset/series.rs b/iox_query/src/exec/seriesset/series.rs index 5caff417d5..cc0e5042d9 100644 --- a/iox_query/src/exec/seriesset/series.rs +++ b/iox_query/src/exec/seriesset/series.rs @@ -1,7 +1,7 @@ //! This module contains the native Rust version of the Data frames //! that are sent back in the storage gRPC format. 
-use std::{convert::TryFrom, fmt, sync::Arc}; +use std::{fmt, sync::Arc}; use arrow::{ array::{ @@ -59,6 +59,16 @@ pub struct Series { } impl Series { + pub fn num_batches(&self) -> usize { + match &self.data { + Data::FloatPoints(batches) => batches.len(), + Data::IntegerPoints(batches) => batches.len(), + Data::UnsignedPoints(batches) => batches.len(), + Data::BooleanPoints(batches) => batches.len(), + Data::StringPoints(batches) => batches.len(), + } + } + /// Memory usage in bytes, including `self`. pub fn size(&self) -> usize { std::mem::size_of_val(self) @@ -93,109 +103,43 @@ impl fmt::Display for Series { /// Typed data for a particular timeseries #[derive(Clone, Debug)] pub enum Data { - FloatPoints { - timestamps: Vec<i64>, - values: Vec<f64>, - }, - - IntegerPoints { - timestamps: Vec<i64>, - values: Vec<i64>, - }, - - UnsignedPoints { - timestamps: Vec<i64>, - values: Vec<u64>, - }, - - BooleanPoints { - timestamps: Vec<i64>, - values: Vec<bool>, - }, - - StringPoints { - timestamps: Vec<i64>, - values: Vec<String>, - }, + FloatPoints(Vec<Batch<f64>>), + IntegerPoints(Vec<Batch<i64>>), + UnsignedPoints(Vec<Batch<u64>>), + BooleanPoints(Vec<Batch<bool>>), + StringPoints(Vec<Batch<String>>), } impl Data { /// Memory usage in bytes, including `self`. pub fn size(&self) -> usize { - std::mem::size_of_val(self) - + match self { - Self::FloatPoints { timestamps, values } => { - primitive_vec_size(timestamps) + primitive_vec_size(values) - } - Self::IntegerPoints { timestamps, values } => { - primitive_vec_size(timestamps) + primitive_vec_size(values) - } - Self::UnsignedPoints { timestamps, values } => { - primitive_vec_size(timestamps) + primitive_vec_size(values) - } - Self::BooleanPoints { timestamps, values } => { - primitive_vec_size(timestamps) + primitive_vec_size(values) - } - Self::StringPoints { timestamps, values } => { - primitive_vec_size(timestamps) + primitive_vec_size(values) - } - } + let data_sz: usize = match self { + Self::FloatPoints(points_vec) => points_vec.iter().map(|ps| ps.size()).sum(), + Self::IntegerPoints(points_vec) => points_vec.iter().map(|ps| ps.size()).sum(), + Self::UnsignedPoints(points_vec) => points_vec.iter().map(|ps| ps.size()).sum(), + Self::BooleanPoints(points_vec) => points_vec.iter().map(|ps| ps.size()).sum(), + Self::StringPoints(points_vec) => points_vec.iter().map(|ps| ps.size()).sum(), + }; + std::mem::size_of_val(self) + data_sz } } impl PartialEq for Data { fn eq(&self, other: &Self) -> bool { match (self, other) { - ( - Self::FloatPoints { - timestamps: l_timestamps, - values: l_values, - }, - Self::FloatPoints { - timestamps: r_timestamps, - values: r_values, - }, - ) => l_timestamps == r_timestamps && l_values == r_values, - ( - Self::IntegerPoints { - timestamps: l_timestamps, - values: l_values, - }, - Self::IntegerPoints { - timestamps: r_timestamps, - values: r_values, - }, - ) => l_timestamps == r_timestamps && l_values == r_values, - ( - Self::UnsignedPoints { - timestamps: l_timestamps, - values: l_values, - }, - Self::UnsignedPoints { - timestamps: r_timestamps, - values: r_values, - }, - ) => l_timestamps == r_timestamps && l_values == r_values, - ( - Self::BooleanPoints { - timestamps: l_timestamps, - values: l_values, - }, - Self::BooleanPoints { - timestamps: r_timestamps, - values: r_values, - }, - ) => l_timestamps == r_timestamps && l_values == r_values, - ( - Self::StringPoints { - timestamps: l_timestamps, - values: l_values, - }, - Self::StringPoints { - timestamps: r_timestamps, - values: r_values, - }, - ) => 
l_timestamps == r_timestamps && l_values == r_values, + (Self::FloatPoints(l_batches), Self::FloatPoints(r_batches)) => l_batches == r_batches, + (Self::IntegerPoints(l_batches), Self::IntegerPoints(r_batches)) => { + l_batches == r_batches + } + (Self::UnsignedPoints(l_batches), Self::UnsignedPoints(r_batches)) => { + l_batches == r_batches + } + (Self::BooleanPoints(l_batches), Self::BooleanPoints(r_batches)) => { + l_batches == r_batches + } + (Self::StringPoints(l_batches), Self::StringPoints(r_batches)) => { + l_batches == r_batches + } _ => false, } } @@ -211,41 +155,26 @@ fn primitive_vec_size<T>(vec: &Vec<T>) -> usize { impl fmt::Display for Data { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Self::FloatPoints { timestamps, values } => write!( - f, - "FloatPoints timestamps: {timestamps:?}, values: {values:?}" - ), - Self::IntegerPoints { timestamps, values } => write!( - f, - "IntegerPoints timestamps: {timestamps:?}, values: {values:?}" - ), - Self::UnsignedPoints { timestamps, values } => write!( - f, - "UnsignedPoints timestamps: {timestamps:?}, values: {values:?}" - ), - Self::BooleanPoints { timestamps, values } => write!( - f, - "BooleanPoints timestamps: {timestamps:?}, values: {values:?}" - ), - Self::StringPoints { timestamps, values } => write!( - f, - "StringPoints timestamps: {timestamps:?}, values: {values:?}" - ), + Self::FloatPoints(batches) => write!(f, "FloatPoints batches: {batches:?}"), + Self::IntegerPoints(batches) => write!(f, "IntegerPoints batches: {batches:?}"), + Self::UnsignedPoints(batches) => write!(f, "UnsignedPoints batches: {batches:?}"), + Self::BooleanPoints(batches) => write!(f, "BooleanPoints batches: {batches:?}"), + Self::StringPoints(batches) => write!(f, "StringPoints batches: {batches:?}"), } } } -impl TryFrom<SeriesSet> for Vec<Series> { - type Error = Error; +#[derive(Clone, Debug, PartialEq)] +pub struct Batch<T> { + pub timestamps: Vec<i64>, + pub values: Vec<T>, +} - /// Converts a particular SeriesSet into a Vec of Series. 
Note the - /// order is important - fn try_from(value: SeriesSet) -> Result<Self, Self::Error> { - value - .field_indexes - .iter() - .filter_map(|index| value.field_to_series(index).transpose()) - .collect() +impl<T> Batch<T> { + fn size(&self) -> usize { + std::mem::size_of_val(self) + + primitive_vec_size(&self.timestamps) + + primitive_vec_size(&self.values) } } @@ -263,9 +192,16 @@ impl SeriesSet { }) } + pub fn try_into_series(self, batch_size: usize) -> Result<Vec<Series>> { + self.field_indexes + .iter() + .filter_map(|index| self.field_to_series(index, batch_size).transpose()) + .collect() + } + // Convert and append the values from a single field to a Series // appended to `frames` - fn field_to_series(&self, index: &FieldIndex) -> Result<Option<Series>> { + fn field_to_series(&self, index: &FieldIndex, batch_size: usize) -> Result<Option<Series>> { let batch = self.batch.slice(self.start_row, self.num_rows); let schema = batch.schema(); @@ -288,59 +224,49 @@ impl SeriesSet { .as_any() .downcast_ref::<TimestampNanosecondArray>() .unwrap() - .extract_values(); + .extract_batched_values(batch_size); timestamps.shrink_to_fit(); let data = match array.data_type() { ArrowDataType::Utf8 => { - let mut values = array + let values = array .as_any() .downcast_ref::<StringArray>() .unwrap() - .extract_values(); - values.shrink_to_fit(); - - Data::StringPoints { timestamps, values } + .extract_batched_values(batch_size); + Data::StringPoints(build_batches(timestamps, values)) } ArrowDataType::Float64 => { - let mut values = array + let values = array .as_any() .downcast_ref::<Float64Array>() .unwrap() - .extract_values(); - values.shrink_to_fit(); - - Data::FloatPoints { timestamps, values } + .extract_batched_values(batch_size); + Data::FloatPoints(build_batches(timestamps, values)) } ArrowDataType::Int64 => { - let mut values = array + let values = array .as_any() .downcast_ref::<Int64Array>() .unwrap() - .extract_values(); - values.shrink_to_fit(); - - Data::IntegerPoints { timestamps, values } + .extract_batched_values(batch_size); + Data::IntegerPoints(build_batches(timestamps, values)) } ArrowDataType::UInt64 => { - let mut values = array + let values = array .as_any() .downcast_ref::<UInt64Array>() .unwrap() - .extract_values(); - values.shrink_to_fit(); - - Data::UnsignedPoints { timestamps, values } + .extract_batched_values(batch_size); + Data::UnsignedPoints(build_batches(timestamps, values)) } ArrowDataType::Boolean => { - let mut values = array + let values = array .as_any() .downcast_ref::<BooleanArray>() .unwrap() - .extract_values(); - values.shrink_to_fit(); - - Data::BooleanPoints { timestamps, values } + .extract_batched_values(batch_size); + Data::BooleanPoints(build_batches(timestamps, values)) } _ => { return UnsupportedDataTypeSnafu { @@ -385,6 +311,15 @@ impl SeriesSet { } } +/// Zip together nested vectors of timestamps and values to create batches of points +fn build_batches<T>(timestamps: Vec<Vec<i64>>, values: Vec<Vec<T>>) -> Vec<Batch<T>> { + timestamps + .into_iter() + .zip(values.into_iter()) + .map(|(timestamps, values)| Batch { timestamps, values }) + .collect() +} + /// Represents a group of `Series` #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Group { @@ -452,58 +387,84 @@ fn fmt_strings(f: &mut fmt::Formatter<'_>, strings: &[Arc<str>]) -> fmt::Result }) } -trait ExtractValues<T> { +trait ExtractBatchedValues<T> { /// Extracts rows as a vector, /// for all rows `i` where `valid[i]` is set - fn extract_values(&self) -> Vec<T>; + fn 
extract_batched_values(&self, batch_size: usize) -> Vec<Vec<T>>; } -/// Implements extract_values for a particular type of array that -macro_rules! extract_values_impl { +/// Implements extract_batched_values for Arrow arrays. +macro_rules! extract_batched_values_impl { ($DATA_TYPE:ty) => { - fn extract_values(&self) -> Vec<$DATA_TYPE> { - self.iter().flatten().collect() + extract_batched_values_impl! { $DATA_TYPE, identity } + }; + ($DATA_TYPE:ty, $ITER_ADAPTER:expr) => { + fn extract_batched_values(&self, batch_size: usize) -> Vec<Vec<$DATA_TYPE>> { + let num_batches = 1 + self.len() / batch_size; + let mut batches = Vec::with_capacity(num_batches); + + let mut v = Vec::with_capacity(batch_size); + for e in $ITER_ADAPTER(self.iter().flatten()) { + if v.len() >= batch_size { + batches.push(v); + v = Vec::with_capacity(batch_size); + } + v.push(e); + } + if !v.is_empty() { + v.shrink_to_fit(); + batches.push(v); + } + batches.shrink_to_fit(); + batches } }; } -impl ExtractValues<String> for StringArray { - fn extract_values(&self) -> Vec<String> { - self.iter().flatten().map(str::to_string).collect() - } +fn identity<T>(t: T) -> T { + t +} + +fn to_owned_string<'a, I>(i: I) -> impl Iterator<Item = String> +where + I: Iterator<Item = &'a str>, +{ + i.map(str::to_string) +} + +impl ExtractBatchedValues<String> for StringArray { + extract_batched_values_impl! { String, to_owned_string } } -impl ExtractValues<i64> for Int64Array { - extract_values_impl! {i64} +impl ExtractBatchedValues<i64> for Int64Array { + extract_batched_values_impl! {i64} } -impl ExtractValues<u64> for UInt64Array { - extract_values_impl! {u64} +impl ExtractBatchedValues<u64> for UInt64Array { + extract_batched_values_impl! {u64} } -impl ExtractValues<f64> for Float64Array { - extract_values_impl! {f64} +impl ExtractBatchedValues<f64> for Float64Array { + extract_batched_values_impl! {f64} } -impl ExtractValues<bool> for BooleanArray { - extract_values_impl! {bool} +impl ExtractBatchedValues<bool> for BooleanArray { + extract_batched_values_impl! {bool} } -impl ExtractValues<i64> for TimestampNanosecondArray { - extract_values_impl! {i64} +impl ExtractBatchedValues<i64> for TimestampNanosecondArray { + extract_batched_values_impl! 
{i64} } #[cfg(test)] mod tests { - use std::convert::TryInto; - use crate::exec::field::FieldIndexes; - use arrow::record_batch::RecordBatch; + use arrow::{compute::concat_batches, record_batch::RecordBatch}; use super::*; - fn series_set_to_series_strings(series_set: SeriesSet) -> Vec<String> { - let series: Vec<Series> = series_set.try_into().unwrap(); + fn series_set_to_series_strings(series_set: SeriesSet, batch_size: usize) -> Vec<String> { + let series: Vec<Series> = series_set.try_into_series(batch_size).unwrap(); let series: Vec<String> = series.into_iter().map(|s| s.to_string()).collect(); @@ -521,23 +482,23 @@ mod tests { tags: vec![(Arc::from("tag1"), Arc::from("val1"))], field_indexes: FieldIndexes::from_timestamp_and_value_indexes(5, &[0, 1, 2, 3, 4]), start_row: 1, - num_rows: 2, + num_rows: 4, batch: make_record_batch(), }; - let series_strings = series_set_to_series_strings(series_set); + let series_strings = series_set_to_series_strings(series_set, 3); let expected = vec![ "Series tags={_field=string_field, _measurement=the_table, tag1=val1}", - " StringPoints timestamps: [2000, 3000], values: [\"bar\", \"baz\"]", + " StringPoints batches: [Batch { timestamps: [2000, 3000, 4000], values: [\"bar\", \"baz\", \"bar\"] }, Batch { timestamps: [5000], values: [\"baz\"] }]", "Series tags={_field=int_field, _measurement=the_table, tag1=val1}", - " IntegerPoints timestamps: [2000, 3000], values: [2, 3]", + " IntegerPoints batches: [Batch { timestamps: [2000, 3000, 4000], values: [2, 3, 4] }, Batch { timestamps: [5000], values: [5] }]", "Series tags={_field=uint_field, _measurement=the_table, tag1=val1}", - " UnsignedPoints timestamps: [2000, 3000], values: [22, 33]", + " UnsignedPoints batches: [Batch { timestamps: [2000, 3000, 4000], values: [22, 33, 44] }, Batch { timestamps: [5000], values: [55] }]", "Series tags={_field=float_field, _measurement=the_table, tag1=val1}", - " FloatPoints timestamps: [2000, 3000], values: [20.1, 30.1]", + " FloatPoints batches: [Batch { timestamps: [2000, 3000, 4000], values: [20.1, 30.1, 40.1] }, Batch { timestamps: [5000], values: [50.1] }]", "Series tags={_field=boolean_field, _measurement=the_table, tag1=val1}", - " BooleanPoints timestamps: [2000, 3000], values: [false, true]", + " BooleanPoints batches: [Batch { timestamps: [2000, 3000, 4000], values: [false, true, false] }, Batch { timestamps: [5000], values: [true] }]", ]; assert_eq!( @@ -570,13 +531,13 @@ mod tests { batch, }; - let series_strings = series_set_to_series_strings(series_set); + let series_strings = series_set_to_series_strings(series_set, 100); // expect CAPITAL_TAG is before `_field` and `_measurement` tags // (as that is the correct lexicographical ordering) let expected = vec![ "Series tags={CAPITAL_TAG=the_value, _field=string_field1, _measurement=the_table, tag1=val1}", - " StringPoints timestamps: [2, 3], values: [\"bar\", \"baz\"]", + " StringPoints batches: [Batch { timestamps: [2, 3], values: [\"bar\", \"baz\"] }]", ]; assert_eq!( @@ -610,13 +571,13 @@ mod tests { batch, }; - let series_strings = series_set_to_series_strings(series_set); + let series_strings = series_set_to_series_strings(series_set, 100); let expected = vec![ "Series tags={_field=string_field2, _measurement=the_table, tag1=val1}", - " StringPoints timestamps: [4, 5], values: [\"far\", \"faz\"]", + " StringPoints batches: [Batch { timestamps: [4, 5], values: [\"far\", \"faz\"] }]", "Series tags={_field=string_field1, _measurement=the_table, tag1=val1}", - " StringPoints timestamps: [2, 3], values: 
[\"bar\", \"baz\"]", + " StringPoints batches: [Batch { timestamps: [2, 3], values: [\"bar\", \"baz\"] }]", ]; assert_eq!( @@ -654,16 +615,39 @@ mod tests { field_indexes: FieldIndexes::from_timestamp_and_value_indexes(3, &[1, 2]), start_row: 0, num_rows: batch.num_rows(), - batch, + batch: batch.clone(), }; // Expect only a single series (for the data in float_field, int_field is all // nulls) - let series_strings = series_set_to_series_strings(series_set); + let series_strings = series_set_to_series_strings(series_set, 100); let expected = vec![ "Series tags={_field=float_field, _measurement=the_table, state=MA}", - " FloatPoints timestamps: [1000, 2000, 4000], values: [10.1, 20.1, 40.1]", + " FloatPoints batches: [Batch { timestamps: [1000, 2000, 4000], values: [10.1, 20.1, 40.1] }]", + ]; + + assert_eq!( + series_strings, expected, + "Expected:\n{expected:#?}\nActual:\n{series_strings:#?}" + ); + + // Multi-batch case + // We can just append record batches here because the tag field does not change + let batch = repeat_batch(3, &batch); + let series_set = SeriesSet { + table_name: Arc::from("the_table"), + tags: vec![(Arc::from("state"), Arc::from("MA"))], + field_indexes: FieldIndexes::from_timestamp_and_value_indexes(3, &[1, 2]), + start_row: 0, + num_rows: batch.num_rows(), + batch, + }; + + let series_strings = series_set_to_series_strings(series_set, 4); + let expected = vec![ + "Series tags={_field=float_field, _measurement=the_table, state=MA}", + " FloatPoints batches: [Batch { timestamps: [1000, 2000, 4000, 1000], values: [10.1, 20.1, 40.1, 10.1] }, Batch { timestamps: [2000, 4000, 1000, 2000], values: [20.1, 40.1, 10.1, 20.1] }, Batch { timestamps: [4000], values: [40.1] }]", ]; assert_eq!( @@ -701,24 +685,58 @@ mod tests { field_indexes: FieldIndexes::from_timestamp_and_value_indexes(6, &[1, 2, 3, 4, 5]), start_row: 0, num_rows: batch.num_rows(), - batch, + batch: batch.clone(), }; // Expect only a single series (for the data in float_field, int_field is all // nulls) - let series_strings = series_set_to_series_strings(series_set); + let series_strings = series_set_to_series_strings(series_set, 100); + + let expected = vec![ + "Series tags={_field=string_field, _measurement=the_table, state=MA}", + " StringPoints batches: [Batch { timestamps: [2000], values: [\"foo\"] }]", + "Series tags={_field=float_field, _measurement=the_table, state=MA}", + " FloatPoints batches: [Batch { timestamps: [2000], values: [1.0] }]", + "Series tags={_field=int_field, _measurement=the_table, state=MA}", + " IntegerPoints batches: [Batch { timestamps: [2000], values: [-10] }]", + "Series tags={_field=uint_field, _measurement=the_table, state=MA}", + " UnsignedPoints batches: [Batch { timestamps: [2000], values: [100] }]", + "Series tags={_field=bool_field, _measurement=the_table, state=MA}", + " BooleanPoints batches: [Batch { timestamps: [2000], values: [true] }]", + ]; + + assert_eq!( + series_strings, expected, + "Expected:\n{expected:#?}\nActual:\n{series_strings:#?}" + ); + + // multi-batch case + + // the tag columns have just a single value so we can just repeat the original batch to + // generate more rows + let batch = repeat_batch(4, &batch); + let series_set = SeriesSet { + table_name: Arc::from("the_table"), + tags: vec![(Arc::from("state"), Arc::from("MA"))], + field_indexes: FieldIndexes::from_timestamp_and_value_indexes(6, &[1, 2, 3, 4, 5]), + start_row: 0, + num_rows: batch.num_rows(), + batch, + }; + + let series_strings = series_set_to_series_strings(series_set, 3); let 
expected = vec![ "Series tags={_field=string_field, _measurement=the_table, state=MA}", - " StringPoints timestamps: [2000], values: [\"foo\"]", + " StringPoints batches: [Batch { timestamps: [2000, 2000, 2000], values: [\"foo\", \"foo\", \"foo\"] }, Batch { timestamps: [2000], values: [\"foo\"] }]", "Series tags={_field=float_field, _measurement=the_table, state=MA}", - " FloatPoints timestamps: [2000], values: [1.0]", + " FloatPoints batches: [Batch { timestamps: [2000, 2000, 2000], values: [1.0, 1.0, 1.0] }, Batch { timestamps: [2000], values: [1.0] }]", "Series tags={_field=int_field, _measurement=the_table, state=MA}", - " IntegerPoints timestamps: [2000], values: [-10]", + " IntegerPoints batches: [Batch { timestamps: [2000, 2000, 2000], values: [-10, -10, -10] }, Batch { timestamps: [2000], values: [-10] }]", "Series tags={_field=uint_field, _measurement=the_table, state=MA}", - " UnsignedPoints timestamps: [2000], values: [100]", + " UnsignedPoints batches: [Batch { timestamps: [2000, 2000, 2000], values: [100, 100, 100] }, Batch { timestamps: [2000], values: [100] }]", "Series tags={_field=bool_field, _measurement=the_table, state=MA}", - " BooleanPoints timestamps: [2000], values: [true]", + " BooleanPoints batches: [Batch { timestamps: [2000, 2000, 2000], values: [true, true, true] }, Batch { timestamps: [2000], values: [true] }]", ]; assert_eq!( @@ -728,14 +746,20 @@ mod tests { } fn make_record_batch() -> RecordBatch { - let string_array: ArrayRef = Arc::new(StringArray::from(vec!["foo", "bar", "baz", "foo"])); - let int_array: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3, 4])); - let uint_array: ArrayRef = Arc::new(UInt64Array::from(vec![11, 22, 33, 44])); - let float_array: ArrayRef = Arc::new(Float64Array::from(vec![10.1, 20.1, 30.1, 40.1])); - let bool_array: ArrayRef = Arc::new(BooleanArray::from(vec![true, false, true, false])); + let string_array: ArrayRef = Arc::new(StringArray::from(vec![ + "foo", "bar", "baz", "bar", "baz", "foo", + ])); + let int_array: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3, 4, 5, 6])); + let uint_array: ArrayRef = Arc::new(UInt64Array::from(vec![11, 22, 33, 44, 55, 66])); + let float_array: ArrayRef = + Arc::new(Float64Array::from(vec![10.1, 20.1, 30.1, 40.1, 50.1, 60.1])); + let bool_array: ArrayRef = Arc::new(BooleanArray::from(vec![ + true, false, true, false, true, false, + ])); - let timestamp_array: ArrayRef = - Arc::new(TimestampNanosecondArray::from(vec![1000, 2000, 3000, 4000])); + let timestamp_array: ArrayRef = Arc::new(TimestampNanosecondArray::from(vec![ + 1000, 2000, 3000, 4000, 5000, 6000, + ])); RecordBatch::try_from_iter_with_nullable(vec![ ("string_field", string_array, true), @@ -747,4 +771,8 @@ mod tests { ]) .expect("created new record batch") } + + fn repeat_batch(count: usize, rb: &RecordBatch) -> RecordBatch { + concat_batches(&rb.schema(), std::iter::repeat(rb).take(count)).unwrap() + } } diff --git a/service_grpc_influxrpc/src/data.rs b/service_grpc_influxrpc/src/data.rs index 71748823bf..da9c8fcb15 100644 --- a/service_grpc_influxrpc/src/data.rs +++ b/service_grpc_influxrpc/src/data.rs @@ -8,7 +8,7 @@ use arrow::datatypes::DataType as ArrowDataType; use futures::{stream::BoxStream, Stream, StreamExt}; use iox_query::exec::{ fieldlist::FieldList, - seriesset::series::{self, Either}, + seriesset::series::{self, Batch, Either}, }; use predicate::rpc_predicate::{FIELD_COLUMN_NAME, MEASUREMENT_COLUMN_NAME}; @@ -101,42 +101,72 @@ fn series_to_frames( ) -> impl Stream<Item = Frame> { let series::Series { tags, 
data } = series; - let (data_type, data_frame) = match data { - series::Data::FloatPoints { timestamps, values } => ( + let (data_type, data_frames): (DataType, Vec<Frame>) = match data { + series::Data::FloatPoints(batches) => ( DataType::Float, - Data::FloatPoints(FloatPointsFrame { timestamps, values }), + batches + .into_iter() + .map(|Batch { timestamps, values }| Frame { + data: Some(Data::FloatPoints(FloatPointsFrame { timestamps, values })), + }) + .collect(), ), - series::Data::IntegerPoints { timestamps, values } => ( + series::Data::IntegerPoints(batches) => ( DataType::Integer, - Data::IntegerPoints(IntegerPointsFrame { timestamps, values }), + batches + .into_iter() + .map(|Batch { timestamps, values }| Frame { + data: Some(Data::IntegerPoints(IntegerPointsFrame { + timestamps, + values, + })), + }) + .collect(), ), - series::Data::UnsignedPoints { timestamps, values } => ( + series::Data::UnsignedPoints(batches) => ( DataType::Unsigned, - Data::UnsignedPoints(UnsignedPointsFrame { timestamps, values }), + batches + .into_iter() + .map(|Batch { timestamps, values }| Frame { + data: Some(Data::UnsignedPoints(UnsignedPointsFrame { + timestamps, + values, + })), + }) + .collect(), ), - series::Data::BooleanPoints { timestamps, values } => ( + series::Data::BooleanPoints(batches) => ( DataType::Boolean, - Data::BooleanPoints(BooleanPointsFrame { timestamps, values }), + batches + .into_iter() + .map(|Batch { timestamps, values }| Frame { + data: Some(Data::BooleanPoints(BooleanPointsFrame { + timestamps, + values, + })), + }) + .collect(), ), - series::Data::StringPoints { timestamps, values } => ( + series::Data::StringPoints(batches) => ( DataType::String, - Data::StringPoints(StringPointsFrame { timestamps, values }), + batches + .into_iter() + .map(|Batch { timestamps, values }| Frame { + data: Some(Data::StringPoints(StringPointsFrame { timestamps, values })), + }) + .collect(), ), }; - let series_frame = Data::Series(SeriesFrame { - tags: convert_tags(tags, tag_key_binary_format), - data_type: data_type.into(), - }); - - futures::stream::iter([ - Frame { - data: Some(series_frame), - }, - Frame { - data: Some(data_frame), - }, - ]) + futures::stream::iter( + std::iter::once(Frame { + data: Some(Data::Series(SeriesFrame { + tags: convert_tags(tags, tag_key_binary_format), + data_type: data_type.into(), + })), + }) + .chain(data_frames.into_iter()), + ) } /// Converts a [`series::Group`] into a storage gRPC `GroupFrame` @@ -220,7 +250,7 @@ fn datatype_to_measurement_field_enum(data_type: &ArrowDataType) -> Result<Field #[cfg(test)] mod tests { - use std::{convert::TryInto, fmt}; + use std::fmt; use arrow::{ array::{ @@ -276,12 +306,12 @@ mod tests { tags: vec![(Arc::from("tag1"), Arc::from("val1"))], field_indexes: FieldIndexes::from_timestamp_and_value_indexes(5, &[0, 1, 2, 3, 4]), start_row: 1, - num_rows: 2, + num_rows: 4, batch: make_record_batch(), }; let series: Vec<Series> = series_set - .try_into() + .try_into_series(3) .expect("Correctly converted series set"); let series: Vec<Either> = series.into_iter().map(|s| s.into()).collect(); @@ -295,15 +325,20 @@ mod tests { let dumped_frames = dump_frames(&frames); let expected_frames = vec![ "SeriesFrame, tags: _field=string_field,_measurement=the_table,tag1=val1, type: 4", - "StringPointsFrame, timestamps: [2000, 3000], values: bar,baz", + "StringPointsFrame, timestamps: [2000, 3000, 4000], values: bar,baz,bar", + "StringPointsFrame, timestamps: [5000], values: baz", "SeriesFrame, tags: 
_field=int_field,_measurement=the_table,tag1=val1, type: 1", - "IntegerPointsFrame, timestamps: [2000, 3000], values: \"2,3\"", + "IntegerPointsFrame, timestamps: [2000, 3000, 4000], values: \"2,2,3\"", + "IntegerPointsFrame, timestamps: [5000], values: \"3\"", "SeriesFrame, tags: _field=uint_field,_measurement=the_table,tag1=val1, type: 2", - "UnsignedPointsFrame, timestamps: [2000, 3000], values: \"22,33\"", + "UnsignedPointsFrame, timestamps: [2000, 3000, 4000], values: \"22,22,33\"", + "UnsignedPointsFrame, timestamps: [5000], values: \"33\"", "SeriesFrame, tags: _field=float_field,_measurement=the_table,tag1=val1, type: 0", - "FloatPointsFrame, timestamps: [2000, 3000], values: \"20.1,30.1\"", + "FloatPointsFrame, timestamps: [2000, 3000, 4000], values: \"20.1,21.1,30.1\"", + "FloatPointsFrame, timestamps: [5000], values: \"31.1\"", "SeriesFrame, tags: _field=boolean_field,_measurement=the_table,tag1=val1, type: 3", - "BooleanPointsFrame, timestamps: [2000, 3000], values: false,true", + "BooleanPointsFrame, timestamps: [2000, 3000, 4000], values: false,false,true", + "BooleanPointsFrame, timestamps: [5000], values: true", ]; assert_eq!( @@ -322,16 +357,21 @@ mod tests { .unwrap(); let dumped_frames = dump_frames(&frames); let expected_frames = vec![ - "SeriesFrame, tags: \x00=the_table,tag1=val1,�=string_field, type: 4", - "StringPointsFrame, timestamps: [2000, 3000], values: bar,baz", - "SeriesFrame, tags: \x00=the_table,tag1=val1,�=int_field, type: 1", - "IntegerPointsFrame, timestamps: [2000, 3000], values: \"2,3\"", - "SeriesFrame, tags: \x00=the_table,tag1=val1,�=uint_field, type: 2", - "UnsignedPointsFrame, timestamps: [2000, 3000], values: \"22,33\"", - "SeriesFrame, tags: \x00=the_table,tag1=val1,�=float_field, type: 0", - "FloatPointsFrame, timestamps: [2000, 3000], values: \"20.1,30.1\"", - "SeriesFrame, tags: \x00=the_table,tag1=val1,�=boolean_field, type: 3", - "BooleanPointsFrame, timestamps: [2000, 3000], values: false,true", + "SeriesFrame, tags: \0=the_table,tag1=val1,�=string_field, type: 4", + "StringPointsFrame, timestamps: [2000, 3000, 4000], values: bar,baz,bar", + "StringPointsFrame, timestamps: [5000], values: baz", + "SeriesFrame, tags: \0=the_table,tag1=val1,�=int_field, type: 1", + "IntegerPointsFrame, timestamps: [2000, 3000, 4000], values: \"2,2,3\"", + "IntegerPointsFrame, timestamps: [5000], values: \"3\"", + "SeriesFrame, tags: \0=the_table,tag1=val1,�=uint_field, type: 2", + "UnsignedPointsFrame, timestamps: [2000, 3000, 4000], values: \"22,22,33\"", + "UnsignedPointsFrame, timestamps: [5000], values: \"33\"", + "SeriesFrame, tags: \0=the_table,tag1=val1,�=float_field, type: 0", + "FloatPointsFrame, timestamps: [2000, 3000, 4000], values: \"20.1,21.1,30.1\"", + "FloatPointsFrame, timestamps: [5000], values: \"31.1\"", + "SeriesFrame, tags: \0=the_table,tag1=val1,�=boolean_field, type: 3", + "BooleanPointsFrame, timestamps: [2000, 3000, 4000], values: false,false,true", + "BooleanPointsFrame, timestamps: [5000], values: true", ]; assert_eq!( @@ -465,14 +505,20 @@ mod tests { } fn make_record_batch() -> RecordBatch { - let string_array: ArrayRef = Arc::new(StringArray::from(vec!["foo", "bar", "baz", "foo"])); - let int_array: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 3, 4])); - let uint_array: ArrayRef = Arc::new(UInt64Array::from(vec![11, 22, 33, 44])); - let float_array: ArrayRef = Arc::new(Float64Array::from(vec![10.1, 20.1, 30.1, 40.1])); - let bool_array: ArrayRef = Arc::new(BooleanArray::from(vec![true, false, true, false])); - - let 
timestamp_array: ArrayRef = - Arc::new(TimestampNanosecondArray::from(vec![1000, 2000, 3000, 4000])); + let string_array: ArrayRef = Arc::new(StringArray::from(vec![ + "foo", "bar", "baz", "bar", "baz", "foo", + ])); + let int_array: ArrayRef = Arc::new(Int64Array::from(vec![1, 2, 2, 3, 3, 4])); + let uint_array: ArrayRef = Arc::new(UInt64Array::from(vec![11, 22, 22, 33, 33, 44])); + let float_array: ArrayRef = + Arc::new(Float64Array::from(vec![10.1, 20.1, 21.1, 30.1, 31.1, 40.1])); + let bool_array: ArrayRef = Arc::new(BooleanArray::from(vec![ + true, false, false, true, true, false, + ])); + + let timestamp_array: ArrayRef = Arc::new(TimestampNanosecondArray::from(vec![ + 1000, 2000, 3000, 4000, 5000, 6000, + ])); RecordBatch::try_from_iter_with_nullable(vec![ ("string_field", string_array, true), diff --git a/service_grpc_influxrpc/src/service.rs b/service_grpc_influxrpc/src/service.rs index 4293cb5d69..5af263e5dd 100644 --- a/service_grpc_influxrpc/src/service.rs +++ b/service_grpc_influxrpc/src/service.rs @@ -59,6 +59,10 @@ use tracker::InstrumentedAsyncOwnedSemaphorePermit; /// this size (there's a bit of additional encoding overhead on top of that, but that should be OK). const MAX_READ_RESPONSE_SIZE: usize = 4194304 - 100_000; // 4MB - <wiggle room> +/// The max number of points allowed in each output data frame. This is the same value TSM uses, +/// and is used to avoid overlarge individual gRPC messages. +const MAX_POINTS_PER_FRAME: usize = 1000; + #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Namespace not found: {}", db_name))] @@ -1370,6 +1374,7 @@ where .to_series_and_groups( series_plan, Arc::clone(&ctx.inner().runtime_env().memory_pool), + MAX_POINTS_PER_FRAME, ) .await .context(FilteringSeriesSnafu { @@ -1438,6 +1443,7 @@ where .to_series_and_groups( grouped_series_set_plan, Arc::clone(&ctx.inner().runtime_env().memory_pool), + MAX_POINTS_PER_FRAME, ) .await .context(GroupingSeriesSnafu {
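
A simplified, self-contained sketch of the batching scheme this commit introduces: a series' column data is split into batches of at most batch_size points, and each batch later becomes its own points frame on the wire (1000 points per frame in the real service). Batch and into_batches below are stand-ins; the real implementation extracts batched values directly from Arrow arrays via extract_batched_values.

#[derive(Debug, PartialEq)]
struct Batch<T> {
    timestamps: Vec<i64>,
    values: Vec<T>,
}

/// Split parallel timestamp/value columns into fixed-size batches.
fn into_batches<T>(timestamps: Vec<i64>, values: Vec<T>, batch_size: usize) -> Vec<Batch<T>> {
    assert_eq!(timestamps.len(), values.len());
    let mut batches = Vec::new();
    let mut ts_iter = timestamps.into_iter();
    let mut v_iter = values.into_iter();
    loop {
        let ts: Vec<i64> = ts_iter.by_ref().take(batch_size).collect();
        if ts.is_empty() {
            break;
        }
        let values: Vec<T> = v_iter.by_ref().take(batch_size).collect();
        batches.push(Batch { timestamps: ts, values });
    }
    batches
}

fn main() {
    // 2001 points with a 1000-point limit => 3 batches, the last holding a single
    // point, matching the end-to-end test added in this commit.
    let ts: Vec<i64> = (0..2001).collect();
    let vals: Vec<f64> = (0..2001).map(|v| v as f64).collect();
    let batches = into_batches(ts, vals, 1000);
    assert_eq!(batches.len(), 3);
    assert_eq!(batches[2].timestamps.len(), 1);
}
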
6cdc95e49d9831fbd972931e67e67ca8c369db2f
Fraser Savage
2023-05-04 12:36:23
Use a separate DecodeError type for WriteOpEntryDecoder
Having a ginormous error enum returned for this method means that the catch-all behaviour gets leaked into the error naming and semantics of callers. The decoder is a new type and could benefit from not adding to the existing error enum.
null
refactor(wal): Use a separate DecodeError type for WriteOpEntryDecoder Having a ginormous error enum returned for this method means that the catch-all behaviour gets leaked into the error naming and semantics of callers. The decoder is a new type and could benefit from not adding to the existing error enum.
diff --git a/wal/src/lib.rs b/wal/src/lib.rs index b60b85a475..54058d5e2e 100644 --- a/wal/src/lib.rs +++ b/wal/src/lib.rs @@ -134,10 +134,19 @@ pub enum Error { UnableToCreateSegmentFile { source: blocking::WriterError, }, +} - UnableToDecodeRecordBatch { +/// Errors that occur when decoding internal types from a WAL file. +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(crate)))] +pub enum DecodeError { + UnableToCreateMutableBatch { source: mutable_batch_pb::decode::Error, }, + + FailedToReadWal { + source: Error, + }, } /// A specialized `Result` for WAL-related errors @@ -588,22 +597,22 @@ impl WriteOpEntryDecoder { /// more entries to be decoded from the underlying segment. A zero-length vector /// may be returned if there are no writes in a WAL entry batch, but does not /// indicate the decoder is consumed. - pub fn next_write_op_entry_batch(&mut self) -> Result<Option<Vec<WriteOpEntry>>> { - match self.reader.next_batch()? { + pub fn next_write_op_entry_batch(&mut self) -> Result<Option<Vec<WriteOpEntry>>, DecodeError> { + match self.reader.next_batch().context(FailedToReadWalSnafu)? { Some(batch) => Ok(batch .into_iter() .filter_map(|sequenced_op| match sequenced_op.op { WalOp::Write(w) => Some(w), _ => None, }) - .map(|w| -> Result<WriteOpEntry> { + .map(|w| -> Result<WriteOpEntry, DecodeError> { Ok(WriteOpEntry { namespace: NamespaceId::new(w.database_id), table_batches: decode_database_batch(&w) - .context(UnableToDecodeRecordBatchSnafu)?, + .context(UnableToCreateMutableBatchSnafu)?, }) }) - .collect::<Result<Vec<WriteOpEntry>>>()? + .collect::<Result<Vec<WriteOpEntry>, DecodeError>>()? .into()), None => Ok(None), } diff --git a/wal_inspect/src/lib.rs b/wal_inspect/src/lib.rs index 0b37839d80..c0a5e12e08 100644 --- a/wal_inspect/src/lib.rs +++ b/wal_inspect/src/lib.rs @@ -127,7 +127,7 @@ mod tests { iox::wal::v1::sequenced_wal_op::Op, pbdata::v1::DatabaseBatch, }; use mutable_batch_lp::lines_to_batches; - use wal::{Error as WalError, SequencedWalOp, WriteOpEntryDecoder}; + use wal::{DecodeError, SequencedWalOp, WriteOpEntryDecoder}; use super::*; @@ -289,7 +289,7 @@ mod tests { } }), Err(e) => { - assert_matches!(e, WalError::UnableToReadNextOps { .. }); + assert_matches!(e, DecodeError::FailedToReadWal { .. }); break; } };
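
A std-only sketch of the error-narrowing idea above, without the snafu machinery the real crate uses: the decoder returns its own two-variant DecodeError instead of growing the WAL-wide Error enum, so callers only match on the failure modes decoding can actually hit. The wrapped types here are stand-ins for wal::Error and mutable_batch_pb::decode::Error.

use std::fmt;

#[derive(Debug)]
struct WalReadError(String); // stand-in for the WAL-wide error type

#[derive(Debug)]
struct PbDecodeError(String); // stand-in for the protobuf decode error

/// The decoder's own, narrow error type.
#[derive(Debug)]
enum DecodeError {
    UnableToCreateMutableBatch(PbDecodeError),
    FailedToReadWal(WalReadError),
}

impl fmt::Display for DecodeError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::UnableToCreateMutableBatch(e) => write!(f, "unable to create mutable batch: {}", e.0),
            Self::FailedToReadWal(e) => write!(f, "failed to read wal: {}", e.0),
        }
    }
}

impl std::error::Error for DecodeError {}

// A decoder method then advertises exactly what can go wrong.
fn next_write_op_entry_batch(fail: bool) -> Result<Option<Vec<String>>, DecodeError> {
    if fail {
        return Err(DecodeError::FailedToReadWal(WalReadError("segment truncated".into())));
    }
    Ok(Some(vec!["write op entry".to_string()]))
}

fn main() {
    assert!(next_write_op_entry_batch(false).is_ok());
    assert!(matches!(
        next_write_op_entry_batch(true),
        Err(DecodeError::FailedToReadWal(_))
    ));
}
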
d6c4b51ba84a3567affc8337ab74d22eedda6e49
Dom Dwyer
2023-06-21 14:55:02
introduce catalog query indirection
Add indirection between the CatalogPartitionFilesSource (within the retry-loop) and the underlying catalog.
null
refactor: introduce catalog query indirection Add indirection between the CatalogPartitionFilesSource (within the retry-loop) and the underlying catalog.
diff --git a/compactor/src/components/partition_files_source/catalog.rs b/compactor/src/components/partition_files_source/catalog.rs index 0fdd1865c7..3f48302b60 100644 --- a/compactor/src/components/partition_files_source/catalog.rs +++ b/compactor/src/components/partition_files_source/catalog.rs @@ -1,20 +1,45 @@ -use std::{fmt::Display, sync::Arc}; +use std::{ + fmt::{Debug, Display}, + sync::Arc, +}; use async_trait::async_trait; use backoff::{Backoff, BackoffConfig}; use data_types::{ParquetFile, PartitionId}; use iox_catalog::interface::Catalog; -use super::PartitionFilesSource; +use super::{rate_limit::QueryRateLimit, PartitionFilesSource}; + +#[async_trait] +pub(crate) trait CatalogQuerier: Send + Sync + Debug { + async fn get_partitions( + &self, + partition_id: PartitionId, + ) -> Result<Vec<ParquetFile>, iox_catalog::interface::Error>; +} + +#[async_trait] +impl CatalogQuerier for Arc<dyn Catalog> { + async fn get_partitions( + &self, + partition_id: PartitionId, + ) -> Result<Vec<ParquetFile>, iox_catalog::interface::Error> { + self.repositories() + .await + .parquet_files() + .list_by_partition_not_to_delete(partition_id) + .await + } +} #[derive(Debug)] -pub struct CatalogPartitionFilesSource { +pub struct CatalogPartitionFilesSource<T = QueryRateLimit<Arc<dyn Catalog>>> { backoff_config: BackoffConfig, - catalog: Arc<dyn Catalog>, + catalog: T, } -impl CatalogPartitionFilesSource { - pub fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self { +impl<T> CatalogPartitionFilesSource<T> { + pub fn new(backoff_config: BackoffConfig, catalog: T) -> Self { Self { backoff_config, catalog, @@ -22,23 +47,21 @@ impl CatalogPartitionFilesSource { } } -impl Display for CatalogPartitionFilesSource { +impl<T> Display for CatalogPartitionFilesSource<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "catalog") } } #[async_trait] -impl PartitionFilesSource for CatalogPartitionFilesSource { - async fn fetch(&self, partition: PartitionId) -> Vec<ParquetFile> { +impl<T> PartitionFilesSource for CatalogPartitionFilesSource<T> +where + T: CatalogQuerier, +{ + async fn fetch(&self, partition_id: PartitionId) -> Vec<ParquetFile> { Backoff::new(&self.backoff_config) .retry_all_errors("parquet_files_of_given_partition", || async { - self.catalog - .repositories() - .await - .parquet_files() - .list_by_partition_not_to_delete(partition) - .await + self.catalog.get_partitions(partition_id).await }) .await .expect("retry forever")
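
A synchronous sketch of the indirection added above (the real trait is async via async_trait, and the production wrapper adds a query rate limit): the files source is generic over anything that can answer the catalog query, so a test can drop in a stub while production wraps the real catalog, without touching the retry logic. PartitionId and ParquetFile are replaced with i64 and String stand-ins here.

/// Anything that can answer "which parquet files belong to this partition?".
trait CatalogQuerier {
    fn get_partitions(&self, partition_id: i64) -> Result<Vec<String>, String>;
}

/// The files source no longer depends on a concrete catalog type.
struct CatalogPartitionFilesSource<T> {
    catalog: T,
}

impl<T: CatalogQuerier> CatalogPartitionFilesSource<T> {
    fn fetch(&self, partition_id: i64) -> Vec<String> {
        // The real implementation retries all errors with a backoff policy;
        // here we just unwrap for brevity.
        self.catalog.get_partitions(partition_id).expect("retry forever")
    }
}

/// A stub querier a unit test could use instead of a live catalog.
struct FixedQuerier(Vec<String>);

impl CatalogQuerier for FixedQuerier {
    fn get_partitions(&self, _partition_id: i64) -> Result<Vec<String>, String> {
        Ok(self.0.clone())
    }
}

fn main() {
    let source = CatalogPartitionFilesSource {
        catalog: FixedQuerier(vec!["file-1.parquet".into(), "file-2.parquet".into()]),
    };
    assert_eq!(source.fetch(42).len(), 2);
}
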
54d209d0bff44f3ebdc8f3f5bfdd9873268cbb93
Michael Gattozzi
2024-09-18 11:44:04
Add u32 ID for Databases (#25302)
* feat: Remove lock for FileId tests Since we are now using cargo-nextest in CI we can remove the locks used in the FileId tests to make sure that we have no race conditions * feat: Add u32 ID for Databases This commit adds a new DbId for databases. It also updates paths to use that id as part of the name. When starting up the WriteBuffer we apply the DbId from the persisted snapshot much like we do for ParquetFileIds. This introduces the influxdb3_id crate to avoid circular deps with ids. The ParquetFileId should also be moved into this crate, but it's outside the scope of this change. Closes #25301
null
feat: Add u32 ID for Databases (#25302) * feat: Remove lock for FileId tests Since we now are using cargo-nextest in CI we can remove the locks used in the FileId tests to make sure that we have no race conditions * feat: Add u32 ID for Databases This commit adds a new DbId for databases. It also updates paths to use that id as part of the name. When starting up the WriteBuffer we apply the DbId from the persisted snapshot much like we do for ParquetFileId's This introduces the influxdb3_id crate to avoid circular deps with ids. The ParquetFileId should also be moved into this crate, but it's outside the scope of this change. Closes #25301
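
A self-contained sketch of the DbId scheme described above: a process-wide atomic u32 hands out database IDs, and on startup the counter is fast-forwarded to the value recorded in the persisted snapshot so new databases never reuse an existing ID. This mirrors the influxdb3_id::DbId added in the diff below, minus the serde derives and From impls.

use std::sync::atomic::{AtomicU32, Ordering};

static NEXT_DB_ID: AtomicU32 = AtomicU32::new(0);

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct DbId(u32);

impl DbId {
    /// Mint the next database ID.
    fn new() -> Self {
        Self(NEXT_DB_ID.fetch_add(1, Ordering::SeqCst))
    }

    /// Fast-forward the counter, e.g. from the last persisted snapshot on startup.
    fn set_next_id(&self) {
        NEXT_DB_ID.store(self.0, Ordering::SeqCst)
    }
}

fn main() {
    // Pretend the persisted snapshot says the next free ID is 7.
    DbId(7).set_next_id();
    assert_eq!(DbId::new(), DbId(7));
    assert_eq!(DbId::new(), DbId(8));
}
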
diff --git a/Cargo.lock b/Cargo.lock index 0083d626d9..de074f0ed0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,19 +4,13 @@ version = 3 [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -133,15 +127,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -636,17 +630,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -783,9 +777,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2" @@ -830,9 +824,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.16" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9d013ecb737093c0e86b151a7b837993cf9ec6c502946cfb44bedc392421e0b" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1945,7 +1939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -2138,9 +2132,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -2433,7 +2427,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -2468,9 +2462,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -2488,9 +2482,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2626,6 +2620,7 @@ version = "0.1.0" dependencies = [ "arrow", "influxdb-line-protocol", + "influxdb3_id", "influxdb3_wal", "insta", "observability_deps", @@ -2655,6 +2650,13 @@ dependencies = [ "url", ] +[[package]] +name = "influxdb3_id" +version = "0.1.0" +dependencies = [ + "serde", +] + [[package]] name = "influxdb3_load_generator" version = "0.1.0" @@ -2721,6 +2723,7 @@ dependencies = [ "hyper 0.14.30", "influxdb-line-protocol", "influxdb3_catalog", + "influxdb3_id", "influxdb3_process", "influxdb3_wal", "influxdb3_write", @@ -2777,6 +2780,7 @@ dependencies = [ "futures-util", "hashbrown 0.14.5", "influxdb-line-protocol", + "influxdb3_id", "iox_time", "object_store", "observability_deps", @@ -2809,6 +2813,7 @@ dependencies = [ "indexmap 2.5.0", "influxdb-line-protocol", "influxdb3_catalog", + "influxdb3_id", "influxdb3_wal", "insta", "iox_catalog", @@ -3074,9 +3079,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is_terminal_polyfill" @@ -3398,15 +3403,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -4119,9 +4115,9 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -4342,7 +4338,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.13", "socket2", "thiserror", "tokio", @@ -4359,7 +4355,7 @@ dependencies = [ "rand", "ring", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -4449,9 +4445,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -4571,7 +4567,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", + "rustls 0.23.13", 
"rustls-native-certs 0.7.3", "rustls-pemfile 2.1.3", "rustls-pki-types", @@ -4649,9 +4645,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.36" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -4681,22 +4677,22 @@ dependencies = [ "log", "ring", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -4776,9 +4772,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -4820,11 +4816,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4902,18 +4898,18 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -5847,7 +5843,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] @@ -6275,15 +6271,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = 
"5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -6296,9 +6292,9 @@ checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" @@ -6883,7 +6879,7 @@ dependencies = [ "reqwest 0.12.7", "ring", "rustls 0.21.12", - "rustls 0.23.12", + "rustls 0.23.13", "serde", "serde_json", "sha2", @@ -6939,9 +6935,9 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" diff --git a/Cargo.toml b/Cargo.toml index e16b13105d..8c9c0f8e79 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "influxdb3", "influxdb3_catalog", "influxdb3_client", + "influxdb3_id", "influxdb3_load_generator", "influxdb3_process", "influxdb3_server", diff --git a/influxdb3_catalog/Cargo.toml b/influxdb3_catalog/Cargo.toml index 35704fea88..080561f816 100644 --- a/influxdb3_catalog/Cargo.toml +++ b/influxdb3_catalog/Cargo.toml @@ -12,6 +12,7 @@ observability_deps.workspace = true schema.workspace = true # Local deps +influxdb3_id = { path = "../influxdb3_id" } influxdb3_wal = { path = "../influxdb3_wal" } # crates.io dependencies diff --git a/influxdb3_catalog/src/catalog.rs b/influxdb3_catalog/src/catalog.rs index 55f5d810b2..b4e8eb484a 100644 --- a/influxdb3_catalog/src/catalog.rs +++ b/influxdb3_catalog/src/catalog.rs @@ -1,6 +1,7 @@ //! Implementation of the Catalog that sits entirely in memory. use crate::catalog::Error::TableNotFound; +use influxdb3_id::DbId; use influxdb3_wal::{ CatalogBatch, CatalogOp, FieldAdditions, LastCacheDefinition, LastCacheDelete, }; @@ -149,7 +150,7 @@ impl Catalog { } info!("return new db {}", db_name); - let db = Arc::new(DatabaseSchema::new(db_name.into())); + let db = Arc::new(DatabaseSchema::new(DbId::new(), db_name.into())); inner .databases .insert(Arc::clone(&db.name), Arc::clone(&db)); @@ -333,6 +334,7 @@ impl InnerCatalog { #[serde_with::serde_as] #[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)] pub struct DatabaseSchema { + pub id: DbId, pub name: Arc<str>, /// The database is a map of tables #[serde_as(as = "serde_with::MapPreventDuplicates<_, _>")] @@ -340,8 +342,9 @@ pub struct DatabaseSchema { } impl DatabaseSchema { - pub fn new(name: Arc<str>) -> Self { + pub fn new(id: DbId, name: Arc<str>) -> Self { Self { + id, name, tables: BTreeMap::new(), } @@ -440,6 +443,7 @@ impl DatabaseSchema { } Ok(Some(Self { + id: self.id, name: Arc::clone(&self.name), tables: updated_or_new_tables, })) @@ -447,7 +451,10 @@ impl DatabaseSchema { } pub fn new_from_batch(catalog_batch: &CatalogBatch) -> Result<Self> { - let db_schema = Self::new(Arc::clone(&catalog_batch.database_name)); + let db_schema = Self::new( + catalog_batch.database_id, + Arc::clone(&catalog_batch.database_name), + ); let new_db = db_schema .new_if_updated_from_batch(catalog_batch)? 
.expect("database must be new"); @@ -745,6 +752,7 @@ mod tests { let cloned_instance_id = Arc::clone(&instance_id); let catalog = Catalog::new(host_id, cloned_instance_id); let mut database = DatabaseSchema { + id: DbId::from(0), name: "test_db".into(), tables: BTreeMap::new(), }; @@ -813,10 +821,12 @@ mod tests { let json = r#"{ "databases": { "db1": { + "id": 0, "name": "db1", "tables": {} }, "db1": { + "id": 0, "name": "db1", "tables": {} } @@ -881,6 +891,7 @@ mod tests { #[test] fn add_columns_updates_schema() { let mut database = DatabaseSchema { + id: DbId::from(0), name: "test".into(), tables: BTreeMap::new(), }; @@ -915,6 +926,7 @@ mod tests { let instance_id = Arc::from("instance-id"); let catalog = Catalog::new(host_id, instance_id); let mut database = DatabaseSchema { + id: DbId::from(0), name: "test_db".into(), tables: BTreeMap::new(), }; @@ -959,6 +971,7 @@ mod tests { let instance_id = Arc::from("instance-id"); let catalog = Catalog::new(host_id, instance_id); let mut database = DatabaseSchema { + id: DbId::from(0), name: "test_db".into(), tables: BTreeMap::new(), }; diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap index 44d81f7750..11ed2b7f24 100644 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap +++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__catalog_serialization.snap @@ -5,6 +5,7 @@ expression: catalog { "databases": { "test_db": { + "id": 0, "name": "test_db", "tables": { "test_table_1": { diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap index a33a79a85e..7c79043108 100644 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap +++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_last_cache.snap @@ -5,6 +5,7 @@ expression: catalog { "databases": { "test_db": { + "id": 0, "name": "test_db", "tables": { "test_table_1": { diff --git a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap index ff8c57f7b7..2d8489dfd4 100644 --- a/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap +++ b/influxdb3_catalog/src/snapshots/influxdb3_catalog__catalog__tests__serialize_series_keys.snap @@ -5,6 +5,7 @@ expression: catalog { "databases": { "test_db": { + "id": 0, "name": "test_db", "tables": { "test_table_1": { diff --git a/influxdb3_id/Cargo.toml b/influxdb3_id/Cargo.toml new file mode 100644 index 0000000000..0d408e5579 --- /dev/null +++ b/influxdb3_id/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "influxdb3_id" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +serde.workspace = true + +[lints] +workspace = true diff --git a/influxdb3_id/src/lib.rs b/influxdb3_id/src/lib.rs new file mode 100644 index 0000000000..548a8472b4 --- /dev/null +++ b/influxdb3_id/src/lib.rs @@ -0,0 +1,39 @@ +use serde::Deserialize; +use serde::Serialize; +use std::sync::atomic::AtomicU32; +use std::sync::atomic::Ordering; + +#[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize)] 
+pub struct DbId(u32); + +static NEXT_DB_ID: AtomicU32 = AtomicU32::new(0); + +impl DbId { + pub fn new() -> Self { + Self(NEXT_DB_ID.fetch_add(1, Ordering::SeqCst)) + } + + pub fn next_id() -> DbId { + Self(NEXT_DB_ID.load(Ordering::SeqCst)) + } + + pub fn set_next_id(&self) { + NEXT_DB_ID.store(self.0, Ordering::SeqCst) + } + + pub fn as_u32(&self) -> u32 { + self.0 + } +} + +impl Default for DbId { + fn default() -> Self { + Self::new() + } +} + +impl From<u32> for DbId { + fn from(value: u32) -> Self { + Self(value) + } +} diff --git a/influxdb3_server/Cargo.toml b/influxdb3_server/Cargo.toml index 834e832206..ac7a84893c 100644 --- a/influxdb3_server/Cargo.toml +++ b/influxdb3_server/Cargo.toml @@ -31,9 +31,10 @@ tracker.workspace = true # Local Deps influxdb3_catalog = { path = "../influxdb3_catalog" } -influxdb3_write = { path = "../influxdb3_write" } +influxdb3_id = { path = "../influxdb3_id" } influxdb3_process = { path = "../influxdb3_process", default-features = false } influxdb3_wal = { path = "../influxdb3_wal"} +influxdb3_write = { path = "../influxdb3_write" } iox_query_influxql_rewrite = { path = "../iox_query_influxql_rewrite" } # crates.io Dependencies diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index 72e6eb0f22..f11cdc716b 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -704,29 +704,29 @@ mod tests { let test_cases = [ TestCase { - query: "SELECT * FROM system.parquet_files WHERE table_name = 'cpu'", + query: "SELECT table_name, size_bytes, row_count, min_time, max_time FROM system.parquet_files WHERE table_name = 'cpu'", expected: &[ - "+------------+-----------------------------------------------------+------------+-----------+----------+----------+", - "| table_name | path | size_bytes | row_count | min_time | max_time |", - "+------------+-----------------------------------------------------+------------+-----------+----------+----------+", - "| cpu | dbs/test_db/cpu/1970-01-01/00-00/0000000003.parquet | 2142 | 2 | 0 | 10 |", - "| cpu | dbs/test_db/cpu/1970-01-01/00-00/0000000005.parquet | 2142 | 2 | 20 | 30 |", - "| cpu | dbs/test_db/cpu/1970-01-01/00-00/0000000007.parquet | 2142 | 2 | 40 | 50 |", - "| cpu | dbs/test_db/cpu/1970-01-01/00-00/0000000009.parquet | 2142 | 2 | 60 | 70 |", - "+------------+-----------------------------------------------------+------------+-----------+----------+----------+", + "+------------+------------+-----------+----------+----------+", + "| table_name | size_bytes | row_count | min_time | max_time |", + "+------------+------------+-----------+----------+----------+", + "| cpu | 2142 | 2 | 0 | 10 |", + "| cpu | 2142 | 2 | 20 | 30 |", + "| cpu | 2142 | 2 | 40 | 50 |", + "| cpu | 2142 | 2 | 60 | 70 |", + "+------------+------------+-----------+----------+----------+", ], }, TestCase { - query: "SELECT * FROM system.parquet_files WHERE table_name = 'mem'", + query: "SELECT table_name, size_bytes, row_count, min_time, max_time FROM system.parquet_files WHERE table_name = 'mem'", expected: &[ - "+------------+-----------------------------------------------------+------------+-----------+----------+----------+", - "| table_name | path | size_bytes | row_count | min_time | max_time |", - "+------------+-----------------------------------------------------+------------+-----------+----------+----------+", - "| mem | dbs/test_db/mem/1970-01-01/00-00/0000000003.parquet | 2142 | 2 | 0 | 10 |", - "| mem | 
dbs/test_db/mem/1970-01-01/00-00/0000000005.parquet | 2142 | 2 | 20 | 30 |", - "| mem | dbs/test_db/mem/1970-01-01/00-00/0000000007.parquet | 2142 | 2 | 40 | 50 |", - "| mem | dbs/test_db/mem/1970-01-01/00-00/0000000009.parquet | 2142 | 2 | 60 | 70 |", - "+------------+-----------------------------------------------------+------------+-----------+----------+----------+", + "+------------+------------+-----------+----------+----------+", + "| table_name | size_bytes | row_count | min_time | max_time |", + "+------------+------------+-----------+----------+----------+", + "| mem | 2142 | 2 | 0 | 10 |", + "| mem | 2142 | 2 | 20 | 30 |", + "| mem | 2142 | 2 | 40 | 50 |", + "| mem | 2142 | 2 | 60 | 70 |", + "+------------+------------+-----------+----------+----------+", ], }, ]; diff --git a/influxdb3_wal/Cargo.toml b/influxdb3_wal/Cargo.toml index 2d36bcd3b9..657b7370c6 100644 --- a/influxdb3_wal/Cargo.toml +++ b/influxdb3_wal/Cargo.toml @@ -13,6 +13,9 @@ influxdb-line-protocol.workspace = true observability_deps.workspace = true schema.workspace = true +# Local Crates +influxdb3_id = { path = "../influxdb3_id" } + # crates.io dependencies async-trait.workspace = true bytes.workspace = true diff --git a/influxdb3_wal/src/lib.rs b/influxdb3_wal/src/lib.rs index 8915e3cb82..84ba533caf 100644 --- a/influxdb3_wal/src/lib.rs +++ b/influxdb3_wal/src/lib.rs @@ -11,6 +11,7 @@ use crate::snapshot_tracker::SnapshotInfo; use async_trait::async_trait; use data_types::Timestamp; use hashbrown::HashMap; +use influxdb3_id::DbId; use influxdb_line_protocol::v3::SeriesValue; use influxdb_line_protocol::FieldValue; use iox_time::Time; @@ -211,6 +212,7 @@ pub enum WalOp { #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct CatalogBatch { + pub database_id: DbId, pub database_name: Arc<str>, pub time_ns: i64, pub ops: Vec<CatalogOp>, @@ -424,6 +426,7 @@ pub struct LastCacheDelete { #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct WriteBatch { + pub database_id: DbId, pub database_name: Arc<str>, pub table_chunks: HashMap<Arc<str>, TableChunks>, pub min_time_ns: i64, @@ -431,7 +434,11 @@ pub struct WriteBatch { } impl WriteBatch { - pub fn new(database_name: Arc<str>, table_chunks: HashMap<Arc<str>, TableChunks>) -> Self { + pub fn new( + database_id: DbId, + database_name: Arc<str>, + table_chunks: HashMap<Arc<str>, TableChunks>, + ) -> Self { // find the min and max times across the table chunks let (min_time_ns, max_time_ns) = table_chunks.values().fold( (i64::MAX, i64::MIN), @@ -444,6 +451,7 @@ impl WriteBatch { ); Self { + database_id, database_name, table_chunks, min_time_ns, diff --git a/influxdb3_wal/src/object_store.rs b/influxdb3_wal/src/object_store.rs index 52e21af497..a3e55a43c8 100644 --- a/influxdb3_wal/src/object_store.rs +++ b/influxdb3_wal/src/object_store.rs @@ -510,6 +510,7 @@ impl WalBuffer { self.database_to_write_batch .entry(db_name) .or_insert_with(|| WriteBatch { + database_id: new_write_batch.database_id, database_name: new_write_batch.database_name, table_chunks: Default::default(), min_time_ns: i64::MAX, @@ -612,6 +613,7 @@ mod tests { Field, FieldData, Gen1Duration, Row, SnapshotSequenceNumber, TableChunk, TableChunks, }; use async_trait::async_trait; + use influxdb3_id::DbId; use object_store::memory::InMemory; use std::any::Any; use tokio::sync::oneshot::Receiver; @@ -639,6 +641,7 @@ mod tests { let table_name: Arc<str> = "table1".into(); let op1 = WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: 
Arc::clone(&db_name), table_chunks: HashMap::from([( Arc::clone(&table_name), @@ -686,6 +689,7 @@ mod tests { wal.buffer_op_unconfirmed(op1.clone()).await.unwrap(); let op2 = WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: Arc::clone(&db_name), table_chunks: HashMap::from([( Arc::clone(&table_name), @@ -725,6 +729,7 @@ mod tests { max_timestamp_ns: 62_000000000, wal_file_number: WalFileSequenceNumber(1), ops: vec![WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: "db1".into(), table_chunks: HashMap::from([( "table1".into(), @@ -794,6 +799,7 @@ mod tests { max_timestamp_ns: 62000000000, wal_file_number: WalFileSequenceNumber(2), ops: vec![WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: "db1".into(), table_chunks: HashMap::from([( "table1".into(), @@ -865,6 +871,7 @@ mod tests { // create wal file 3, which should trigger a snapshot let op3 = WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: Arc::clone(&db_name), table_chunks: HashMap::from([( Arc::clone(&table_name), @@ -924,6 +931,7 @@ mod tests { max_timestamp_ns: 128_000000000, wal_file_number: WalFileSequenceNumber(3), ops: vec![WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: "db1".into(), table_chunks: HashMap::from([( "table1".into(), diff --git a/influxdb3_wal/src/serialize.rs b/influxdb3_wal/src/serialize.rs index a3f6f97dcf..9bdbc2db24 100644 --- a/influxdb3_wal/src/serialize.rs +++ b/influxdb3_wal/src/serialize.rs @@ -92,6 +92,7 @@ mod tests { Field, FieldData, Row, TableChunk, TableChunks, WalFileSequenceNumber, WalOp, WriteBatch, }; use hashbrown::HashMap; + use influxdb3_id::DbId; use std::sync::Arc; #[test] @@ -125,6 +126,7 @@ mod tests { max_timestamp_ns: 10, wal_file_number: WalFileSequenceNumber::new(1), ops: vec![WalOp::Write(WriteBatch { + database_id: DbId::from(0), database_name: "foo".into(), table_chunks, min_time_ns: 0, diff --git a/influxdb3_write/Cargo.toml b/influxdb3_write/Cargo.toml index 08e7863f74..491827a8e2 100644 --- a/influxdb3_write/Cargo.toml +++ b/influxdb3_write/Cargo.toml @@ -20,6 +20,7 @@ schema.workspace = true # Local deps influxdb3_catalog = { path = "../influxdb3_catalog" } +influxdb3_id = { path = "../influxdb3_id" } influxdb3_wal = { path = "../influxdb3_wal" } # crates.io dependencies diff --git a/influxdb3_write/src/last_cache/mod.rs b/influxdb3_write/src/last_cache/mod.rs index 2c3f6dda8f..07d8f1a4aa 100644 --- a/influxdb3_write/src/last_cache/mod.rs +++ b/influxdb3_write/src/last_cache/mod.rs @@ -1575,6 +1575,7 @@ mod tests { use arrow_util::{assert_batches_eq, assert_batches_sorted_eq}; use data_types::NamespaceName; use influxdb3_catalog::catalog::{Catalog, DatabaseSchema, TableDefinition}; + use influxdb3_id::DbId; use influxdb3_wal::{LastCacheDefinition, WalConfig}; use insta::assert_json_snapshot; use iox_time::{MockProvider, Time}; @@ -3059,6 +3060,7 @@ mod tests { // Set up a database in the catalog: let db_name = "test_db"; let mut database = DatabaseSchema { + id: DbId::from(0), name: db_name.into(), tables: BTreeMap::new(), }; diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs index 9b3de60ce7..7b5eb847d0 100644 --- a/influxdb3_write/src/lib.rs +++ b/influxdb3_write/src/lib.rs @@ -17,6 +17,7 @@ use datafusion::catalog::Session; use datafusion::error::DataFusionError; use datafusion::prelude::Expr; use influxdb3_catalog::catalog::{self, SequenceNumber}; +use influxdb3_id::DbId; use influxdb3_wal::{LastCacheDefinition, SnapshotSequenceNumber, 
WalFileSequenceNumber}; use iox_query::QueryChunk; use iox_time::Time; @@ -165,6 +166,8 @@ pub struct PersistedSnapshot { pub host_id: String, /// The next file id to be used with `ParquetFile`s when the snapshot is loaded pub next_file_id: ParquetFileId, + /// The next db id to be used with databases when the snapshot is loaded + pub next_db_id: DbId, /// The snapshot sequence number associated with this snapshot pub snapshot_sequence_number: SnapshotSequenceNumber, /// The wal file sequence number that triggered this snapshot @@ -194,6 +197,7 @@ impl PersistedSnapshot { Self { host_id, next_file_id: ParquetFileId::current(), + next_db_id: DbId::next_id(), snapshot_sequence_number, wal_file_sequence_number, catalog_sequence_number, diff --git a/influxdb3_write/src/paths.rs b/influxdb3_write/src/paths.rs index 52ab9ad9d9..78bda2e94a 100644 --- a/influxdb3_write/src/paths.rs +++ b/influxdb3_write/src/paths.rs @@ -55,12 +55,13 @@ impl ParquetFilePath { pub fn new( host_prefix: &str, db_name: &str, + db_id: u32, table_name: &str, date: DateTime<Utc>, wal_file_sequence_number: WalFileSequenceNumber, ) -> Self { let path = ObjPath::from(format!( - "{host_prefix}/dbs/{db_name}/{table_name}/{}/{}.{}", + "{host_prefix}/dbs/{db_name}-{db_id}/{table_name}/{}/{}.{}", date.format("%Y-%m-%d/%H-%M"), wal_file_sequence_number.as_u64(), PARQUET_FILE_EXTENSION @@ -70,6 +71,7 @@ impl ParquetFilePath { pub fn new_with_chunk_time( db_name: &str, + db_id: u32, table_name: &str, chunk_time: i64, wal_file_sequence_number: WalFileSequenceNumber, @@ -77,7 +79,7 @@ impl ParquetFilePath { // Convert the chunk time into a date time string for YYYY-MM-DDTHH-MM let date_time = DateTime::<Utc>::from_timestamp_nanos(chunk_time); let path = ObjPath::from(format!( - "dbs/{db_name}/{table_name}/{}/{:010}.{}", + "dbs/{db_name}-{db_id}/{table_name}/{}/{:010}.{}", date_time.format("%Y-%m-%d/%H-%M"), wal_file_sequence_number.as_u64(), PARQUET_FILE_EXTENSION @@ -146,11 +148,12 @@ fn parquet_file_path_new() { *ParquetFilePath::new( "my_host", "my_db", + 0, "my_table", Utc.with_ymd_and_hms(2038, 1, 19, 3, 14, 7).unwrap(), WalFileSequenceNumber::new(0), ), - ObjPath::from("my_host/dbs/my_db/my_table/2038-01-19/03-14/0.parquet") + ObjPath::from("my_host/dbs/my_db-0/my_table/2038-01-19/03-14/0.parquet") ); } @@ -160,13 +163,14 @@ fn parquet_file_percent_encoded() { ParquetFilePath::new( "my_host", "..", + 0, "..", Utc.with_ymd_and_hms(2038, 1, 19, 3, 14, 7).unwrap(), WalFileSequenceNumber::new(0), ) .as_ref() .as_ref(), - "my_host/dbs/%2E%2E/%2E%2E/2038-01-19/03-14/0.parquet" + "my_host/dbs/..-0/%2E%2E/2038-01-19/03-14/0.parquet" ); } diff --git a/influxdb3_write/src/persister.rs b/influxdb3_write/src/persister.rs index 5718f9486e..0408afb223 100644 --- a/influxdb3_write/src/persister.rs +++ b/influxdb3_write/src/persister.rs @@ -416,6 +416,7 @@ mod tests { use super::*; use crate::ParquetFileId; use influxdb3_catalog::catalog::SequenceNumber; + use influxdb3_id::DbId; use influxdb3_wal::SnapshotSequenceNumber; use object_store::memory::InMemory; use observability_deps::tracing::info; @@ -488,6 +489,7 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(0), + next_db_id: DbId::from(0), snapshot_sequence_number: SnapshotSequenceNumber::new(0), wal_file_sequence_number: WalFileSequenceNumber::new(0), catalog_sequence_number: SequenceNumber::new(0), @@ -509,6 +511,7 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: 
ParquetFileId::from(0), + next_db_id: DbId::from(0), snapshot_sequence_number: SnapshotSequenceNumber::new(0), wal_file_sequence_number: WalFileSequenceNumber::new(0), catalog_sequence_number: SequenceNumber::default(), @@ -521,6 +524,7 @@ mod tests { let info_file_2 = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(1), + next_db_id: DbId::from(0), snapshot_sequence_number: SnapshotSequenceNumber::new(1), wal_file_sequence_number: WalFileSequenceNumber::new(1), catalog_sequence_number: SequenceNumber::default(), @@ -533,6 +537,7 @@ mod tests { let info_file_3 = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(2), + next_db_id: DbId::from(0), snapshot_sequence_number: SnapshotSequenceNumber::new(2), wal_file_sequence_number: WalFileSequenceNumber::new(2), catalog_sequence_number: SequenceNumber::default(), @@ -566,6 +571,7 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(0), + next_db_id: DbId::from(0), snapshot_sequence_number: SnapshotSequenceNumber::new(0), wal_file_sequence_number: WalFileSequenceNumber::new(0), catalog_sequence_number: SequenceNumber::default(), @@ -592,6 +598,7 @@ mod tests { let info_file = PersistedSnapshot { host_id: "test_host".to_string(), next_file_id: ParquetFileId::from(id), + next_db_id: DbId::from(0), snapshot_sequence_number: SnapshotSequenceNumber::new(id), wal_file_sequence_number: WalFileSequenceNumber::new(id), catalog_sequence_number: SequenceNumber::new(id as u32), @@ -708,6 +715,7 @@ mod tests { let path = ParquetFilePath::new( "test_host", "db_one", + 0, "table_one", Utc::now(), WalFileSequenceNumber::new(1), diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 04fdc16376..e91f51f949 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -137,6 +137,11 @@ impl WriteBufferImpl { let last_snapshot_sequence_number = persisted_snapshots .first() .map(|s| s.snapshot_sequence_number); + // Set the next db id to use when adding a new database + persisted_snapshots + .first() + .map(|s| s.next_db_id.set_next_id()) + .unwrap_or(()); // Set the next file id to use when persisting ParquetFiles NEXT_FILE_ID.store( persisted_snapshots @@ -555,6 +560,7 @@ impl LastCacheManager for WriteBufferImpl { self.catalog.add_last_cache(db_name, tbl_name, info.clone()); let add_cache_catalog_batch = WalOp::Catalog(CatalogBatch { time_ns: self.time_provider.now().timestamp_nanos(), + database_id: db_schema.id, database_name: Arc::clone(&db_schema.name), ops: vec![CreateLastCache(info.clone())], }); @@ -582,6 +588,7 @@ impl LastCacheManager for WriteBufferImpl { self.wal .write_ops(vec![WalOp::Catalog(CatalogBatch { time_ns: self.time_provider.now().timestamp_nanos(), + database_id: catalog.db_schema(db_name).expect("db exists").id, database_name: db_name.into(), ops: vec![CatalogOp::DeleteLastCache(LastCacheDelete { table: tbl_name.into(), @@ -609,13 +616,13 @@ mod tests { use datafusion_util::config::register_iox_object_store; use futures_util::StreamExt; use influxdb3_catalog::catalog::SequenceNumber; + use influxdb3_id::DbId; use influxdb3_wal::{Gen1Duration, SnapshotSequenceNumber, WalFileSequenceNumber}; use iox_query::exec::IOxSessionContext; use iox_time::{MockProvider, Time}; use object_store::local::LocalFileSystem; use object_store::memory::InMemory; use object_store::{ObjectStore, PutPayload}; - use parking_lot::{Mutex, 
MutexGuard}; #[test] fn parse_lp_into_buffer() { @@ -744,7 +751,6 @@ mod tests { #[tokio::test] async fn last_cache_create_and_delete_is_durable() { - let lock = lock(); let (wbuf, _ctx) = setup( Time::from_timestamp_nanos(0), Arc::new(InMemory::new()), @@ -756,7 +762,6 @@ mod tests { }, ) .await; - drop(lock); let db_name = "db"; let tbl_name = "table"; let cache_name = "cache"; @@ -833,9 +838,9 @@ mod tests { let catalog_json = catalog_to_json(&wbuf.catalog); insta::assert_json_snapshot!( - "catalog-after-last-cache-create-and-new-field", - catalog_json, - { ".instance_id" => "[uuid]" } + "catalog-after-last-cache-create-and-new-field", + catalog_json, + { ".instance_id" => "[uuid]" } ); // write a new data point to fill the cache @@ -895,7 +900,6 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn returns_chunks_across_parquet_and_buffered_data() { - let lock = lock(); let (write_buffer, session_context) = setup( Time::from_timestamp_nanos(0), Arc::new(InMemory::new()), @@ -907,7 +911,6 @@ mod tests { }, ) .await; - drop(lock); let _ = write_buffer .write_lp( @@ -1095,7 +1098,6 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn catalog_snapshots_only_if_updated() { - let lock = lock(); let (write_buffer, _ctx) = setup( Time::from_timestamp_nanos(0), Arc::new(InMemory::new()), @@ -1107,7 +1109,6 @@ mod tests { }, ) .await; - drop(lock); let db_name = "foo"; // do three writes to force a snapshot @@ -1185,7 +1186,6 @@ mod tests { let object_store: Arc<dyn ObjectStore> = Arc::new(LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap()); - let lock = lock(); // create a snapshot file that will be loaded on initialization of the write buffer: // Set NEXT_FILE_ID to a non zero number for the snapshot NEXT_FILE_ID.store(500, Ordering::SeqCst); @@ -1225,7 +1225,6 @@ mod tests { // Assert that loading the snapshots sets NEXT_FILE_ID to the correct id number assert_eq!(NEXT_FILE_ID.load(Ordering::SeqCst), 500); - drop(lock); // there should be one snapshot already, i.e., the one we created above: verify_snapshot_count(1, &wbuf.persister).await; @@ -1579,6 +1578,75 @@ mod tests { assert!(snapshot.is_some(), "watcher should be notified of snapshot"); } + #[tokio::test] + async fn test_db_id_is_persisted_and_updated() { + let obj_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); + let (wbuf, _) = setup( + Time::from_timestamp_nanos(0), + Arc::clone(&obj_store), + WalConfig { + gen1_duration: Gen1Duration::new_1m(), + max_write_buffer_size: 100, + flush_interval: Duration::from_millis(10), + snapshot_size: 1, + }, + ) + .await; + let db_name = "coffee_shop"; + let tbl_name = "menu"; + + // do some writes to get a snapshot: + do_writes( + db_name, + &wbuf, + &[ + TestWrite { + lp: format!("{tbl_name},name=espresso price=2.50"), + time_seconds: 1, + }, + // This write is way out in the future, so as to be outside the normal + // range for a snapshot: + TestWrite { + lp: format!("{tbl_name},name=americano price=3.00"), + time_seconds: 20_000, + }, + // This write will trigger the snapshot: + TestWrite { + lp: format!("{tbl_name},name=latte price=4.50"), + time_seconds: 3, + }, + ], + ) + .await; + + // Wait for snapshot to be created: + verify_snapshot_count(1, &wbuf.persister).await; + + // Now drop the write buffer, and create a new one that replays: + drop(wbuf); + + // Set DbId to a large number to make sure it is properly set on replay + // and assert that it's what we expect it to be before we replay + 
dbg!(DbId::next_id()); + DbId::from(10_000).set_next_id(); + assert_eq!(DbId::next_id().as_u32(), 10_000); + dbg!(DbId::next_id()); + let (_wbuf, _) = setup( + Time::from_timestamp_nanos(0), + Arc::clone(&obj_store), + WalConfig { + gen1_duration: Gen1Duration::new_1m(), + max_write_buffer_size: 100, + flush_interval: Duration::from_millis(10), + snapshot_size: 1, + }, + ) + .await; + dbg!(DbId::next_id()); + + assert_eq!(DbId::next_id().as_u32(), 1); + } + struct TestWrite<LP> { lp: LP, time_seconds: i64, @@ -1699,20 +1767,4 @@ mod tests { } batches } - - /// Lock for the NEXT_FILE_ID data which is set during some of these tests. - /// We need to have exclusive access to it to test that it works when loading - /// from a snapshot. We lock in most of the calls to setup in this test suite - /// where it would cause problems. If running under `cargo-nextest`, return a - /// different mutex guard as it does not have this problem due to running - /// each test in it's own process - fn lock() -> MutexGuard<'static, ()> { - static FILE_ID_LOCK: Mutex<()> = Mutex::new(()); - static DUMMY_LOCK: Mutex<()> = Mutex::new(()); - if std::env::var("NEXTEST").unwrap_or("0".into()) == "1" { - DUMMY_LOCK.lock() - } else { - FILE_ID_LOCK.lock() - } - } } diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs index 4b8c11abf4..b73a061c5b 100644 --- a/influxdb3_write/src/write_buffer/queryable_buffer.rs +++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs @@ -144,6 +144,7 @@ impl QueryableBuffer { let mut buffer = self.buffer.write(); let mut persisting_chunks = vec![]; + let catalog = Arc::clone(&buffer.catalog); for (database_name, table_map) in buffer.db_to_table.iter_mut() { for (table_name, table_buffer) in table_map.iter_mut() { let snapshot_chunks = table_buffer.snapshot(snapshot_details.end_time_marker); @@ -155,6 +156,11 @@ impl QueryableBuffer { chunk_time: chunk.chunk_time, path: ParquetFilePath::new_with_chunk_time( database_name.as_ref(), + catalog + .db_schema(database_name) + .expect("db exists") + .id + .as_u32(), table_name.as_ref(), chunk.chunk_time, write.wal_file_number, diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap index a280f27e5d..4fb9f2de38 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-after-last-cache-create-and-new-field.snap @@ -5,6 +5,7 @@ expression: catalog_json { "databases": { "db": { + "id": 0, "name": "db", "tables": { "table": { diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap index d23962836b..fe7816c5d9 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-create.snap @@ -5,6 +5,7 @@ expression: catalog_json { "databases": { "db": { + "id": 0, "name": 
"db", "tables": { "table": { diff --git a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap index ccb39da961..6916b0364d 100644 --- a/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap +++ b/influxdb3_write/src/write_buffer/snapshots/influxdb3_write__write_buffer__tests__catalog-immediately-after-last-cache-delete.snap @@ -5,6 +5,7 @@ expression: catalog_json { "databases": { "db": { + "id": 0, "name": "db", "tables": { "table": { diff --git a/influxdb3_write/src/write_buffer/validator.rs b/influxdb3_write/src/write_buffer/validator.rs index 00f6e7604f..d99e11c0b2 100644 --- a/influxdb3_write/src/write_buffer/validator.rs +++ b/influxdb3_write/src/write_buffer/validator.rs @@ -110,6 +110,7 @@ impl WriteValidator<WithCatalog> { None } else { let catalog_batch = CatalogBatch { + database_id: self.state.db_schema.id, database_name: Arc::clone(&self.state.db_schema.name), time_ns: self.state.time_now_ns, ops: catalog_updates, @@ -185,6 +186,7 @@ impl WriteValidator<WithCatalog> { None } else { let catalog_batch = CatalogBatch { + database_id: self.state.db_schema.id, time_ns: self.state.time_now_ns, database_name: Arc::clone(&self.state.db_schema.name), ops: catalog_updates, @@ -576,8 +578,11 @@ impl<'lp> WriteValidator<LinesParsed<'lp, v3::ParsedLine<'lp>>> { ); } - let write_batch = - WriteBatch::new(Arc::clone(&self.state.catalog.db_schema.name), table_chunks); + let write_batch = WriteBatch::new( + self.state.catalog.db_schema.id, + Arc::clone(&self.state.catalog.db_schema.name), + table_chunks, + ); ValidatedLines { line_count, @@ -673,8 +678,11 @@ impl<'lp> WriteValidator<LinesParsed<'lp, ParsedLine<'lp>>> { ); } - let write_batch = - WriteBatch::new(Arc::clone(&self.state.catalog.db_schema.name), table_chunks); + let write_batch = WriteBatch::new( + self.state.catalog.db_schema.id, + Arc::clone(&self.state.catalog.db_schema.name), + table_chunks, + ); ValidatedLines { line_count,
146494f6193bf737a7ba9ec7e3035056112e34ee
Dom Dwyer
2023-03-03 17:08:48
pre-allocation of SequenceNumberSet
Support pre-allocation of SequenceNumberSet for known-length sets.
null
perf: pre-allocation of SequenceNumberSet Support pre-allocation of SequenceNumberSet for known-length sets.
diff --git a/data_types/src/sequence_number_set.rs b/data_types/src/sequence_number_set.rs index 7d83cf82d1..9bd253ae55 100644 --- a/data_types/src/sequence_number_set.rs +++ b/data_types/src/sequence_number_set.rs @@ -66,6 +66,12 @@ impl SequenceNumberSet { pub fn iter(&self) -> impl Iterator<Item = SequenceNumber> + '_ { self.0.iter().map(|v| SequenceNumber::new(v as _)) } + + /// Initialise a [`SequenceNumberSet`] that is pre-allocated to contain up + /// to `n` elements without reallocating. + pub fn with_capacity(n: u32) -> Self { + Self(croaring::Bitmap::create_with_capacity(n)) + } } /// Deserialisation method.
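The commit above adds SequenceNumberSet::with_capacity, which wraps croaring::Bitmap::create_with_capacity so callers that know the set's size up front can avoid intermediate reallocations while filling it. A minimal caller-side sketch of that pattern, using the croaring bitmap directly and assuming the croaring version pinned at the time of this diff (which exposes create_with_capacity and add):

use croaring::Bitmap;

fn main() {
    // Known-length input: e.g. one sequence number per buffered write.
    let seq_numbers: Vec<u32> = (0..1_000).collect();

    // Pre-allocate the bitmap, mirroring SequenceNumberSet::with_capacity above.
    let mut set = Bitmap::create_with_capacity(seq_numbers.len() as u32);
    for n in &seq_numbers {
        set.add(*n);
    }

    assert_eq!(set.cardinality(), 1_000);
}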
475c8a07045daeb731c0b9ee2d7b4be55ffbcb0f
Luke Bond
2022-10-14 13:06:49
only emit ttbr metric for applied ops (#5854)
* fix: only emit ttbr metric for applied ops * fix: move DmlApplyAction to s/w accessible * chore: test for skipped ingest; comments and log improvements * fix: fixed ingester test re skipping write
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
fix: only emit ttbr metric for applied ops (#5854) * fix: only emit ttbr metric for applied ops * fix: move DmlApplyAction to s/w accessible * chore: test for skipped ingest; comments and log improvements * fix: fixed ingester test re skipping write Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/ingester/src/data.rs b/ingester/src/data.rs index d962fca3ba..c663bae41f 100644 --- a/ingester/src/data.rs +++ b/ingester/src/data.rs @@ -172,7 +172,7 @@ impl IngesterData { shard_id: ShardId, dml_operation: DmlOperation, lifecycle_handle: &dyn LifecycleHandle, - ) -> Result<bool> { + ) -> Result<DmlApplyAction> { let shard_data = self .shards .get(&shard_id) @@ -541,6 +541,16 @@ impl Persister for IngesterData { } } +/// A successful DML apply operation can perform one of these actions +#[derive(Clone, Copy, Debug)] +pub enum DmlApplyAction { + /// The DML operation was successful; bool indicates if ingestion should be paused + Applied(bool), + + /// The DML operation was skipped because it has already been applied + Skipped, +} + #[cfg(test)] mod tests { use std::{ops::DerefMut, sync::Arc, time::Duration}; @@ -634,7 +644,7 @@ mod tests { metrics, Arc::new(SystemProvider::new()), ); - let should_pause = data + let action = data .buffer_operation( shard1.id, DmlOperation::Write(w1.clone()), @@ -642,12 +652,12 @@ mod tests { ) .await .unwrap(); - assert!(!should_pause); - let should_pause = data + assert_matches!(action, DmlApplyAction::Applied(false)); + let action = data .buffer_operation(shard1.id, DmlOperation::Write(w1), &manager.handle()) .await .unwrap(); - assert!(should_pause); + assert_matches!(action, DmlApplyAction::Applied(true)); } #[tokio::test] @@ -715,13 +725,13 @@ mod tests { Arc::new(SystemProvider::new()), ); - let should_pause = data + let action = data .buffer_operation(shard1.id, DmlOperation::Write(w1), &manager.handle()) .await .unwrap(); // Exceeding the row count doesn't pause ingest (like other partition // limits) - assert!(!should_pause); + assert_matches!(action, DmlApplyAction::Applied(false)); let (table_id, partition_id) = { let sd = data.shards.get(&shard1.id).unwrap(); @@ -1291,7 +1301,7 @@ mod tests { // w1 should be ignored because the per-partition replay offset is set // to 1 already, so it shouldn't be buffered and the buffer should // remain empty. - let should_pause = data + let action = data .buffer_operation(DmlOperation::Write(w1), &catalog, &manager.handle()) .await .unwrap(); @@ -1305,7 +1315,7 @@ mod tests { ); assert!(p.data.buffer.is_none()); } - assert!(!should_pause); + assert_matches!(action, DmlApplyAction::Skipped); // w2 should be in the buffer data.buffer_operation(DmlOperation::Write(w2), &catalog, &manager.handle()) diff --git a/ingester/src/data/namespace.rs b/ingester/src/data/namespace.rs index 8ccf6e3ded..500345dcf3 100644 --- a/ingester/src/data/namespace.rs +++ b/ingester/src/data/namespace.rs @@ -17,7 +17,7 @@ use super::{ partition::resolver::PartitionProvider, table::{TableData, TableName}, }; -use crate::lifecycle::LifecycleHandle; +use crate::{data::DmlApplyAction, lifecycle::LifecycleHandle}; /// A double-referenced map where [`TableData`] can be looked up by name, or ID. #[derive(Debug, Default)] @@ -177,7 +177,7 @@ impl NamespaceData { dml_operation: DmlOperation, catalog: &Arc<dyn Catalog>, lifecycle_handle: &dyn LifecycleHandle, - ) -> Result<bool, super::Error> { + ) -> Result<DmlApplyAction, super::Error> { let sequence_number = dml_operation .meta() .sequence() @@ -194,6 +194,7 @@ impl NamespaceData { match dml_operation { DmlOperation::Write(write) => { let mut pause_writes = false; + let mut all_skipped = true; // Extract the partition key derived by the router. 
let partition_key = write @@ -211,7 +212,7 @@ impl NamespaceData { { // lock scope let mut table_data = table_data.write().await; - let should_pause = table_data + let action = table_data .buffer_table_write( sequence_number, b, @@ -219,13 +220,21 @@ impl NamespaceData { lifecycle_handle, ) .await?; - pause_writes = pause_writes || should_pause; + if let DmlApplyAction::Applied(should_pause) = action { + pause_writes = pause_writes || should_pause; + all_skipped = false; + } } #[cfg(test)] self.test_triggers.on_write().await; } - Ok(pause_writes) + if all_skipped { + Ok(DmlApplyAction::Skipped) + } else { + // at least some were applied + Ok(DmlApplyAction::Applied(pause_writes)) + } } DmlOperation::Delete(delete) => { // Deprecated delete support: @@ -239,7 +248,7 @@ impl NamespaceData { "discarding unsupported delete op" ); - Ok(false) + Ok(DmlApplyAction::Applied(false)) } } } diff --git a/ingester/src/data/shard.rs b/ingester/src/data/shard.rs index b01504085f..5b57fa9e27 100644 --- a/ingester/src/data/shard.rs +++ b/ingester/src/data/shard.rs @@ -13,6 +13,7 @@ use write_summary::ShardProgress; use super::{ namespace::{NamespaceData, NamespaceName}, partition::resolver::PartitionProvider, + DmlApplyAction, }; use crate::lifecycle::LifecycleHandle; @@ -99,7 +100,7 @@ impl ShardData { dml_operation: DmlOperation, catalog: &Arc<dyn Catalog>, lifecycle_handle: &dyn LifecycleHandle, - ) -> Result<bool, super::Error> { + ) -> Result<DmlApplyAction, super::Error> { let namespace_data = match self.namespace(&NamespaceName::from(dml_operation.namespace())) { Some(d) => d, None => { diff --git a/ingester/src/data/table.rs b/ingester/src/data/table.rs index 472809a783..3e0fd0d6c4 100644 --- a/ingester/src/data/table.rs +++ b/ingester/src/data/table.rs @@ -8,7 +8,7 @@ use observability_deps::tracing::*; use write_summary::ShardProgress; use super::partition::{resolver::PartitionProvider, PartitionData, UnpersistedPartitionData}; -use crate::{lifecycle::LifecycleHandle, querier_handler::PartitionStatus}; +use crate::{data::DmlApplyAction, lifecycle::LifecycleHandle, querier_handler::PartitionStatus}; /// A double-referenced map where [`PartitionData`] can be looked up by /// [`PartitionKey`], or ID. @@ -137,7 +137,7 @@ impl TableData { batch: MutableBatch, partition_key: PartitionKey, lifecycle_handle: &dyn LifecycleHandle, - ) -> Result<bool, super::Error> { + ) -> Result<DmlApplyAction, super::Error> { let partition_data = match self.partition_data.by_key.get_mut(&partition_key) { Some(p) => p, None => { @@ -165,7 +165,7 @@ impl TableData { op_sequence_number=?sequence_number, "skipping already-persisted write" ); - return Ok(false); + return Ok(DmlApplyAction::Skipped); } } @@ -188,7 +188,7 @@ impl TableData { rows, ); - Ok(should_pause) + Ok(DmlApplyAction::Applied(should_pause)) } /// Return the [`PartitionData`] for the specified ID. 
@@ -332,7 +332,7 @@ mod tests { assert!(table.partition_data.by_id_mut(PARTITION_ID).is_none()); // Write some test data - let pause = table + let action = table .buffer_table_write( SequenceNumber::new(42), batch, @@ -341,7 +341,7 @@ mod tests { ) .await .expect("buffer op should succeed"); - assert!(!pause); + assert_matches!(action, DmlApplyAction::Applied(false)); // Referencing the partition should succeed assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_some()); @@ -394,7 +394,7 @@ mod tests { assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_none()); // Write some test data - let pause = table + let action = table .buffer_table_write( SequenceNumber::new(42), batch, @@ -403,7 +403,7 @@ mod tests { ) .await .expect("buffer op should succeed"); - assert!(!pause); + assert_matches!(action, DmlApplyAction::Applied(false)); // Referencing the partition should succeed assert!(table.partition_data.by_key(&PARTITION_KEY.into()).is_some()); diff --git a/ingester/src/stream_handler/handler.rs b/ingester/src/stream_handler/handler.rs index 68a49d3ff8..f1d31b08d5 100644 --- a/ingester/src/stream_handler/handler.rs +++ b/ingester/src/stream_handler/handler.rs @@ -12,7 +12,10 @@ use tokio_util::sync::CancellationToken; use write_buffer::core::{WriteBufferErrorKind, WriteBufferStreamHandler}; use super::DmlSink; -use crate::lifecycle::{LifecycleHandle, LifecycleHandleImpl}; +use crate::{ + data::DmlApplyAction, + lifecycle::{LifecycleHandle, LifecycleHandleImpl}, +}; /// When the [`LifecycleManager`] indicates that ingest should be paused because /// of memory pressure, the shard will loop, sleeping this long between @@ -384,7 +387,7 @@ something clever.", op.meta().duration_since_production(&self.time_provider); let should_pause = match self.sink.apply(op).await { - Ok(should_pause) => { + Ok(DmlApplyAction::Applied(should_pause)) => { trace!( kafka_topic=%self.topic_name, shard_index=%self.shard_index, @@ -393,8 +396,31 @@ something clever.", ?op_sequence_number, "successfully applied dml operation" ); + // we only want to report the TTBR if anything was applied + if let Some(delta) = duration_since_production { + // Update the TTBR metric before potentially sleeping. + self.time_to_be_readable.set(delta); + trace!( + kafka_topic=%self.topic_name, + shard_index=%self.shard_index, + shard_id=%self.shard_id, + delta=%delta.as_millis(), + "reporting TTBR for shard (ms)" + ); + } should_pause } + Ok(DmlApplyAction::Skipped) => { + trace!( + kafka_topic=%self.topic_name, + shard_index=%self.shard_index, + shard_id=%self.shard_id, + false, + ?op_sequence_number, + "did not apply dml operation (op was already persisted previously)" + ); + false + } Err(e) => { error!( error=%e, @@ -410,18 +436,6 @@ something clever.", } }; - if let Some(delta) = duration_since_production { - // Update the TTBR metric before potentially sleeping. - self.time_to_be_readable.set(delta); - trace!( - kafka_topic=%self.topic_name, - shard_index=%self.shard_index, - shard_id=%self.shard_id, - delta=%delta.as_millis(), - "reporting TTBR for shard (ms)" - ); - } - if should_pause { // The lifecycle manager may temporarily pause ingest - wait for // persist operations to shed memory pressure if needed. 
@@ -772,7 +786,7 @@ mod tests { stream_ops = vec![ vec![Ok(DmlOperation::Write(make_write("bananas", 42)))] ], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 42, want_reset = 0, want_err_metrics = [], @@ -788,7 +802,7 @@ mod tests { stream_ops = vec![ vec![Ok(DmlOperation::Delete(make_delete("platanos", 24)))] ], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 24, want_reset = 0, want_err_metrics = [], @@ -806,7 +820,7 @@ mod tests { Err(WriteBufferError::new(WriteBufferErrorKind::IO, "explosions")), Ok(DmlOperation::Write(make_write("bananas", 13))) ]], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 13, want_reset = 0, want_err_metrics = [ @@ -829,7 +843,7 @@ mod tests { Err(WriteBufferError::new(WriteBufferErrorKind::SequenceNumberNoLongerExists, "explosions")), Ok(DmlOperation::Write(make_write("bananas", 31))) ]], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 31, want_reset = 0, want_err_metrics = [ @@ -858,7 +872,7 @@ mod tests { ], vec![Ok(DmlOperation::Write(make_write("bananas", 31)))], ], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 31, want_reset = 1, want_err_metrics = [ @@ -880,7 +894,7 @@ mod tests { Err(WriteBufferError::new(WriteBufferErrorKind::InvalidData, "explosions")), Ok(DmlOperation::Write(make_write("bananas", 50))) ]], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 50, want_reset = 0, want_err_metrics = [ @@ -902,7 +916,7 @@ mod tests { Err(WriteBufferError::new(WriteBufferErrorKind::Unknown, "explosions")), Ok(DmlOperation::Write(make_write("bananas", 60))) ]], - sink_rets = [Ok(true)], + sink_rets = [Ok(DmlApplyAction::Applied(true))], want_ttbr = 60, want_reset = 0, want_err_metrics = [ @@ -932,7 +946,7 @@ mod tests { want_sink = [] ); - // Asserts the TTBR is uses the last value in the stream. + // Asserts the TTBR used is the last value in the stream. 
test_stream_handler!( reports_last_ttbr, skip_to_oldest_available = false, @@ -942,7 +956,7 @@ mod tests { Ok(DmlOperation::Write(make_write("bananas", 3))), Ok(DmlOperation::Write(make_write("bananas", 42))), ]], - sink_rets = [Ok(true), Ok(false), Ok(true), Ok(false),], + sink_rets = [Ok(DmlApplyAction::Applied(true)), Ok(DmlApplyAction::Applied(false)), Ok(DmlApplyAction::Applied(true)), Ok(DmlApplyAction::Applied(false)),], want_ttbr = 42, want_reset = 0, want_err_metrics = [ @@ -967,7 +981,7 @@ mod tests { ]], sink_rets = [ Err(crate::data::Error::NamespaceNotFound{namespace: "bananas".to_string() }), - Ok(true), + Ok(DmlApplyAction::Applied(true)), ], want_ttbr = 2, want_reset = 0, @@ -986,6 +1000,21 @@ mod tests { } ); + test_stream_handler!( + skipped_op_no_ttbr, + skip_to_oldest_available = false, + stream_ops = vec![vec![Ok(DmlOperation::Write(make_write("some_op", 1)))]], + sink_rets = [Ok(DmlApplyAction::Skipped)], + want_ttbr = 0, + want_reset = 0, + want_err_metrics = [], + want_sink = [ + DmlOperation::Write(op), + ] => { + assert_eq!(op.namespace(), "some_op"); + } + ); + #[derive(Debug)] struct EmptyWriteBufferStreamHandler {} diff --git a/ingester/src/stream_handler/mock_sink.rs b/ingester/src/stream_handler/mock_sink.rs index 974e7e57aa..5f8c5ca0a0 100644 --- a/ingester/src/stream_handler/mock_sink.rs +++ b/ingester/src/stream_handler/mock_sink.rs @@ -5,11 +5,12 @@ use dml::DmlOperation; use parking_lot::Mutex; use super::DmlSink; +use crate::data::DmlApplyAction; #[derive(Debug, Default)] struct MockDmlSinkState { calls: Vec<DmlOperation>, - ret: VecDeque<Result<bool, crate::data::Error>>, + ret: VecDeque<Result<DmlApplyAction, crate::data::Error>>, } #[derive(Debug, Default)] @@ -20,7 +21,7 @@ pub struct MockDmlSink { impl MockDmlSink { pub fn with_apply_return( self, - ret: impl Into<VecDeque<Result<bool, crate::data::Error>>>, + ret: impl Into<VecDeque<Result<DmlApplyAction, crate::data::Error>>>, ) -> Self { self.state.lock().ret = ret.into(); self @@ -33,7 +34,7 @@ impl MockDmlSink { #[async_trait] impl DmlSink for MockDmlSink { - async fn apply(&self, op: DmlOperation) -> Result<bool, crate::data::Error> { + async fn apply(&self, op: DmlOperation) -> Result<DmlApplyAction, crate::data::Error> { let mut state = self.state.lock(); state.calls.push(op); state.ret.pop_front().expect("no mock sink value to return") diff --git a/ingester/src/stream_handler/sink.rs b/ingester/src/stream_handler/sink.rs index 825b012ce9..1202271c8a 100644 --- a/ingester/src/stream_handler/sink.rs +++ b/ingester/src/stream_handler/sink.rs @@ -3,12 +3,15 @@ use std::{fmt::Debug, ops::Deref, sync::Arc}; use async_trait::async_trait; use dml::DmlOperation; +use crate::data::DmlApplyAction; + /// A [`DmlSink`] handles [`DmlOperation`] instances read from a shard. #[async_trait] pub(crate) trait DmlSink: Debug + Send + Sync { - /// Apply `op` read from a shard, returning `Ok(true)` if ingest should - /// be paused. - async fn apply(&self, op: DmlOperation) -> Result<bool, crate::data::Error>; + /// Apply `op` read from a shard, returning `Ok(DmlApplyAction::Applied(bool))`, the bool indicating if the + /// ingest should be paused. Returns `Ok(DmlApplyAction::Skipped)` if the operation has been + /// applied previously and was skipped. 
+ async fn apply(&self, op: DmlOperation) -> Result<DmlApplyAction, crate::data::Error>; } #[async_trait] @@ -16,7 +19,7 @@ impl<T> DmlSink for Arc<T> where T: DmlSink, { - async fn apply(&self, op: DmlOperation) -> Result<bool, crate::data::Error> { + async fn apply(&self, op: DmlOperation) -> Result<DmlApplyAction, crate::data::Error> { self.deref().apply(op).await } } diff --git a/ingester/src/stream_handler/sink_adaptor.rs b/ingester/src/stream_handler/sink_adaptor.rs index 4f885a496c..3780f3da5b 100644 --- a/ingester/src/stream_handler/sink_adaptor.rs +++ b/ingester/src/stream_handler/sink_adaptor.rs @@ -7,7 +7,10 @@ use data_types::ShardId; use dml::DmlOperation; use super::DmlSink; -use crate::{data::IngesterData, lifecycle::LifecycleHandleImpl}; +use crate::{ + data::{DmlApplyAction, IngesterData}, + lifecycle::LifecycleHandleImpl, +}; /// Provides a [`DmlSink`] implementation for a [`IngesterData`] instance. #[derive(Debug)] @@ -35,7 +38,7 @@ impl IngestSinkAdaptor { #[async_trait] impl DmlSink for IngestSinkAdaptor { - async fn apply(&self, op: DmlOperation) -> Result<bool, crate::data::Error> { + async fn apply(&self, op: DmlOperation) -> Result<DmlApplyAction, crate::data::Error> { self.ingest_data .buffer_operation(self.shard_id, op, &self.lifecycle_handle) .await diff --git a/ingester/src/stream_handler/sink_instrumentation.rs b/ingester/src/stream_handler/sink_instrumentation.rs index 998e14bb48..98292ebc7a 100644 --- a/ingester/src/stream_handler/sink_instrumentation.rs +++ b/ingester/src/stream_handler/sink_instrumentation.rs @@ -9,6 +9,8 @@ use iox_time::{SystemProvider, TimeProvider}; use metric::{Attributes, DurationHistogram, U64Counter, U64Gauge}; use trace::span::{SpanExt, SpanRecorder}; +use crate::data::DmlApplyAction; + use super::DmlSink; /// A [`WatermarkFetcher`] abstracts a source of the write buffer high watermark @@ -155,7 +157,7 @@ where T: DmlSink, P: TimeProvider, { - async fn apply(&self, op: DmlOperation) -> Result<bool, crate::data::Error> { + async fn apply(&self, op: DmlOperation) -> Result<DmlApplyAction, crate::data::Error> { let meta = op.meta(); // Immediately increment the "bytes read" metric as it records the @@ -292,9 +294,9 @@ mod tests { async fn test( op: impl Into<DmlOperation> + Send, metrics: &metric::Registry, - with_sink_return: Result<bool, crate::data::Error>, + with_sink_return: Result<DmlApplyAction, crate::data::Error>, with_fetcher_return: Option<i64>, - ) -> Result<bool, crate::data::Error> { + ) -> Result<DmlApplyAction, crate::data::Error> { let op = op.into(); let inner = MockDmlSink::default().with_apply_return([with_sink_return]); let instrumentation = SinkInstrumentation::new( @@ -342,8 +344,8 @@ mod tests { ); let op = make_write(meta); - let got = test(op, &metrics, Ok(true), Some(12345)).await; - assert_matches!(got, Ok(true)); + let got = test(op, &metrics, Ok(DmlApplyAction::Applied(true)), Some(12345)).await; + assert_matches!(got, Ok(DmlApplyAction::Applied(true))); // Validate the various write buffer metrics assert_matches!( @@ -487,8 +489,8 @@ mod tests { ); let op = make_write(meta); - let got = test(op, &metrics, Ok(true), None).await; - assert_matches!(got, Ok(true)); + let got = test(op, &metrics, Ok(DmlApplyAction::Applied(true)), None).await; + assert_matches!(got, Ok(DmlApplyAction::Applied(true))); // Validate the various write buffer metrics assert_matches!( @@ -556,8 +558,8 @@ mod tests { ); let op = make_write(meta); - let got = test(op, &metrics, Ok(true), Some(1)).await; - assert_matches!(got, 
Ok(true)); + let got = test(op, &metrics, Ok(DmlApplyAction::Applied(true)), Some(1)).await; + assert_matches!(got, Ok(DmlApplyAction::Applied(true))); // Validate the various write buffer metrics assert_matches!( @@ -617,7 +619,7 @@ mod tests { let meta = DmlMeta::unsequenced(None); let op = make_write(meta); - let _ = test(op, &metrics, Ok(true), Some(12345)).await; + let _ = test(op, &metrics, Ok(DmlApplyAction::Applied(true)), Some(12345)).await; } // The instrumentation emits per-shard metrics, so upon observing an op @@ -639,6 +641,6 @@ mod tests { ); let op = make_write(meta); - let _ = test(op, &metrics, Ok(true), Some(12345)).await; + let _ = test(op, &metrics, Ok(DmlApplyAction::Applied(true)), Some(12345)).await; } } diff --git a/query_tests/src/scenarios/util.rs b/query_tests/src/scenarios/util.rs index 477503504b..14ef4ea294 100644 --- a/query_tests/src/scenarios/util.rs +++ b/query_tests/src/scenarios/util.rs @@ -14,7 +14,9 @@ use generated_types::{ }; use influxdb_iox_client::flight::{low_level::LowLevelMessage, Error as FlightError}; use ingester::{ - data::{partition::resolver::CatalogPartitionResolver, IngesterData, Persister}, + data::{ + partition::resolver::CatalogPartitionResolver, DmlApplyAction, IngesterData, Persister, + }, lifecycle::mock_handle::MockLifecycleHandle, querier_handler::{prepare_data_to_querier, FlatIngesterQueryResponse, IngesterQueryResponse}, }; @@ -721,12 +723,14 @@ impl MockIngester { async fn buffer_operation(&mut self, dml_operation: DmlOperation) { let lifecycle_handle = MockLifecycleHandle::default(); - let should_pause = self + let action = self .ingester_data .buffer_operation(self.shard.shard.id, dml_operation, &lifecycle_handle) .await .unwrap(); - assert!(!should_pause); + if let DmlApplyAction::Applied(should_pause) = action { + assert!(!should_pause); + } } /// Persists the given set of partitions.
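The change above makes the stream handler report the TTBR metric only when a DML op was actually applied: a skipped (already-persisted) op reports nothing and never pauses ingest. The self-contained sketch below models just that Applied/Skipped branching from the diff; the real handler additionally logs and sleeps while ingest is paused.

// Mirrors the enum added in ingester/src/data.rs above.
#[derive(Clone, Copy, Debug)]
enum DmlApplyAction {
    // The op was applied; the bool says whether ingest should be paused.
    Applied(bool),
    // The op had already been persisted and was skipped.
    Skipped,
}

// Returns (should_pause, ttbr_reported) for a given apply outcome.
fn handle(action: DmlApplyAction) -> (bool, bool) {
    match action {
        DmlApplyAction::Applied(should_pause) => (should_pause, true),
        DmlApplyAction::Skipped => (false, false),
    }
}

fn main() {
    assert_eq!(handle(DmlApplyAction::Applied(true)), (true, true));
    assert_eq!(handle(DmlApplyAction::Applied(false)), (false, true));
    // Skipped ops neither report TTBR nor pause ingest.
    assert_eq!(handle(DmlApplyAction::Skipped), (false, false));
}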
423308dcd4f5a5dea7e21e87826e18fce710a306
Trevor Hilton
2024-03-05 15:40:16
extend InfluxQL rewriter for SELECT and EXPLAIN (#24726)
Extended the InfluxQL rewriter to handle SELECT statements with nested sub-queries, as well as EXPLAIN statements. Tests were added to check all the rewrite cases for happy path and failure modes.
null
feat: extend InfluxQL rewriter for SELECT and EXPLAIN (#24726) Extended the InfluxQL rewriter to handle SELECT statements with nested sub-queries, as well as EXPLAIN statements. Tests were added to check all the rewrite cases for happy path and failure modes.
diff --git a/Cargo.lock b/Cargo.lock index 3177e2bee2..1b46606fec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b79b82693f705137f8fb9b37871d99e4f9a7df12b917eed79c3d3954830a60b" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "const-random", @@ -132,9 +132,9 @@ checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" [[package]] name = "arrayref" @@ -523,11 +523,11 @@ dependencies = [ [[package]] name = "atomic-write-file" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" +checksum = "a8204db279bf648d64fe845bd8840f78b39c8132ed4d6a4194c3b10d4b4cfb0b" dependencies = [ - "nix 0.27.1", + "nix 0.28.0", "rand", ] @@ -739,7 +739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "serde", ] @@ -846,10 +846,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" +checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" dependencies = [ + "jobserver", "libc", ] @@ -1065,9 +1066,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", ] @@ -2253,9 +2254,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -2687,9 +2688,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.35.1" +version = "1.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c985c1bef99cf13c58fade470483d81a2bfe846ebde60ed28cc2dddec2df9e2" +checksum = "0a7c22c4d34ef4788c351e971c52bfdfe7ea2766f8c5466bc175dd46e52ac22e" dependencies = [ "console", "lazy_static", @@ -2937,11 +2938,20 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +[[package]] +name = "jobserver" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +dependencies = [ + 
"libc", +] + [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -3117,7 +3127,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" dependencies = [ - "regex-automata 0.4.5", + "regex-automata 0.4.6", ] [[package]] @@ -3360,9 +3370,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", @@ -3999,9 +4009,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" dependencies = [ "memchr", "thiserror", @@ -4010,9 +4020,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" +checksum = "b0d24f72393fd16ab6ac5738bc33cdb6a9aa73f8b902e8fe29cf4e67d7dd1026" dependencies = [ "pest", "pest_generator", @@ -4020,9 +4030,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" +checksum = "fdc17e2a6c7d0a492f0158d7a4bd66cc17280308bbaff78d5bef566dca35ab80" dependencies = [ "pest", "pest_meta", @@ -4033,9 +4043,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" +checksum = "934cd7631c050f4674352a6e835d5f6711ffbfb9345c2fc0107155ac495ae293" dependencies = [ "once_cell", "pest", @@ -4552,7 +4562,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -4567,9 +4577,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -5609,9 +5619,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" -version = "0.30.5" +version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb4f3438c8f6389c864e61221cbc97e9bca98b4daf39a5beb7bea660f528bb2" +checksum = "6746919caf9f2a85bff759535664c060109f21975c5ac2e8652e60102bd4d196" dependencies = [ "cfg-if", "core-foundation-sys", @@ 
-6429,9 +6439,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -6452,11 +6462,17 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6464,9 +6480,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", @@ -6479,9 +6495,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -6491,9 +6507,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6501,9 +6517,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", @@ -6514,9 +6530,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" @@ -6533,9 +6549,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -6561,9 +6577,13 @@ dependencies = [ [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" +dependencies = [ + "redox_syscall", + "wasite", +] [[package]] name = "winapi" @@ -6823,7 +6843,7 @@ dependencies = [ "rand", "rand_core", "regex", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", "reqwest", "ring", diff --git a/iox_query_influxql_rewrite/src/lib.rs b/iox_query_influxql_rewrite/src/lib.rs index febb073f40..9ea2ab6b2d 100644 --- a/iox_query_influxql_rewrite/src/lib.rs +++ b/iox_query_influxql_rewrite/src/lib.rs @@ -1,19 +1,24 @@ use std::collections::HashSet; use influxdb_influxql_parser::{ - common::ParseError, identifier::Identifier, parse_statements as parse_internal, - select::MeasurementSelection, show_measurements::ExtendedOnClause, statement::Statement, + common::ParseError, + explain::ExplainStatement, + identifier::Identifier, + parse_statements as parse_internal, + select::{MeasurementSelection, SelectStatement}, + show_measurements::ExtendedOnClause, + statement::Statement, }; #[derive(Debug)] -pub struct RewrittenStatement { +pub struct Rewritten<S> { database: Option<Identifier>, retention_policy: Option<Identifier>, - statement: Statement, + statement: S, } -impl RewrittenStatement { - fn new(statement: Statement) -> Self { +impl<S> Rewritten<S> { + fn new(statement: S) -> Self { Self { database: None, retention_policy: None, @@ -39,11 +44,11 @@ impl RewrittenStatement { self.retention_policy.as_ref() } - pub fn statement(&self) -> &Statement { + pub fn statement(&self) -> &S { &self.statement } - pub fn to_statement(self) -> Statement { + pub fn to_statement(self) -> S { self.statement } @@ -62,13 +67,13 @@ impl RewrittenStatement { } } -impl From<RewrittenStatement> for Statement { - fn from(r: RewrittenStatement) -> Self { +impl From<Rewritten<Statement>> for Statement { + fn from(r: Rewritten<Statement>) -> Self { r.to_statement() } } -impl TryFrom<Statement> for RewrittenStatement { +impl TryFrom<Statement> for Rewritten<Statement> { type Error = Error; fn try_from(statement: Statement) -> Result<Self, Self::Error> { @@ -106,42 +111,83 @@ impl TryFrom<Statement> for RewrittenStatement { let identifier = s.database.take().map(Into::into); Ok(Self::new(Statement::ShowFieldKeys(s)).with_database(identifier)) } - Statement::Select(mut s) => { - let mut db_rp_set = HashSet::new(); - let from_clause = s - .from - .take() - .into_iter() - .map(|ms| match ms { - MeasurementSelection::Name(mut qn) => { - let db = qn.database.take(); - let rp = qn.retention_policy.take(); - if db_rp_set.insert((db, rp)) && db_rp_set.len() > 1 { - return Err(Error::MultiDatabase); - } - Ok(MeasurementSelection::Name(qn)) - } - // TODO - handle sub-queries? 
- MeasurementSelection::Subquery(_) => Ok(ms), - }) - .collect::<Result<Vec<MeasurementSelection>, Error>>()?; - s.from.replace(from_clause); - let mut result = Self::new(Statement::Select(s)); - if let Some((db, rp)) = db_rp_set.into_iter().next() { - result = result.with_database(db).with_retention_policy(rp); - } - Ok(result) + Statement::Select(s) => { + let ss = Rewritten::<SelectStatement>::try_from(*s)?; + let db = ss.database.to_owned(); + let rp = ss.retention_policy.to_owned(); + Ok(Self::new(Statement::Select(Box::new(ss.to_statement()))) + .with_database(db) + .with_retention_policy(rp)) + } + Statement::Explain(mut s) => { + let options = s.options.take(); + let s = Self::try_from(*s.statement)?; + let db = s.database.to_owned(); + let rp = s.retention_policy.to_owned(); + Ok(Self::new(Statement::Explain(Box::new(ExplainStatement { + options, + statement: Box::new(s.to_statement()), + }))) + .with_database(db) + .with_retention_policy(rp)) } + // For all other statements, we just pass them through. Explicitly + // do not use a catch-all match arm here in the event that new variants + // are added to the Statement enum, we want the compiler to direct us + // here to handle, if relevant. Statement::CreateDatabase(_) | Statement::Delete(_) | Statement::DropMeasurement(_) - | Statement::Explain(_) | Statement::ShowDatabases(_) => Ok(Self::new(statement)), } } } -#[derive(Debug, thiserror::Error, Clone)] +impl TryFrom<SelectStatement> for Rewritten<SelectStatement> { + type Error = Error; + + fn try_from(mut select_statement: SelectStatement) -> Result<Self, Self::Error> { + let mut db_rp_set = HashSet::new(); + let from_clause = select_statement + .from + .take() + .into_iter() + .map(|ms| { + let (db, rp, ms) = match ms { + MeasurementSelection::Name(mut qn) => { + let db = qn.database.take(); + let rp = qn.retention_policy.take(); + (db, rp, MeasurementSelection::Name(qn)) + } + // Recursively call try_from on nested sub-queries, and compare their + // resulting db/rp to the same at this level. Sub-queries that have + // multiple db/rp in them will throw the MultiDatabase error. + MeasurementSelection::Subquery(s) => { + let ss = Self::try_from(*s)?; + ( + ss.database.to_owned(), + ss.retention_policy.to_owned(), + MeasurementSelection::Subquery(Box::new(ss.to_statement())), + ) + } + }; + if db_rp_set.insert((db, rp)) && db_rp_set.len() > 1 { + Err(Error::MultiDatabase) + } else { + Ok(ms) + } + }) + .collect::<Result<Vec<MeasurementSelection>, Error>>()?; + select_statement.from.replace(from_clause); + let mut result = Self::new(select_statement); + if let Some((db, rp)) = db_rp_set.into_iter().next() { + result = result.with_database(db).with_retention_policy(rp); + } + Ok(result) + } +} + +#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)] pub enum Error { #[error("can only perform queries on a single database")] MultiDatabase, @@ -149,10 +195,361 @@ pub enum Error { Parse(ParseError), } -pub fn parse_statements(input: &str) -> Result<Vec<RewrittenStatement>, Error> { +pub fn parse_statements(input: &str) -> Result<Vec<Rewritten<Statement>>, Error> { parse_internal(input) .map_err(Error::Parse)? 
.into_iter() - .map(RewrittenStatement::try_from) - .collect::<Result<Vec<RewrittenStatement>, Error>>() + .map(Rewritten::<Statement>::try_from) + .collect::<Result<Vec<Rewritten<Statement>>, Error>>() +} + +#[cfg(test)] +mod tests { + use influxdb_influxql_parser::statement::Statement; + + use crate::{parse_statements, Error, Rewritten}; + + fn parse_single(input: &str) -> Rewritten<Statement> { + parse_statements(input).unwrap().pop().unwrap() + } + + fn parse_single_failure(input: &str) -> Error { + parse_statements(input).unwrap_err() + } + + struct TestCase { + input: &'static str, + expected: &'static str, + db: Option<&'static str>, + rp: Option<&'static str>, + } + + impl TestCase { + fn assert(&self) { + let s = parse_single(self.input); + assert_eq!(s.database().map(|db| db.as_str()), self.db); + assert_eq!(s.retention_policy().map(|rp| rp.as_str()), self.rp); + assert_eq!(self.expected, s.to_statement().to_string()); + } + } + + struct TestFailure { + input: &'static str, + expected: Error, + } + + impl TestFailure { + fn assert(&self) { + let e = parse_single_failure(self.input); + assert_eq!(self.expected, e, "input: {}", self.input); + } + } + + #[test] + fn show_measurements() { + TestCase { + input: "SHOW MEASUREMENTS", + expected: "SHOW MEASUREMENTS", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW MEASUREMENTS ON foo", + expected: "SHOW MEASUREMENTS", + db: Some("foo"), + rp: None, + } + .assert(); + TestCase { + input: "SHOW MEASUREMENTS ON foo.bar", + expected: "SHOW MEASUREMENTS", + db: Some("foo"), + rp: Some("bar"), + } + .assert(); + } + + #[test] + fn show_measurements_failure_modes() { + TestFailure { + input: "SHOW MEASUREMENTS ON *.*", + expected: Error::MultiDatabase, + } + .assert(); + TestFailure { + input: r#"SHOW MEASUREMENTS ON *"#, + expected: Error::MultiDatabase, + } + .assert(); + } + + #[test] + fn show_retention_policies() { + TestCase { + input: "SHOW RETENTION POLICIES", + expected: "SHOW RETENTION POLICIES", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW RETENTION POLICIES ON foo", + expected: "SHOW RETENTION POLICIES", + db: Some("foo"), + rp: None, + } + .assert(); + } + + #[test] + fn show_tag_keys() { + TestCase { + input: "SHOW TAG KEYS", + expected: "SHOW TAG KEYS", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW TAG KEYS FROM cpu", + expected: "SHOW TAG KEYS FROM cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW TAG KEYS ON foo", + expected: "SHOW TAG KEYS", + db: Some("foo"), + rp: None, + } + .assert(); + TestCase { + input: "SHOW TAG KEYS ON foo FROM cpu", + expected: "SHOW TAG KEYS FROM cpu", + db: Some("foo"), + rp: None, + } + .assert(); + } + + #[test] + fn show_tag_values() { + TestCase { + input: "SHOW TAG VALUES WITH KEY = host", + expected: "SHOW TAG VALUES WITH KEY = host", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW TAG VALUES FROM cpu WITH KEY = host", + expected: "SHOW TAG VALUES FROM cpu WITH KEY = host", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW TAG VALUES ON foo WITH KEY = host", + expected: "SHOW TAG VALUES WITH KEY = host", + db: Some("foo"), + rp: None, + } + .assert(); + TestCase { + input: "SHOW TAG VALUES ON foo FROM cpu WITH KEY = host", + expected: "SHOW TAG VALUES FROM cpu WITH KEY = host", + db: Some("foo"), + rp: None, + } + .assert(); + } + + #[test] + fn show_field_keys() { + TestCase { + input: "SHOW FIELD KEYS", + expected: "SHOW FIELD KEYS", + db: None, + rp: None, + 
} + .assert(); + TestCase { + input: "SHOW FIELD KEYS FROM cpu", + expected: "SHOW FIELD KEYS FROM cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW FIELD KEYS ON foo", + expected: "SHOW FIELD KEYS", + db: Some("foo"), + rp: None, + } + .assert(); + TestCase { + input: "SHOW FIELD KEYS ON foo FROM cpu", + expected: "SHOW FIELD KEYS FROM cpu", + db: Some("foo"), + rp: None, + } + .assert(); + } + + #[test] + fn select() { + TestCase { + input: "SELECT * FROM cpu", + expected: "SELECT * FROM cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SELECT * FROM bar.cpu", + expected: "SELECT * FROM cpu", + db: None, + rp: Some("bar"), + } + .assert(); + TestCase { + input: "SELECT * FROM foo.bar.cpu", + expected: "SELECT * FROM cpu", + db: Some("foo"), + rp: Some("bar"), + } + .assert(); + TestCase { + input: r#"SELECT * FROM (SELECT * FROM cpu)"#, + expected: r#"SELECT * FROM (SELECT * FROM cpu)"#, + db: None, + rp: None, + } + .assert(); + TestCase { + input: r#"SELECT * FROM (SELECT * FROM bar.cpu), bar.mem"#, + expected: r#"SELECT * FROM (SELECT * FROM cpu), mem"#, + db: None, + rp: Some("bar"), + } + .assert(); + TestCase { + input: r#"SELECT * FROM (SELECT * FROM foo.bar.cpu), foo.bar.mem"#, + expected: r#"SELECT * FROM (SELECT * FROM cpu), mem"#, + db: Some("foo"), + rp: Some("bar"), + } + .assert(); + } + + #[test] + fn select_failure_modes() { + TestFailure { + input: r#"SELECT * FROM foo.bar.cpu, baz.bop.cpu"#, + expected: Error::MultiDatabase, + } + .assert(); + TestFailure { + input: r#"SELECT * FROM cpu, baz.bop.cpu"#, + expected: Error::MultiDatabase, + } + .assert(); + TestFailure { + input: r#"SELECT * FROM bar.cpu, baz.bop.cpu"#, + expected: Error::MultiDatabase, + } + .assert(); + TestFailure { + input: r#"SELECT * FROM foo.bar.cpu, (SELECT * FROM mem)"#, + expected: Error::MultiDatabase, + } + .assert(); + } + + #[test] + fn explain() { + TestCase { + input: "EXPLAIN SELECT * FROM cpu", + expected: "EXPLAIN SELECT * FROM cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "EXPLAIN SELECT * FROM bar.cpu", + expected: "EXPLAIN SELECT * FROM cpu", + db: None, + rp: Some("bar"), + } + .assert(); + TestCase { + input: "EXPLAIN SELECT * FROM foo.bar.cpu", + expected: "EXPLAIN SELECT * FROM cpu", + db: Some("foo"), + rp: Some("bar"), + } + .assert(); + TestCase { + input: r#"EXPLAIN SELECT * FROM (SELECT * FROM cpu)"#, + expected: r#"EXPLAIN SELECT * FROM (SELECT * FROM cpu)"#, + db: None, + rp: None, + } + .assert(); + TestCase { + input: r#"EXPLAIN SELECT * FROM (SELECT * FROM bar.cpu), bar.mem"#, + expected: r#"EXPLAIN SELECT * FROM (SELECT * FROM cpu), mem"#, + db: None, + rp: Some("bar"), + } + .assert(); + TestCase { + input: r#"EXPLAIN SELECT * FROM (SELECT * FROM foo.bar.cpu), foo.bar.mem"#, + expected: r#"EXPLAIN SELECT * FROM (SELECT * FROM cpu), mem"#, + db: Some("foo"), + rp: Some("bar"), + } + .assert(); + } + + #[test] + fn noop_rewrites() { + TestCase { + input: "CREATE DATABASE foo", + expected: "CREATE DATABASE foo", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "DELETE FROM cpu", + expected: "DELETE FROM cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "DROP MEASUREMENT cpu", + expected: "DROP MEASUREMENT cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "EXPLAIN SELECT * FROM cpu", + expected: "EXPLAIN SELECT * FROM cpu", + db: None, + rp: None, + } + .assert(); + TestCase { + input: "SHOW DATABASES", + expected: "SHOW DATABASES", + db: None, + rp: 
None, + } + .assert(); + } }
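The diff above replaces the concrete `RewrittenStatement` with the generic `Rewritten<S>` wrapper and teaches it to recurse into subqueries and EXPLAIN statements. A minimal usage sketch, not part of the commit, assuming the crate is importable as `iox_query_influxql_rewrite` and using only the `parse_statements`, `database()`, `retention_policy()` and `to_statement()` APIs shown in the diff; the query and assertions mirror the new tests and are illustrative only:

```rust
// Illustrative only: exercises the rewritten API from the diff above.
use iox_query_influxql_rewrite::parse_statements;

fn main() {
    // A fully qualified measurement: the db/rp qualifiers are stripped from the
    // FROM clause and surfaced on the `Rewritten<Statement>` wrapper instead.
    let mut statements =
        parse_statements("SELECT * FROM foo.bar.cpu").expect("valid InfluxQL");
    let rewritten = statements.pop().expect("exactly one statement");

    assert_eq!(rewritten.database().map(|db| db.as_str()), Some("foo"));
    assert_eq!(rewritten.retention_policy().map(|rp| rp.as_str()), Some("bar"));
    // The statement itself no longer carries the qualifiers.
    assert_eq!(rewritten.to_statement().to_string(), "SELECT * FROM cpu");
}
```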
148c103c701599fba491e8dd1a566adc3c3ee398
wiedld
2023-04-24 17:14:28
push authz into specific configs (#7607)
* refactor: move authz-addr flag into router-specific config * refactor: move authz-addr flag into querier-specific config * refactor: remove global AuthzConfig which is now redundant with the pushdown to individual configs. Keep constant the env vars used universally. * chore: make errors lowercase, and use the required bool for the authz-addr flag ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: push authz into specific configs (#7607) * refactor: move authz-addr flag into router-specific config * refactor: move authz-addr flag into querier-specific config * refactor: remove global AuthzConfig which is now redundant with the pushdown to individual configs. Keep constant the env vars used universally. * chore: make errors lowercase, and use the required bool for the authz-addr flag --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/clap_blocks/src/authz.rs b/clap_blocks/src/authz.rs deleted file mode 100644 index eb6f65d428..0000000000 --- a/clap_blocks/src/authz.rs +++ /dev/null @@ -1,42 +0,0 @@ -//! CLI config for request authorization. - -use ::authz::{Authorizer, IoxAuthorizer}; -use snafu::Snafu; -use std::{boxed::Box, sync::Arc}; - -#[derive(Debug, Snafu)] -#[allow(missing_docs)] -pub enum Error { - #[snafu(display("Invalid authz service address {addr}: {source}"))] - BadServiceAddress { - addr: String, - source: Box<dyn std::error::Error>, - }, -} - -/// Configuration for optional request authorization. -#[derive(Clone, Debug, Default, clap::Parser)] -pub struct AuthzConfig { - #[clap(long = "authz-addr", env = "INFLUXDB_IOX_AUTHZ_ADDR")] - pub(crate) authz_addr: Option<String>, -} - -impl AuthzConfig { - /// Authorizer from the configuration. - /// - /// An authorizer is optional so will only be created if configured. - /// An error will only occur when the authorizer configuration is - /// invalid. - pub fn authorizer(&self) -> Result<Option<Arc<dyn Authorizer>>, Error> { - if let Some(s) = &self.authz_addr { - IoxAuthorizer::connect_lazy(s.clone()) - .map(|c| Some(Arc::new(c) as Arc<dyn Authorizer>)) - .map_err(|e| Error::BadServiceAddress { - addr: s.clone(), - source: e, - }) - } else { - Ok(None) - } - } -} diff --git a/clap_blocks/src/lib.rs b/clap_blocks/src/lib.rs index 01b5334377..48bba965eb 100644 --- a/clap_blocks/src/lib.rs +++ b/clap_blocks/src/lib.rs @@ -12,7 +12,6 @@ clippy::todo, clippy::dbg_macro )] -pub mod authz; pub mod catalog_dsn; pub mod compactor2; pub mod garbage_collector; @@ -22,4 +21,5 @@ pub mod object_store; pub mod querier; pub mod router2; pub mod run_config; +pub mod single_tenant; pub mod socket_addr; diff --git a/clap_blocks/src/querier.rs b/clap_blocks/src/querier.rs index 9c3aec25ca..4d0eacdf3f 100644 --- a/clap_blocks/src/querier.rs +++ b/clap_blocks/src/querier.rs @@ -1,11 +1,18 @@ //! Querier-related configs. -use crate::ingester_address::IngesterAddress; +use crate::{ + ingester_address::IngesterAddress, + single_tenant::{CONFIG_AUTHZ_ENV_NAME, CONFIG_AUTHZ_FLAG}, +}; use std::{collections::HashMap, num::NonZeroUsize}; /// CLI config for querier configuration #[derive(Debug, Clone, PartialEq, Eq, clap::Parser)] pub struct QuerierConfig { + /// Addr for connection to authz + #[clap(long = CONFIG_AUTHZ_FLAG, env = CONFIG_AUTHZ_ENV_NAME)] + pub authz_address: Option<String>, + /// The number of threads to use for queries. /// /// If not specified, defaults to the number of cores on the system diff --git a/clap_blocks/src/router2.rs b/clap_blocks/src/router2.rs index 1905e0c493..149a5219ce 100644 --- a/clap_blocks/src/router2.rs +++ b/clap_blocks/src/router2.rs @@ -1,6 +1,11 @@ //! CLI config for the router using the RPC write path -use crate::ingester_address::IngesterAddress; +use crate::{ + ingester_address::IngesterAddress, + single_tenant::{ + CONFIG_AUTHZ_ENV_NAME, CONFIG_AUTHZ_FLAG, CONFIG_CST_ENV_NAME, CONFIG_CST_FLAG, + }, +}; use std::{ num::{NonZeroUsize, ParseIntError}, time::Duration, @@ -10,14 +15,23 @@ use std::{ #[derive(Debug, Clone, clap::Parser)] #[allow(missing_copy_implementations)] pub struct Router2Config { + /// Addr for connection to authz + #[clap( + long = CONFIG_AUTHZ_FLAG, + env = CONFIG_AUTHZ_ENV_NAME, + requires("single_tenant_deployment"), + )] + pub authz_address: Option<String>, + /// Differential handling based upon deployment to CST vs MT. /// /// At minimum, differs in supports of v1 endpoint. 
But also includes /// differences in namespace handling, etc. #[clap( - long = "single-tenancy", - env = "INFLUXDB_IOX_SINGLE_TENANCY", - default_value = "false" + long = CONFIG_CST_FLAG, + env = CONFIG_CST_ENV_NAME, + default_value = "false", + requires_if("true", "authz_address") )] pub single_tenant_deployment: bool, diff --git a/clap_blocks/src/single_tenant.rs b/clap_blocks/src/single_tenant.rs new file mode 100644 index 0000000000..fb7fb956ad --- /dev/null +++ b/clap_blocks/src/single_tenant.rs @@ -0,0 +1,11 @@ +//! CLI config for request authorization. + +/// Env var providing authz address +pub const CONFIG_AUTHZ_ENV_NAME: &str = "INFLUXDB_IOX_AUTHZ_ADDR"; +/// CLI flag for authz address +pub const CONFIG_AUTHZ_FLAG: &str = "authz-addr"; + +/// Env var for single tenancy deployments +pub const CONFIG_CST_ENV_NAME: &str = "INFLUXDB_IOX_SINGLE_TENANCY"; +/// CLI flag for single tenancy deployments +pub const CONFIG_CST_FLAG: &str = "single-tenancy"; diff --git a/influxdb_iox/src/commands/run/all_in_one.rs b/influxdb_iox/src/commands/run/all_in_one.rs index 0d8ef2e1ee..d996d5146f 100644 --- a/influxdb_iox/src/commands/run/all_in_one.rs +++ b/influxdb_iox/src/commands/run/all_in_one.rs @@ -3,9 +3,7 @@ use crate::process_info::setup_metric_registry; use super::main; -use authz::Authorizer; use clap_blocks::{ - authz::AuthzConfig, catalog_dsn::CatalogDsnConfig, compactor2::Compactor2Config, ingester2::Ingester2Config, @@ -14,6 +12,9 @@ use clap_blocks::{ querier::QuerierConfig, router2::Router2Config, run_config::RunConfig, + single_tenant::{ + CONFIG_AUTHZ_ENV_NAME, CONFIG_AUTHZ_FLAG, CONFIG_CST_ENV_NAME, CONFIG_CST_FLAG, + }, socket_addr::SocketAddr, }; use compactor2::object_store::metrics::MetricsStore; @@ -89,9 +90,6 @@ pub enum Error { #[error("Invalid config: {0}")] InvalidConfig(#[from] CommonServerStateError), - #[error("Authz configuration error: {0}")] - AuthzConfig(#[from] clap_blocks::authz::Error), - #[error("Authz service error: {0}")] AuthzService(#[from] authz::Error), } @@ -173,9 +171,20 @@ pub type Result<T, E = Error> = std::result::Result<T, E>; )] #[group(skip)] pub struct Config { - /// Authorizer options. - #[clap(flatten)] - pub(crate) authz_config: AuthzConfig, + #[clap( + long = CONFIG_AUTHZ_FLAG, + env = CONFIG_AUTHZ_ENV_NAME, + requires("single_tenant_deployment"), + )] + pub(crate) authz_address: Option<String>, + + #[clap( + long = CONFIG_CST_FLAG, + env = CONFIG_CST_ENV_NAME, + default_value = "false", + requires_if("true", "authz_address") + )] + pub(crate) single_tenant_deployment: bool, /// logging options #[clap(flatten)] @@ -185,17 +194,6 @@ pub struct Config { #[clap(flatten)] pub(crate) tracing_config: TracingConfig, - /// Differential handling based upon deployment to CST vs MT. - /// - /// At minimum, differs in supports of v1 endpoint. But also includes - /// differences in namespace handling, etc. - #[clap( - long = "single-tenancy", - env = "INFLUXDB_IOX_SINGLE_TENANCY", - default_value = "false" - )] - pub single_tenant_deployment: bool, - /// Maximum size of HTTP requests. 
#[clap( long = "max-http-request-size", @@ -361,6 +359,7 @@ impl Config { /// configuration for each individual IOx service fn specialize(self) -> SpecializedConfig { let Self { + authz_address, logging_config, tracing_config, max_http_request_size, @@ -381,7 +380,6 @@ impl Config { querier_ram_pool_data_bytes, querier_max_concurrent_queries, exec_mem_pool_bytes, - authz_config, single_tenant_deployment, } = self; @@ -472,6 +470,8 @@ impl Config { }; let router_config = Router2Config { + authz_address: authz_address.clone(), + single_tenant_deployment, query_pool_name: QUERY_POOL_NAME.to_string(), http_request_limit: 1_000, ingester_addresses: ingester_addresses.clone(), @@ -481,7 +481,6 @@ impl Config { topic: QUERY_POOL_NAME.to_string(), rpc_write_timeout_seconds: Duration::new(3, 0), rpc_write_replicas: None, - single_tenant_deployment, rpc_write_max_outgoing_bytes: ingester_config.rpc_write_max_incoming_bytes, }; @@ -514,6 +513,7 @@ impl Config { }; let querier_config = QuerierConfig { + authz_address, num_query_threads: None, // will be ignored ingester_addresses, ram_pool_metadata_bytes: querier_ram_pool_metadata_bytes, @@ -536,7 +536,6 @@ impl Config { router_config, compactor_config, querier_config, - authz_config, } } } @@ -564,7 +563,6 @@ struct SpecializedConfig { router_config: Router2Config, compactor_config: Compactor2Config, querier_config: QuerierConfig, - authz_config: AuthzConfig, } pub async fn command(config: Config) -> Result<()> { @@ -578,7 +576,6 @@ pub async fn command(config: Config) -> Result<()> { router_config, compactor_config, querier_config, - authz_config, } = config.specialize(); let metrics = setup_metric_registry(); @@ -606,10 +603,6 @@ pub async fn command(config: Config) -> Result<()> { let time_provider: Arc<dyn TimeProvider> = Arc::new(SystemProvider::new()); - let authz = authz_config.authorizer()?; - // Verify the connection to the authorizer, if configured. 
- authz.probe().await?; - // create common state from the router and use it below let common_state = CommonServerState::from_config(router_run_config.clone())?; @@ -636,7 +629,6 @@ pub async fn command(config: Config) -> Result<()> { Arc::clone(&metrics), Arc::clone(&catalog), Arc::clone(&object_store), - authz.as_ref().map(Arc::clone), &router_config, ) .await?; @@ -684,7 +676,6 @@ pub async fn command(config: Config) -> Result<()> { exec, time_provider, querier_config, - authz: authz.as_ref().map(Arc::clone), }) .await?; diff --git a/influxdb_iox/src/commands/run/querier.rs b/influxdb_iox/src/commands/run/querier.rs index 3558c2ae12..ccfed9a637 100644 --- a/influxdb_iox/src/commands/run/querier.rs +++ b/influxdb_iox/src/commands/run/querier.rs @@ -3,10 +3,9 @@ use crate::process_info::setup_metric_registry; use super::main; -use authz::Authorizer; use clap_blocks::{ - authz::AuthzConfig, catalog_dsn::CatalogDsnConfig, object_store::make_object_store, - querier::QuerierConfig, run_config::RunConfig, + catalog_dsn::CatalogDsnConfig, object_store::make_object_store, querier::QuerierConfig, + run_config::RunConfig, }; use iox_query::exec::Executor; use iox_time::{SystemProvider, TimeProvider}; @@ -41,9 +40,6 @@ pub enum Error { #[error("Querier error: {0}")] Querier(#[from] ioxd_querier::Error), - #[error("Authz configuration error: {0}")] - AuthzConfig(#[from] clap_blocks::authz::Error), - #[error("Authz service error: {0}")] AuthzService(#[from] authz::Error), } @@ -64,10 +60,6 @@ Configuration is loaded from the following sources (highest precedence first): - pre-configured default values" )] pub struct Config { - /// Authorizer options. - #[clap(flatten)] - pub(crate) authz_config: AuthzConfig, - #[clap(flatten)] pub(crate) run_config: RunConfig, @@ -100,10 +92,6 @@ pub async fn command(config: Config) -> Result<(), Error> { let time_provider = Arc::new(SystemProvider::new()); - let authz = config.authz_config.authorizer()?; - // Verify the connection to the authorizer, if configured. - authz.probe().await?; - let num_query_threads = config.querier_config.num_query_threads(); let num_threads = num_query_threads.unwrap_or_else(|| { NonZeroUsize::new(num_cpus::get()).unwrap_or_else(|| NonZeroUsize::new(1).unwrap()) @@ -126,7 +114,6 @@ pub async fn command(config: Config) -> Result<(), Error> { exec, time_provider, querier_config: config.querier_config, - authz: authz.as_ref().map(Arc::clone), }) .await?; diff --git a/influxdb_iox/src/commands/run/router2.rs b/influxdb_iox/src/commands/run/router2.rs index b06f596db3..2c473eb1a3 100644 --- a/influxdb_iox/src/commands/run/router2.rs +++ b/influxdb_iox/src/commands/run/router2.rs @@ -1,10 +1,9 @@ //! Command line options for running a router2 that uses the RPC write path. 
use super::main; use crate::process_info::setup_metric_registry; -use authz::Authorizer; use clap_blocks::{ - authz::AuthzConfig, catalog_dsn::CatalogDsnConfig, object_store::make_object_store, - router2::Router2Config, run_config::RunConfig, + catalog_dsn::CatalogDsnConfig, object_store::make_object_store, router2::Router2Config, + run_config::RunConfig, }; use iox_time::{SystemProvider, TimeProvider}; use ioxd_common::{ @@ -36,9 +35,6 @@ pub enum Error { #[error("Catalog DSN error: {0}")] CatalogDsn(#[from] clap_blocks::catalog_dsn::Error), - #[error("Authz configuration error: {0}")] - AuthzConfig(#[from] clap_blocks::authz::Error), - #[error("Authz service error: {0}")] AuthzService(#[from] authz::Error), } @@ -61,9 +57,6 @@ Configuration is loaded from the following sources (highest precedence first): - pre-configured default values" )] pub struct Config { - #[clap(flatten)] - pub(crate) authz_config: AuthzConfig, - #[clap(flatten)] pub(crate) run_config: RunConfig, @@ -98,16 +91,12 @@ pub async fn command(config: Config) -> Result<()> { time_provider, &metrics, )); - let authz = config.authz_config.authorizer()?; - // Verify the connection to the authorizer, if configured. - authz.probe().await?; let server_type = create_router2_server_type( &common_state, Arc::clone(&metrics), catalog, object_store, - authz, &config.router_config, ) .await?; diff --git a/ioxd_querier/src/lib.rs b/ioxd_querier/src/lib.rs index a55729e4cc..805e901287 100644 --- a/ioxd_querier/src/lib.rs +++ b/ioxd_querier/src/lib.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use authz::Authorizer; +use authz::{Authorizer, IoxAuthorizer}; use clap_blocks::querier::QuerierConfig; use datafusion_util::config::register_iox_object_store; use hyper::{Body, Request, Response}; @@ -160,13 +160,18 @@ pub struct QuerierServerTypeArgs<'a> { pub exec: Arc<Executor>, pub time_provider: Arc<dyn TimeProvider>, pub querier_config: QuerierConfig, - pub authz: Option<Arc<dyn Authorizer>>, } #[derive(Debug, Error)] pub enum Error { #[error("querier error: {0}")] Querier(#[from] querier::QuerierDatabaseError), + + #[error("authz configuration error for '{addr}': '{source}'")] + AuthzConfig { + source: Box<dyn std::error::Error>, + addr: String, + }, } /// Instantiate a querier server @@ -197,6 +202,21 @@ pub async fn create_querier_server_type( ); assert!(existing.is_none()); + let authz = match &args.querier_config.authz_address { + Some(addr) => { + let authz = IoxAuthorizer::connect_lazy(addr.clone()) + .map(|c| Arc::new(c) as Arc<dyn Authorizer>) + .map_err(|source| Error::AuthzConfig { + source, + addr: addr.clone(), + })?; + authz.probe().await.expect("Authz connection test failed."); + + Some(authz) + } + None => None, + }; + let ingester_connections = if args.querier_config.ingester_addresses.is_empty() { None } else { @@ -235,6 +255,6 @@ pub async fn create_querier_server_type( querier, database, args.common_state, - args.authz.as_ref().map(Arc::clone), + authz, ))) } diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs index 77a0caef4f..24ca699d54 100644 --- a/ioxd_router/src/lib.rs +++ b/ioxd_router/src/lib.rs @@ -4,7 +4,7 @@ use std::{ }; use async_trait::async_trait; -use authz::Authorizer; +use authz::{Authorizer, IoxAuthorizer}; use clap_blocks::router2::Router2Config; use data_types::{NamespaceName, PartitionTemplate, TemplatePart}; use hashbrown::HashMap; @@ -66,6 +66,12 @@ pub enum Error { #[error("No topic named '{topic_name}' found in the catalog")] TopicCatalogLookup { topic_name: String }, + + 
#[error("authz configuration error for '{addr}': '{source}'")] + AuthzConfig { + source: Box<dyn std::error::Error>, + addr: String, + }, } pub type Result<T, E = Error> = std::result::Result<T, E>; @@ -193,7 +199,6 @@ pub async fn create_router2_server_type( metrics: Arc<metric::Registry>, catalog: Arc<dyn Catalog>, object_store: Arc<DynObjectStore>, - authz: Option<Arc<dyn Authorizer>>, router_config: &Router2Config, ) -> Result<Arc<dyn ServerType>> { let ingester_connections = router_config.ingester_addresses.iter().map(|addr| { @@ -349,13 +354,35 @@ pub async fn create_router2_server_type( let handler_stack = InstrumentationDecorator::new("request", &metrics, handler_stack); // Initialize the HTTP API delegate - let write_request_unifier: Result<Box<dyn WriteRequestUnifier>> = - match (router_config.single_tenant_deployment, authz) { - (true, Some(auth)) => Ok(Box::new(SingleTenantRequestUnifier::new(auth))), - (true, None) => unreachable!("INFLUXDB_IOX_SINGLE_TENANCY is set, but could not create an authz service. Check the INFLUXDB_IOX_AUTHZ_ADDR."), - (false, None) => Ok(Box::<MultiTenantRequestUnifier>::default()), - (false, Some(_)) => unreachable!("INFLUXDB_IOX_AUTHZ_ADDR is set, but authz only exists for single_tenancy. Check the INFLUXDB_IOX_SINGLE_TENANCY."), - }; + let write_request_unifier: Result<Box<dyn WriteRequestUnifier>> = match ( + router_config.single_tenant_deployment, + &router_config.authz_address, + ) { + (true, Some(addr)) => { + let authz = IoxAuthorizer::connect_lazy(addr.clone()) + .map(|c| Arc::new(c) as Arc<dyn Authorizer>) + .map_err(|source| Error::AuthzConfig { + source, + addr: addr.clone(), + })?; + authz.probe().await.expect("Authz connection test failed."); + + Ok(Box::new(SingleTenantRequestUnifier::new(authz))) + } + (true, None) => { + // Single tenancy was requested, but no auth was provided - the + // router's clap flag parse configuration should not allow this + // combination to be accepted and therefore execution should + // never reach here. + unreachable!("INFLUXDB_IOX_SINGLE_TENANCY is set, but could not create an authz service. Check the INFLUXDB_IOX_AUTHZ_ADDR") + } + (false, None) => Ok(Box::<MultiTenantRequestUnifier>::default()), + (false, Some(_)) => { + // As above, this combination should be prevented by the + // router's clap flag parse configuration. + unreachable!("INFLUXDB_IOX_AUTHZ_ADDR is set, but authz only exists for single_tenancy. Check the INFLUXDB_IOX_SINGLE_TENANCY") + } + }; let http = HttpDelegate::new( common_state.run_config().max_http_request_size, router_config.http_request_limit,
2d592a0295f23c97d6bcfa5f3bb15545fd842237
Christopher M. Wolff
2023-04-24 10:09:22
teach parser to allow arithmetic functions (#7620)
* feat: teach parser to allow arithmetic functions * refactor: remove unnecessary lowercasing of fn names ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: teach parser to allow arithmetic functions (#7620) * feat: teach parser to allow arithmetic functions * refactor: remove unnecessary lowercasing of fn names --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/influxdb_influxql_parser/src/expression/conditional.rs b/influxdb_influxql_parser/src/expression/conditional.rs index ba9b3f5ccc..831a62e829 100644 --- a/influxdb_influxql_parser/src/expression/conditional.rs +++ b/influxdb_influxql_parser/src/expression/conditional.rs @@ -3,11 +3,12 @@ use crate::expression::arithmetic::{ arithmetic, call_expression, var_ref, ArithmeticParsers, Expr, }; use crate::expression::Call; -use crate::internal::Error as InternalError; -use crate::internal::{expect, verify, ParseResult}; +use crate::functions::is_scalar_math_function; +use crate::internal::{expect, verify, Error as InternalError, ParseResult}; use crate::keywords::keyword; use crate::literal::{literal_no_regex, literal_regex, Literal}; use crate::parameter::parameter; +use crate::select::is_valid_now_call; use nom::branch::alt; use nom::bytes::complete::tag; use nom::character::complete::char; @@ -289,21 +290,23 @@ fn reduce_expr( }) } -/// Returns true if `expr` is a valid [`Expr::Call`] expression for the `now` function. -pub(crate) fn is_valid_now_call(expr: &Expr) -> bool { - match expr { - Expr::Call(Call { name, args }) => name.to_lowercase() == "now" && args.is_empty(), - _ => false, - } +/// Returns true if `expr` is a valid [`Expr::Call`] expression for condtional expressions +/// in the WHERE clause. +pub(crate) fn is_valid_conditional_call(expr: &Expr) -> bool { + is_valid_now_call(expr) + || match expr { + Expr::Call(Call { name, .. }) => is_scalar_math_function(name), + _ => false, + } } impl ConditionalExpression { /// Parse the `now()` function call fn call(i: &str) -> ParseResult<&str, Expr> { verify( - "invalid expression, the only valid function call is 'now' with no arguments", + "invalid expression, the only valid function calls are 'now' with no arguments, or scalar math functions", call_expression::<Self>, - is_valid_now_call, + is_valid_conditional_call, )(i) } } @@ -384,16 +387,20 @@ mod test { let (_, got) = arithmetic_expression("now() + 3").unwrap(); assert_eq!(got, binary_op!(call!("now"), Add, 3)); + // arithmetic functions calls are permitted + let (_, got) = arithmetic_expression("abs(f) + 3").unwrap(); + assert_eq!(got, binary_op!(call!("abs", var_ref!("f")), Add, 3)); + // Fallible cases assert_expect_error!( arithmetic_expression("sum(foo)"), - "invalid expression, the only valid function call is 'now' with no arguments" + "invalid expression, the only valid function calls are 'now' with no arguments, or scalar math functions" ); assert_expect_error!( arithmetic_expression("now(1)"), - "invalid expression, the only valid function call is 'now' with no arguments" + "invalid expression, the only valid function calls are 'now' with no arguments, or scalar math functions" ); } diff --git a/influxdb_influxql_parser/src/functions.rs b/influxdb_influxql_parser/src/functions.rs new file mode 100644 index 0000000000..b42103e9dc --- /dev/null +++ b/influxdb_influxql_parser/src/functions.rs @@ -0,0 +1,74 @@ +//! # [Functions] supported by InfluxQL +//! +//! [Functions]: https://docs.influxdata.com/influxdb/v1.8/query_language/functions/ + +use std::collections::HashSet; + +use once_cell::sync::Lazy; + +/// Returns `true` if `name` is a mathematical scalar function +/// supported by InfluxQL. 
+pub fn is_scalar_math_function(name: &str) -> bool { + static FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { + HashSet::from([ + "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", + "log2", "log10", "sqrt", "pow", "floor", "ceil", "round", + ]) + }); + + FUNCTIONS.contains(name) +} + +/// Returns `true` if `name` is an aggregate or aggregate function +/// supported by InfluxQL. +pub fn is_aggregate_function(name: &str) -> bool { + static FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { + HashSet::from([ + // Scalar-like functions + "cumulative_sum", + "derivative", + "difference", + "elapsed", + "moving_average", + "non_negative_derivative", + "non_negative_difference", + // Selector functions + "bottom", + "first", + "last", + "max", + "min", + "percentile", + "sample", + "top", + // Aggregate functions + "count", + "integral", + "mean", + "median", + "mode", + "spread", + "stddev", + "sum", + // Prediction functions + "holt_winters", + "holt_winters_with_fit", + // Technical analysis functions + "chande_momentum_oscillator", + "exponential_moving_average", + "double_exponential_moving_average", + "kaufmans_efficiency_ratio", + "kaufmans_adaptive_moving_average", + "triple_exponential_moving_average", + "triple_exponential_derivative", + "relative_strength_index", + ]) + }); + + FUNCTIONS.contains(name) +} + +/// Returns `true` if `name` is `"now"`. +pub fn is_now_function(name: &str) -> bool { + name == "now" +} diff --git a/influxdb_influxql_parser/src/lib.rs b/influxdb_influxql_parser/src/lib.rs index 765a31d68e..58adfebb62 100644 --- a/influxdb_influxql_parser/src/lib.rs +++ b/influxdb_influxql_parser/src/lib.rs @@ -30,6 +30,7 @@ pub mod delete; pub mod drop; pub mod explain; pub mod expression; +pub mod functions; pub mod identifier; mod internal; mod keywords; diff --git a/influxdb_influxql_parser/src/select.rs b/influxdb_influxql_parser/src/select.rs index 35ea92c874..0a138e1b89 100644 --- a/influxdb_influxql_parser/src/select.rs +++ b/influxdb_influxql_parser/src/select.rs @@ -11,8 +11,8 @@ use crate::expression::arithmetic::Expr::Wildcard; use crate::expression::arithmetic::{ arithmetic, call_expression, var_ref, ArithmeticParsers, Expr, WildcardType, }; -use crate::expression::conditional::is_valid_now_call; -use crate::expression::VarRef; +use crate::expression::{Call, VarRef}; +use crate::functions::is_now_function; use crate::identifier::{identifier, Identifier}; use crate::impl_tuple_clause; use crate::internal::{expect, map_fail, verify, ParseResult}; @@ -301,6 +301,14 @@ impl ArithmeticParsers for TimeCallIntervalArgument { /// The offset argument accepts either a duration, datetime-like string or `now`. struct TimeCallOffsetArgument; +/// Returns true if `expr` is a valid [`Expr::Call`] expression for the `now` function. 
+pub(crate) fn is_valid_now_call(expr: &Expr) -> bool { + match expr { + Expr::Call(Call { name, args }) => is_now_function(&name.to_lowercase()) && args.is_empty(), + _ => false, + } +} + impl TimeCallOffsetArgument { /// Parse the `now()` function call fn now_call(i: &str) -> ParseResult<&str, Expr> { diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql index 90474622eb..2af3ffb2ff 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql @@ -233,6 +233,12 @@ SELECT f64 FROM m0 WHERE f64 >= 19.5 AND non_existent = 1; SELECT f64 FROM m0 WHERE f64 >= 19.5 AND f64 =~ /foo/; SELECT f64 FROM m0 WHERE f64 >= 19.5 OR f64 =~ /foo/; +-- arithmetic scalar function calls work +SELECT time, floor(f64) FROM m0 WHERE floor(f64) = 19.0; + +-- aggregate function calls produce an error +SELECT *, floor(f64) FROM m0 WHERE sum(f64) > 100.0; + -- -- Validate column expressions -- diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index 196798255f..9581b93e23 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -439,6 +439,15 @@ name: m0 +---------------------+------+ | 2022-10-31T02:00:10 | 21.2 | +---------------------+------+ +-- InfluxQL: SELECT time, floor(f64) FROM m0 WHERE floor(f64) = 19.0; +name: m0 ++---------------------+-------+ +| time | floor | ++---------------------+-------+ +| 2022-10-31T02:00:30 | 19.0 | ++---------------------+-------+ +-- InfluxQL: SELECT *, floor(f64) FROM m0 WHERE sum(f64) > 100.0; +Error while planning query: Error during planning: invalid expression, the only valid function calls are 'now' with no arguments, or scalar math functions at pos 35 -- InfluxQL: SELECT tag0, f64, f64 * 0.5, f64 + str FROM m0 WHERE f64 > 19; name: m0 +---------------------+-------+------+-------+---------+ diff --git a/iox_query_influxql/src/plan/planner.rs b/iox_query_influxql/src/plan/planner.rs index b7140c3007..a3f10166bc 100644 --- a/iox_query_influxql/src/plan/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -41,6 +41,9 @@ use influxdb_influxql_parser::expression::{ Binary, Call, ConditionalBinary, ConditionalExpression, ConditionalOperator, VarRef, VarRefDataType, }; +use influxdb_influxql_parser::functions::{ + is_aggregate_function, is_now_function, is_scalar_math_function, +}; use influxdb_influxql_parser::select::{ FillClause, GroupByClause, SLimitClause, SOffsetClause, TimeZoneClause, }; @@ -65,7 +68,6 @@ use iox_query::exec::IOxSessionContext; use iox_query::logical_optimizer::range_predicate::find_time_range; use itertools::Itertools; use observability_deps::tracing::debug; -use once_cell::sync::Lazy; use query_functions::selectors::{ selector_first, selector_last, selector_max, selector_min, SelectorOutput, }; @@ -1112,7 +1114,7 @@ impl<'a> InfluxQLToLogicalPlan<'a> { match ctx.scope { ExprScope::Where => { - if call.name.eq_ignore_ascii_case("now") { + if is_now_function(&call.name) { error::not_implemented("now") } else { let name = &call.name; @@ -2181,68 +2183,6 @@ pub(crate) fn find_time_column_index(fields: &[Field]) -> Option<usize> { .map(|(i, _)| i) } -/// Returns `true` if `name` is a mathematical scalar function -/// supported by InfluxQL. 
-pub(crate) fn is_scalar_math_function(name: &str) -> bool { - static FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { - HashSet::from([ - "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", - "log2", "log10", "sqrt", "pow", "floor", "ceil", "round", - ]) - }); - - FUNCTIONS.contains(name) -} - -/// Returns `true` if `name` is an aggregate or aggregate function -/// supported by InfluxQL. -fn is_aggregate_function(name: &str) -> bool { - static FUNCTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| { - HashSet::from([ - // Scalar-like functions - "cumulative_sum", - "derivative", - "difference", - "elapsed", - "moving_average", - "non_negative_derivative", - "non_negative_difference", - // Selector functions - "bottom", - "first", - "last", - "max", - "min", - "percentile", - "sample", - "top", - // Aggregate functions - "count", - "integral", - "mean", - "median", - "mode", - "spread", - "stddev", - "sum", - // Prediction functions - "holt_winters", - "holt_winters_with_fit", - // Technical analysis functions - "chande_momentum_oscillator", - "exponential_moving_average", - "double_exponential_moving_average", - "kaufmans_efficiency_ratio", - "kaufmans_adaptive_moving_average", - "triple_exponential_moving_average", - "triple_exponential_derivative", - "relative_strength_index", - ]) - }); - - FUNCTIONS.contains(name) -} - /// Returns true if the conditional expression is a single node that /// refers to the `time` column. /// diff --git a/iox_query_influxql/src/plan/planner_time_range_expression.rs b/iox_query_influxql/src/plan/planner_time_range_expression.rs index 06bbed1711..2406339bc1 100644 --- a/iox_query_influxql/src/plan/planner_time_range_expression.rs +++ b/iox_query_influxql/src/plan/planner_time_range_expression.rs @@ -4,6 +4,7 @@ use crate::plan::util::binary_operator_to_df_operator; use datafusion::common::{DataFusionError, Result, ScalarValue}; use datafusion::logical_expr::{binary_expr, lit, now, BinaryExpr, Expr as DFExpr, Operator}; use influxdb_influxql_parser::expression::{Binary, BinaryOperator, Call}; +use influxdb_influxql_parser::functions::is_now_function; use influxdb_influxql_parser::{expression::Expr, literal::Literal}; type ExprResult = Result<DFExpr>; @@ -103,7 +104,7 @@ fn reduce_expr(expr: &Expr, tz: Option<chrono_tz::Tz>) -> ExprResult { match expr { Expr::Binary(v) => reduce_binary_expr(v, tz).map_err(map_expr_err(expr)), Expr::Call (Call { name, .. 
}) => { - if !name.eq_ignore_ascii_case("now") { + if !is_now_function(name) { return error::query( format!("invalid function call '{name}'"), ); diff --git a/iox_query_influxql/src/plan/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs index c8a6d6bcda..b0459f5131 100644 --- a/iox_query_influxql/src/plan/rewriter.rs +++ b/iox_query_influxql/src/plan/rewriter.rs @@ -3,12 +3,12 @@ use crate::plan::expr_type_evaluator::evaluate_type; use crate::plan::field::field_name; use crate::plan::field_mapper::{field_and_dimensions, FieldTypeMap, TagSet}; -use crate::plan::planner::is_scalar_math_function; use crate::plan::{error, util, SchemaProvider}; use datafusion::common::{DataFusionError, Result}; use influxdb_influxql_parser::common::{MeasurementName, QualifiedMeasurementName}; use influxdb_influxql_parser::expression::walk::{walk_expr, walk_expr_mut}; use influxdb_influxql_parser::expression::{Call, Expr, VarRef, VarRefDataType, WildcardType}; +use influxdb_influxql_parser::functions::is_scalar_math_function; use influxdb_influxql_parser::identifier::Identifier; use influxdb_influxql_parser::literal::Literal; use influxdb_influxql_parser::select::{
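The parser commit above moves the function-classification helpers into a public `influxdb_influxql_parser::functions` module so the parser and the planner share one list. A small usage sketch, not part of the commit, assuming only the three predicates that module exports in the diff; the specific assertions are illustrative:

```rust
// Illustrative only: the predicates come from the new public module in the diff above.
use influxdb_influxql_parser::functions::{
    is_aggregate_function, is_now_function, is_scalar_math_function,
};

fn main() {
    // Scalar math functions such as floor/abs are now accepted in WHERE clauses...
    assert!(is_scalar_math_function("floor"));
    assert!(is_scalar_math_function("abs"));
    // ...while aggregates such as sum are classified separately and still rejected there.
    assert!(is_aggregate_function("sum"));
    assert!(!is_scalar_math_function("sum"));
    // `now` keeps its special handling.
    assert!(is_now_function("now"));
}
```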
530acf170c8fea40de5040027f777511c95f970b
Marco Neumann
2023-08-09 14:33:53
more automatic tests for migrations (#8457)
* test: test self-tests * test: more tests for migrations
null
test: more automatic tests for migrations (#8457) * test: test self-tests * test: more tests for migrations
diff --git a/iox_catalog/src/migrate.rs b/iox_catalog/src/migrate.rs index 74913c0c84..51c7e9f17a 100644 --- a/iox_catalog/src/migrate.rs +++ b/iox_catalog/src/migrate.rs @@ -872,6 +872,105 @@ ORDER BY pg_class.relname } } +/// Testing tools for migrations. +#[cfg(test)] +pub mod test_utils { + use super::*; + + use std::future::Future; + + /// Test migration. + /// + /// This runs the migrations to check if they pass. The given factory must provide an empty schema (i.e. w/o any + /// migrations applied). + /// + /// # Tests + /// This tests that: + /// + /// - **run once:** All migrations work when ran once. + /// - **idempotency:** Migrations marked as [`idempotent`](IOxMigration::idempotent) can be executed twice. + /// + /// # Error + /// Fails if this finds a bug. + pub async fn test_migration<Factory, FactoryFut, Pool>( + migrator: &IOxMigrator, + factory: Factory, + ) -> Result<(), MigrateError> + where + Factory: (Fn() -> FactoryFut) + Send + Sync, + FactoryFut: Future<Output = Pool> + Send, + Pool: Send, + for<'a> &'a Pool: Acquire<'a> + Send, + for<'a> <<&'a Pool as Acquire<'a>>::Connection as Deref>::Target: IOxMigrate, + { + { + info!("test: run all migrations"); + let conn = factory().await; + let applied = migrator.run(&conn).await?; + assert_eq!(applied.len(), migrator.migrations.len()); + } + + info!("interrupt non-transaction migrations"); + for (idx_m, m) in migrator.migrations.iter().enumerate() { + if m.single_transaction() { + info!( + version = m.version, + "skip migration because single transaction property" + ); + continue; + } + + let steps = m.steps.len(); + info!( + version = m.version, + steps, "found non-transactional migration" + ); + + for step in 1..(steps + 1) { + info!(version = m.version, steps, step, "test: die after step"); + + let broken_cmd = "iox_this_is_a_broken_test_cmd"; + let migrator_broken = IOxMigrator::try_new( + migrator + .migrations + .iter() + .take(idx_m) + .cloned() + .chain(std::iter::once(IOxMigration { + steps: m + .steps + .iter() + .take(step) + .cloned() + .chain(std::iter::once(IOxMigrationStep::SqlStatement { + sql: broken_cmd.into(), + in_transaction: false, + })) + .collect(), + ..m.clone() + })), + ) + .expect("bug in test"); + + let conn = factory().await; + let err = migrator_broken.run(&conn).await.unwrap_err(); + if !err.to_string().contains(broken_cmd) { + panic!("migrator broke in expected way, bug in test setup: {err}"); + } + + info!( + version = m.version, + steps, step, "test: die after step, recover from error" + ); + let applied = migrator.run(&conn).await?; + assert!(applied.contains(&m.version)); + } + } + + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; @@ -2064,6 +2163,185 @@ mod tests { ); } + #[tokio::test] + async fn test_migrator_bug_selftest_multiple_dirty_migrations() { + maybe_skip_integration!(); + let mut conn = setup().await; + let conn = &mut *conn; + + let migrator = IOxMigrator::try_new([ + IOxMigration { + version: 1, + description: "".into(), + steps: [].into(), + checksum: [1, 2, 3].into(), + other_compatible_checksums: [].into(), + }, + IOxMigration { + version: 2, + description: "".into(), + steps: [].into(), + checksum: [4, 5, 6].into(), + other_compatible_checksums: [].into(), + }, + ]) + .unwrap(); + + migrator.run_direct(conn).await.unwrap(); + + conn.execute("UPDATE _sqlx_migrations SET success = FALSE;") + .await + .unwrap(); + + let err = migrator.run_direct(conn).await.unwrap_err(); + + assert_eq!( + err.to_string(), + "while resolving migrations: there are multiple dirty 
versions, this should not happen and is considered a bug: [1, 2]", + ); + } + + #[tokio::test] + async fn test_migrator_bug_selftest_applied_after_dirty() { + maybe_skip_integration!(); + let mut conn = setup().await; + let conn = &mut *conn; + + let migrator = IOxMigrator::try_new([ + IOxMigration { + version: 1, + description: "".into(), + steps: [].into(), + checksum: [1, 2, 3].into(), + other_compatible_checksums: [].into(), + }, + IOxMigration { + version: 2, + description: "".into(), + steps: [].into(), + checksum: [4, 5, 6].into(), + other_compatible_checksums: [].into(), + }, + ]) + .unwrap(); + + migrator.run_direct(conn).await.unwrap(); + + conn.execute("UPDATE _sqlx_migrations SET success = FALSE WHERE version = 1;") + .await + .unwrap(); + + let err = migrator.run_direct(conn).await.unwrap_err(); + + assert_eq!( + err.to_string(), + "while resolving migrations: dirty version (1) is not the last applied version (2), this is a bug", + ); + } + + #[tokio::test] + async fn test_tester_finds_invalid_migration() { + maybe_skip_integration!(); + + let migrator = IOxMigrator::try_new([IOxMigration { + version: 1, + description: "".into(), + steps: [IOxMigrationStep::SqlStatement { + sql: "foo".into(), + in_transaction: true, + }] + .into(), + checksum: [1, 2, 3].into(), + other_compatible_checksums: [].into(), + }]) + .unwrap(); + + let err = test_utils::test_migration(&migrator, setup_pool) + .await + .unwrap_err(); + + assert_eq!( + err.to_string(), + "while executing migrations: error returned from database: syntax error at or near \"foo\"", + ); + } + + #[tokio::test] + async fn test_tester_finds_non_idempotent_migration_package() { + maybe_skip_integration!(); + + let migrator = IOxMigrator::try_new([IOxMigration { + version: 1, + description: "".into(), + steps: [IOxMigrationStep::SqlStatement { + sql: "CREATE TABLE t (x INT);".into(), + // do NOT run this in a transaction, otherwise this is automatically idempotent + in_transaction: false, + }] + .into(), + checksum: [1, 2, 3].into(), + other_compatible_checksums: [].into(), + }]) + .unwrap(); + + let err = test_utils::test_migration(&migrator, setup_pool) + .await + .unwrap_err(); + + assert_eq!( + err.to_string(), + "while executing migrations: error returned from database: relation \"t\" already exists", + ); + } + + #[tokio::test] + async fn test_tester_finds_non_idempotent_migration_step() { + maybe_skip_integration!(); + + let migrator = IOxMigrator::try_new([ + IOxMigration { + version: 1, + description: "".into(), + steps: [IOxMigrationStep::SqlStatement { + sql: "CREATE TABLE t (x INT);".into(), + in_transaction: true, + }] + .into(), + checksum: [1, 2, 3].into(), + other_compatible_checksums: [].into(), + }, + IOxMigration { + version: 2, + description: "".into(), + steps: [ + IOxMigrationStep::SqlStatement { + sql: "DROP TABLE t;".into(), + // do NOT run this in a transaction, otherwise this is automatically idempotent + in_transaction: false, + }, + IOxMigrationStep::SqlStatement { + sql: "CREATE TABLE t (x INT);".into(), + // do NOT run this in a transaction, otherwise this is automatically idempotent + in_transaction: false, + }, + ] + .into(), + checksum: [4, 5, 6].into(), + other_compatible_checksums: [].into(), + }, + ]) + .unwrap(); + + let err = test_utils::test_migration(&migrator, setup_pool) + .await + .unwrap_err(); + + assert_eq!( + err.to_string(), + "while executing migrations: error returned from database: table \"t\" does not exist", + ); + } + async fn setup_pool() -> HotSwapPool<Postgres> { 
maybe_start_logging(); diff --git a/iox_catalog/src/postgres.rs b/iox_catalog/src/postgres.rs index fdcf41979a..88148b2936 100644 --- a/iox_catalog/src/postgres.rs +++ b/iox_catalog/src/postgres.rs @@ -2067,6 +2067,20 @@ mod tests { postgres.setup().await.unwrap(); } + #[tokio::test] + async fn test_migration_generic() { + use crate::migrate::test_utils::test_migration; + + maybe_skip_integration!(); + maybe_start_logging(); + + test_migration(&MIGRATOR, || async { + setup_db_no_migration().await.into_pool() + }) + .await + .unwrap(); + } + #[tokio::test] async fn test_catalog() { maybe_skip_integration!();
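The migration-test commit above adds a `test_utils::test_migration` harness that replays every migration, then interrupts each non-transactional step and retries, so steps outside a transaction must be idempotent. A hedged sketch of the kind of migration that passes that check, written as it might appear inside `iox_catalog`'s own migrate tests; the constructor shapes mirror the tests in the diff, while the SQL, description and checksum bytes are placeholders.

```rust
// Illustrative only: field and constructor shapes mirror the tests in the diff above;
// the module path, SQL and checksum bytes are placeholders.
use crate::migrate::{IOxMigration, IOxMigrationStep, IOxMigrator};

fn example_migrator() -> IOxMigrator {
    IOxMigrator::try_new([IOxMigration {
        version: 1,
        description: "create table t if missing".into(),
        steps: [IOxMigrationStep::SqlStatement {
            // `IF NOT EXISTS` keeps the step safe to re-run, which is what the
            // interrupt-and-retry loop in `test_migration` verifies for steps
            // executed outside a transaction.
            sql: "CREATE TABLE IF NOT EXISTS t (x INT);".into(),
            in_transaction: false,
        }]
        .into(),
        checksum: [1, 2, 3].into(),
        other_compatible_checksums: [].into(),
    }])
    .expect("valid migration set")
}
```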
37219af9d44e503e951e6049029f139a0d4e6c9f
Trevor Hilton
2024-12-10 06:32:15
track parquet cache metrics (#25632)
* feat: parquet cache metrics * feat: track parquet cache metrics Adds metrics to track the following in the in-memory parquet cache: * cache size in bytes (also included a fix in the calculation of that) * cache size in n files * cache hits * cache misses * cache misses while the oracle is fetching a file A test was added to check this functionality * refactor: clean up logic and fix cache removal tracking error Some logic and naming were cleaned up and the boolean to optionally track metrics on entry removal was removed, as it was incorrect in the first place: a fetching entry still has a size, which counts toward the size of the cache. So, this makes it such that anytime an entry is removed, whether its state is success or fetching, its size will be decremented from the cache size metrics. The sizing calculations were corrected, and the cache metrics test was updated with more thorough assertions.
null
feat: track parquet cache metrics (#25632) * feat: parquet cache metrics * feat: track parquet cache metrics Adds metrics to track the following in the in-memory parquet cache: * cache size in bytes (also included a fix in the calculation of that) * cache size in n files * cache hits * cache misses * cache misses while the oracle is fetching a file A test was added to check this functionality * refactor: clean up logic and fix cache removal tracking error Some logic and naming were cleaned up and the boolean to optionally track metrics on entry removal was removed, as it was incorrect in the first place: a fetching entry still has a size, which counts toward the size of the cache. So, this makes it such that anytime an entry is removed, whether its state is success or fetching, its size will be decremented from the cache size metrics. The sizing calculations were corrected, and the cache metrics test was updated with more thorough assertions.
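The diff below registers an access counter (with a "status" attribute) and two size gauges with the `metric` crate, and reads them back in a new `MetricVerifier` test helper. As a minimal sketch of that read-back path, using only the calls visible in the diff (`get_instrument`, `get_observer`, `fetch`); the helper name and signature here are illustrative, not part of the commit:

```rust
use std::sync::Arc;

use metric::{Attributes, Metric, Registry, U64Counter};

// Illustrative helper (not part of the commit): read the cache-hit counter
// back out of a metric::Registry the same way the diff's MetricVerifier
// does, by looking the instrument up by name and then selecting the
// ("status", "cached") attribute set.
fn cache_hits(registry: &Arc<Registry>) -> u64 {
    let access = registry
        .get_instrument::<Metric<U64Counter>>("influxdb3_parquet_cache_access")
        .expect("cache access metric not registered");
    access
        .get_observer(&Attributes::from(&[("status", "cached")]))
        .expect("no observer for these attributes")
        .fetch()
}
```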
diff --git a/Cargo.lock b/Cargo.lock index fa6a782c96..29b213b86e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3105,6 +3105,7 @@ dependencies = [ "influxdb3_write", "insta", "iox_time", + "metric", "object_store", "observability_deps", "parking_lot", diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index 4144facfca..dcbcf847a9 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -393,6 +393,7 @@ pub async fn command(config: Config) -> Result<()> { let (object_store, parquet_cache) = create_cached_obj_store_and_oracle( object_store, Arc::clone(&time_provider) as _, + Arc::clone(&metrics), config.parquet_mem_cache_size.as_num_bytes(), config.parquet_mem_cache_prune_percentage.into(), config.parquet_mem_cache_prune_interval.into(), diff --git a/influxdb3_cache/Cargo.toml b/influxdb3_cache/Cargo.toml index 3d7d28788f..20203a0774 100644 --- a/influxdb3_cache/Cargo.toml +++ b/influxdb3_cache/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] # Core Crates iox_time.workspace = true +metric.workspace = true observability_deps.workspace = true schema.workspace = true diff --git a/influxdb3_cache/src/parquet_cache/metrics.rs b/influxdb3_cache/src/parquet_cache/metrics.rs new file mode 100644 index 0000000000..a5ab6d03ba --- /dev/null +++ b/influxdb3_cache/src/parquet_cache/metrics.rs @@ -0,0 +1,79 @@ +use metric::{Registry, U64Counter, U64Gauge}; + +#[derive(Debug)] +pub(super) struct AccessMetrics { + cache_hits: U64Counter, + cache_misses: U64Counter, + cache_misses_while_fetching: U64Counter, +} + +pub(super) const CACHE_ACCESS_NAME: &str = "influxdb3_parquet_cache_access"; + +impl AccessMetrics { + pub(super) fn new(metric_registry: &Registry) -> Self { + let m_access = metric_registry.register_metric::<U64Counter>( + CACHE_ACCESS_NAME, + "track accesses to the in-memory parquet cache", + ); + let cache_hits = m_access.recorder(&[("status", "cached")]); + let cache_misses = m_access.recorder(&[("status", "miss")]); + let cache_misses_while_fetching = m_access.recorder(&[("status", "miss_while_fetching")]); + Self { + cache_hits, + cache_misses, + cache_misses_while_fetching, + } + } + + pub(super) fn record_cache_hit(&self) { + self.cache_hits.inc(1); + } + + pub(super) fn record_cache_miss(&self) { + self.cache_misses.inc(1); + } + + pub(super) fn record_cache_miss_while_fetching(&self) { + self.cache_misses_while_fetching.inc(1); + } +} + +#[derive(Debug)] +pub(super) struct SizeMetrics { + cache_size_bytes: U64Gauge, + cache_size_n_files: U64Gauge, +} + +pub(super) const CACHE_SIZE_BYTES_NAME: &str = "influxdb3_parquet_cache_size_bytes"; +pub(super) const CACHE_SIZE_N_FILES_NAME: &str = "influxdb3_parquet_cache_size_number_of_files"; + +impl SizeMetrics { + pub(super) fn new(metric_registry: &Registry) -> Self { + let cache_size_bytes = metric_registry + .register_metric::<U64Gauge>( + CACHE_SIZE_BYTES_NAME, + "track size of in-memory parquet cache", + ) + .recorder(&[]); + let cache_size_n_files = metric_registry + .register_metric::<U64Gauge>( + CACHE_SIZE_N_FILES_NAME, + "track number of files in the in-memory parquet cache", + ) + .recorder(&[]); + Self { + cache_size_bytes, + cache_size_n_files, + } + } + + pub(super) fn record_file_additions(&self, size_bytes: u64, n_files: u64) { + self.cache_size_bytes.inc(size_bytes); + self.cache_size_n_files.inc(n_files); + } + + pub(super) fn record_file_deletions(&self, total_size_bytes: u64, n_files: u64) { + self.cache_size_bytes.dec(total_size_bytes); + 
self.cache_size_n_files.dec(n_files); + } +} diff --git a/influxdb3_cache/src/parquet_cache/mod.rs b/influxdb3_cache/src/parquet_cache/mod.rs index d83600cd77..96ced31e2e 100644 --- a/influxdb3_cache/src/parquet_cache/mod.rs +++ b/influxdb3_cache/src/parquet_cache/mod.rs @@ -20,6 +20,8 @@ use futures::{ FutureExt, StreamExt, TryStreamExt, }; use iox_time::TimeProvider; +use metric::Registry; +use metrics::{AccessMetrics, SizeMetrics}; use object_store::{ path::Path, Error, GetOptions, GetResult, GetResultPayload, ListResult, MultipartUpload, ObjectMeta, ObjectStore, PutMultipartOpts, PutOptions, PutPayload, PutResult, @@ -30,6 +32,8 @@ use tokio::sync::{ oneshot, watch, }; +mod metrics; + /// Shared future type for cache values that are being fetched type SharedCacheValueFuture = Shared<BoxFuture<'static, Result<Arc<CacheValue>, DynError>>>; @@ -119,16 +123,18 @@ impl ParquetCacheOracle for MemCacheOracle { pub fn create_cached_obj_store_and_oracle( object_store: Arc<dyn ObjectStore>, time_provider: Arc<dyn TimeProvider>, + metric_registry: Arc<Registry>, cache_capacity: usize, prune_percent: f64, prune_interval: Duration, ) -> (Arc<dyn ObjectStore>, Arc<dyn ParquetCacheOracle>) { - let store = Arc::new(MemCachedObjectStore::new( - object_store, - cache_capacity, + let store = Arc::new(MemCachedObjectStore::new(MemCachedObjectStoreArgs { time_provider, + metric_registry, + inner: object_store, + memory_capacity: cache_capacity, prune_percent, - )); + })); let oracle = Arc::new(MemCacheOracle::new(Arc::clone(&store), prune_interval)); (store, oracle) } @@ -137,10 +143,12 @@ pub fn create_cached_obj_store_and_oracle( pub fn test_cached_obj_store_and_oracle( object_store: Arc<dyn ObjectStore>, time_provider: Arc<dyn TimeProvider>, + metric_registry: Arc<Registry>, ) -> (Arc<dyn ObjectStore>, Arc<dyn ParquetCacheOracle>) { create_cached_obj_store_and_oracle( object_store, time_provider, + metric_registry, 1024 * 1024 * 1024, 0.1, Duration::from_millis(10), @@ -259,17 +267,28 @@ struct Cache { map: DashMap<Path, CacheEntry>, /// Provides timestamps for updating the hit time of each cache entry time_provider: Arc<dyn TimeProvider>, + /// Track metrics for observing accesses to the cache + access_metrics: AccessMetrics, + /// Track metrics for observing the size of the cache + size_metrics: SizeMetrics, } impl Cache { /// Create a new cache with a given capacity and prune percent - fn new(capacity: usize, prune_percent: f64, time_provider: Arc<dyn TimeProvider>) -> Self { + fn new( + capacity: usize, + prune_percent: f64, + time_provider: Arc<dyn TimeProvider>, + metric_registry: Arc<Registry>, + ) -> Self { Self { capacity, used: AtomicUsize::new(0), prune_percent, map: DashMap::new(), time_provider, + access_metrics: AccessMetrics::new(&metric_registry), + size_metrics: SizeMetrics::new(&metric_registry), } } @@ -278,11 +297,17 @@ impl Cache { /// This updates the hit time of the entry and returns a cloned copy of the entry state so that /// the reference into the map is dropped fn get(&self, path: &Path) -> Option<CacheEntryState> { - let entry = self.map.get(path)?; + let Some(entry) = self.map.get(path) else { + self.access_metrics.record_cache_miss(); + return None; + }; if entry.is_success() { + self.access_metrics.record_cache_hit(); entry .hit_time .store(self.time_provider.now().timestamp_nanos(), Ordering::SeqCst); + } else if entry.is_fetching() { + self.access_metrics.record_cache_miss_while_fetching(); } Some(entry.state.clone()) } @@ -303,6 +328,8 @@ impl Cache { hit_time: 
AtomicI64::new(self.time_provider.now().timestamp_nanos()), }; let additional = entry.size(); + self.size_metrics + .record_file_additions(additional as u64, 1); self.map.insert(path.clone(), entry); self.used.fetch_add(additional, Ordering::SeqCst); } @@ -318,13 +345,16 @@ impl Cache { // treated as immutable, this should be okay. bail!("attempted to store value in non-fetching cache entry"); } + let current_size = entry.size(); entry.state = CacheEntryState::Success(value); entry .hit_time .store(self.time_provider.now().timestamp_nanos(), Ordering::SeqCst); // TODO(trevor): what if size is greater than cache capacity? - let additional = entry.size(); - self.used.fetch_add(additional, Ordering::SeqCst); + let additional_bytes = entry.size() - current_size; + self.size_metrics + .record_file_additions(additional_bytes as u64, 0); + self.used.fetch_add(additional_bytes, Ordering::SeqCst); Ok(()) } Entry::Vacant(_) => bail!("attempted to set success state on an empty cache entry"), @@ -336,7 +366,10 @@ impl Cache { let Some((_, entry)) = self.map.remove(path) else { return; }; - self.used.fetch_sub(entry.state.size(), Ordering::SeqCst); + let removed_bytes = entry.size(); + self.size_metrics + .record_file_deletions(removed_bytes as u64, 1); + self.used.fetch_sub(removed_bytes, Ordering::SeqCst); } /// Prune least recently hit entries from the cache @@ -378,11 +411,14 @@ impl Cache { // track the total size of entries that get freed: let mut freed = 0; + let n_files = prune_heap.len() as u64; // drop entries with hit times before the cut-off: for item in prune_heap { self.map.remove(&Path::from(item.path_ref.as_ref())); freed += item.size; } + self.size_metrics + .record_file_deletions(freed as u64, n_files); // update used mem size with freed amount: self.used.fetch_sub(freed, Ordering::SeqCst); @@ -430,17 +466,34 @@ pub struct MemCachedObjectStore { cache: Arc<Cache>, } +#[derive(Debug)] +pub struct MemCachedObjectStoreArgs { + pub time_provider: Arc<dyn TimeProvider>, + pub metric_registry: Arc<Registry>, + pub inner: Arc<dyn ObjectStore>, + pub memory_capacity: usize, + pub prune_percent: f64, +} + impl MemCachedObjectStore { /// Create a new [`MemCachedObjectStore`] fn new( - inner: Arc<dyn ObjectStore>, - memory_capacity: usize, - time_provider: Arc<dyn TimeProvider>, - prune_percent: f64, + MemCachedObjectStoreArgs { + time_provider, + metric_registry, + inner, + memory_capacity, + prune_percent, + }: MemCachedObjectStoreArgs, ) -> Self { Self { inner, - cache: Arc::new(Cache::new(memory_capacity, prune_percent, time_provider)), + cache: Arc::new(Cache::new( + memory_capacity, + prune_percent, + time_provider, + metric_registry, + )), } } } @@ -707,13 +760,16 @@ pub(crate) mod tests { RequestCountedObjectStore, SynchronizedObjectStore, }; use iox_time::{MockProvider, Time, TimeProvider}; + use metric::{Attributes, Metric, Registry, U64Counter, U64Gauge}; use object_store::{memory::InMemory, path::Path, ObjectStore, PutPayload}; use pretty_assertions::assert_eq; use tokio::sync::Notify; use crate::parquet_cache::{ - create_cached_obj_store_and_oracle, test_cached_obj_store_and_oracle, CacheRequest, + create_cached_obj_store_and_oracle, + metrics::{CACHE_ACCESS_NAME, CACHE_SIZE_BYTES_NAME, CACHE_SIZE_N_FILES_NAME}, + test_cached_obj_store_and_oracle, CacheRequest, }; macro_rules! 
assert_payload_at_equals { @@ -741,6 +797,7 @@ pub(crate) mod tests { let (cached_store, oracle) = test_cached_obj_store_and_oracle( Arc::clone(&inner_store) as _, Arc::clone(&time_provider), + Default::default(), ); // PUT a paylaod into the object store through the outer mem cached store: let path = Path::from("0.parquet"); @@ -783,6 +840,7 @@ pub(crate) mod tests { let (cached_store, oracle) = create_cached_obj_store_and_oracle( Arc::clone(&inner_store) as _, Arc::clone(&time_provider) as _, + Default::default(), cache_capacity_bytes, cache_prune_percent, cache_prune_interval, @@ -906,6 +964,7 @@ pub(crate) mod tests { let (cached_store, oracle) = test_cached_obj_store_and_oracle( Arc::clone(&inner_store) as _, Arc::clone(&time_provider) as _, + Default::default(), ); // PUT an entry into the store: @@ -946,4 +1005,182 @@ pub(crate) mod tests { assert_payload_at_equals!(cached_store, payload, path); assert_eq!(1, counter.total_read_request_count(&path)); } + + struct MetricVerifier { + access_metrics: Metric<U64Counter>, + size_mb_metrics: Metric<U64Gauge>, + size_n_files_metrics: Metric<U64Gauge>, + } + + impl MetricVerifier { + fn new(metric_registry: Arc<Registry>) -> Self { + let access_metrics = metric_registry + .get_instrument::<Metric<U64Counter>>(CACHE_ACCESS_NAME) + .unwrap(); + let size_mb_metrics = metric_registry + .get_instrument::<Metric<U64Gauge>>(CACHE_SIZE_BYTES_NAME) + .unwrap(); + let size_n_files_metrics = metric_registry + .get_instrument::<Metric<U64Gauge>>(CACHE_SIZE_N_FILES_NAME) + .unwrap(); + Self { + access_metrics, + size_mb_metrics, + size_n_files_metrics, + } + } + + fn assert_access( + &self, + hits_expected: u64, + misses_expected: u64, + misses_while_fetching_expected: u64, + ) { + let hits_actual = self + .access_metrics + .get_observer(&Attributes::from(&[("status", "cached")])) + .unwrap() + .fetch(); + let misses_actual = self + .access_metrics + .get_observer(&Attributes::from(&[("status", "miss")])) + .unwrap() + .fetch(); + let misses_while_fetching_actual = self + .access_metrics + .get_observer(&Attributes::from(&[("status", "miss_while_fetching")])) + .unwrap() + .fetch(); + assert_eq!( + hits_actual, hits_expected, + "cache hits did not match expectation" + ); + assert_eq!( + misses_actual, misses_expected, + "cache misses did not match expectation" + ); + assert_eq!( + misses_while_fetching_actual, misses_while_fetching_expected, + "cache misses while fetching did not match expectation" + ); + } + + fn assert_size(&self, size_bytes_expected: u64, size_n_files_expected: u64) { + let size_bytes_actual = self + .size_mb_metrics + .get_observer(&Attributes::from(&[])) + .unwrap() + .fetch(); + let size_n_files_actual = self + .size_n_files_metrics + .get_observer(&Attributes::from(&[])) + .unwrap() + .fetch(); + assert_eq!( + size_bytes_actual, size_bytes_expected, + "cache size in bytes did not match actual" + ); + assert_eq!( + size_n_files_actual, size_n_files_expected, + "cache size in number of files did not match actual" + ); + } + } + + #[tokio::test] + async fn cache_metrics() { + // test setup + let to_store_notify = Arc::new(Notify::new()); + let from_store_notify = Arc::new(Notify::new()); + let counted_store = Arc::new(RequestCountedObjectStore::new(Arc::new(InMemory::new()))); + let inner_store = Arc::new( + SynchronizedObjectStore::new(Arc::clone(&counted_store) as _) + .with_get_notifies(Arc::clone(&to_store_notify), Arc::clone(&from_store_notify)), + ); + let time_provider = 
Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); + let metric_registry = Arc::new(Registry::new()); + let (cached_store, oracle) = test_cached_obj_store_and_oracle( + Arc::clone(&inner_store) as _, + Arc::clone(&time_provider) as _, + Arc::clone(&metric_registry), + ); + let metric_verifier = MetricVerifier::new(metric_registry); + + // put something in the object store: + let path = Path::from("0.parquet"); + let payload = b"Janeway"; + cached_store + .put(&path, PutPayload::from_static(payload)) + .await + .unwrap(); + + // spin off a task to make a request to the object store on a separate thread. We will drive + // the notifiers from here, as we just need the request to go through to register a cache + // miss. + let cached_store_cloned = Arc::clone(&cached_store); + let path_cloned = path.clone(); + let h = tokio::spawn(async move { + assert_payload_at_equals!(cached_store_cloned, payload, path_cloned); + }); + + // drive the synchronized store using the notifiers: + from_store_notify.notified().await; + to_store_notify.notify_one(); + h.await.unwrap(); + + // check that there is a single cache miss: + metric_verifier.assert_access(0, 1, 0); + // nothing in the cache so sizes are 0 + metric_verifier.assert_size(0, 0); + + // there should be a single request made to the inner counted store from above: + assert_eq!(1, counted_store.total_read_request_count(&path)); + + // have the cache oracle cache the object: + let (cache_request, notifier_rx) = CacheRequest::create(path.clone()); + oracle.register(cache_request); + + // we are in the middle of a get request, i.e., the cache entry is "fetching" once this + // call to notified wakes: + let _ = from_store_notify.notified().await; + // just a fetching entry in the cache, so it will have n files of 1 and 8 bytes for the atomic + // i64 + metric_verifier.assert_size(8, 1); + + // spawn a thread to wake the in-flight get request initiated by the cache oracle after we + // have started a get request below, such that the get request below hits the cache while + // the entry is still in the "fetching" state: + let h = tokio::spawn(async move { + to_store_notify.notify_one(); + let _ = notifier_rx.await; + }); + + // make the request to the store, which hits the cache in the "fetching" state since we + // haven't made the call to notify the store to continue yet: + assert_payload_at_equals!(cached_store, payload, path); + + // check that there is a single miss while fetching, note, the metrics are cumulative, so + // the original miss is still there: + metric_verifier.assert_access(0, 1, 1); + + // drive the task to completion to ensure that the cache request has been fulfilled: + h.await.unwrap(); + + // there should only have been two requests made, i.e., one from the request before the + // object was cached, and one from the cache oracle: + assert_eq!(2, counted_store.total_read_request_count(&path)); + + // make another request, this time, it should use the cache: + assert_payload_at_equals!(cached_store, payload, path); + + // there have now been one of each access metric: + metric_verifier.assert_access(1, 1, 1); + // now the cache has a the full entry, which includes the atomic i64, the metadata, and + // the payload itself: + metric_verifier.assert_size(25, 1); + + cached_store.delete(&path).await.unwrap(); + // removing the entry should bring the cache sizes back to zero: + metric_verifier.assert_size(0, 0); + } } diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index b31f0c1b0c..e7559cb07e 100644 --- 
a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -768,8 +768,11 @@ mod tests { let metrics = Arc::new(metric::Registry::new()); let object_store: Arc<DynObjectStore> = Arc::new(object_store::memory::InMemory::new()); let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(start_time))); - let (object_store, parquet_cache) = - test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider) as _); + let (object_store, parquet_cache) = test_cached_obj_store_and_oracle( + object_store, + Arc::clone(&time_provider) as _, + Default::default(), + ); let parquet_store = ParquetStorage::new(Arc::clone(&object_store), StorageId::from("influxdb3")); let exec = Arc::new(Executor::new_with_config_and_executor( diff --git a/influxdb3_server/src/query_executor.rs b/influxdb3_server/src/query_executor.rs index 625ec0090c..b75c7579ab 100644 --- a/influxdb3_server/src/query_executor.rs +++ b/influxdb3_server/src/query_executor.rs @@ -672,8 +672,11 @@ mod tests { let object_store: Arc<dyn ObjectStore> = Arc::new(LocalFileSystem::new_with_prefix(test_helpers::tmp_dir().unwrap()).unwrap()); let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); - let (object_store, parquet_cache) = - test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider) as _); + let (object_store, parquet_cache) = test_cached_obj_store_and_oracle( + object_store, + Arc::clone(&time_provider) as _, + Default::default(), + ); let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let exec = make_exec(Arc::clone(&object_store)); let host_id = Arc::from("sample-host-id"); diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 43a7522c9b..081f8a0614 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -750,8 +750,11 @@ mod tests { let object_store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); let time_provider: Arc<dyn TimeProvider> = Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))); - let (object_store, parquet_cache) = - test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider)); + let (object_store, parquet_cache) = test_cached_obj_store_and_oracle( + object_store, + Arc::clone(&time_provider), + Default::default(), + ); let persister = Arc::new(Persister::new(Arc::clone(&object_store), "test_host")); let catalog = Arc::new(persister.load_or_create_catalog().await.unwrap()); let last_cache = LastCacheProvider::new_from_catalog(Arc::clone(&catalog) as _).unwrap(); @@ -2183,8 +2186,11 @@ mod tests { ) -> (WriteBufferImpl, IOxSessionContext, Arc<dyn TimeProvider>) { let time_provider: Arc<dyn TimeProvider> = Arc::new(MockProvider::new(start)); let (object_store, parquet_cache) = if use_cache { - let (object_store, parquet_cache) = - test_cached_obj_store_and_oracle(object_store, Arc::clone(&time_provider)); + let (object_store, parquet_cache) = test_cached_obj_store_and_oracle( + object_store, + Arc::clone(&time_provider), + Default::default(), + ); (object_store, Some(parquet_cache)) } else { (object_store, None)
1f5294c0967d5b32914f07f9ce39817d21cb5b6d
Dom Dwyer
2022-12-20 17:00:30
persistence system integration test
This test ensures the persistence system as a whole works in the happy path.
null
test: persistence system integration test This test ensures the persistence system as a whole works in the happy path.
diff --git a/ingester2/src/persist/mod.rs b/ingester2/src/persist/mod.rs index aa3bff37f0..d41a0b95ab 100644 --- a/ingester2/src/persist/mod.rs +++ b/ingester2/src/persist/mod.rs @@ -6,3 +6,225 @@ pub(crate) mod handle; pub(crate) mod hot_partitions; pub mod queue; mod worker; + +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use assert_matches::assert_matches; + use data_types::{CompactionLevel, ParquetFile, PartitionKey, ShardId}; + use dml::DmlOperation; + use futures::TryStreamExt; + use iox_catalog::{ + interface::{get_schema_by_id, Catalog}, + mem::MemCatalog, + validate_or_insert_schema, + }; + use iox_query::exec::Executor; + use lazy_static::lazy_static; + use object_store::{memory::InMemory, ObjectMeta, ObjectStore}; + use parking_lot::Mutex; + use parquet_file::storage::{ParquetStorage, StorageId}; + use test_helpers::{maybe_start_logging, timeout::FutureTimeout}; + + use crate::{ + buffer_tree::{ + namespace::name_resolver::mock::MockNamespaceNameProvider, + partition::{resolver::CatalogPartitionResolver, PartitionData, SortKeyState}, + post_write::mock::MockPostWriteObserver, + table::name_resolver::mock::MockTableNameProvider, + BufferTree, + }, + dml_sink::DmlSink, + ingest_state::IngestState, + persist::queue::PersistQueue, + test_util::{make_write_op, populate_catalog}, + TRANSITION_SHARD_INDEX, + }; + + use super::handle::PersistHandle; + + const TABLE_NAME: &str = "bananas"; + const NAMESPACE_NAME: &str = "platanos"; + const TRANSITION_SHARD_ID: ShardId = ShardId::new(84); + + lazy_static! { + static ref EXEC: Arc<Executor> = Arc::new(Executor::new_testing()); + static ref PARTITION_KEY: PartitionKey = PartitionKey::from("bananas"); + } + + /// Generate a [`PartitionData`] containing one write, and populate the + /// catalog such that the schema is set (by validating the schema) and the + /// partition entry exists (by driving the buffer tree to create it). + async fn partition_with_write(catalog: Arc<dyn Catalog>) -> Arc<Mutex<PartitionData>> { + // Create the namespace in the catalog and it's the schema + let (_shard_id, namespace_id, table_id) = populate_catalog( + &*catalog, + TRANSITION_SHARD_INDEX, + NAMESPACE_NAME, + TABLE_NAME, + ) + .await; + + // Init the buffer tree + let buf = BufferTree::new( + Arc::new(MockNamespaceNameProvider::default()), + Arc::new(MockTableNameProvider::new(TABLE_NAME)), + Arc::new(CatalogPartitionResolver::new(Arc::clone(&catalog))), + Arc::new(MockPostWriteObserver::default()), + Arc::new(metric::Registry::default()), + TRANSITION_SHARD_ID, + ); + + let write = make_write_op( + &PARTITION_KEY, + namespace_id, + TABLE_NAME, + table_id, + 0, + r#"bananas,region=Asturias temp=35 4242424242"#, + ); + + let mut repos = catalog + .repositories() + .with_timeout_panic(Duration::from_secs(1)) + .await; + + let schema = get_schema_by_id(namespace_id, &mut *repos) + .await + .expect("failed to find namespace schema"); + + // Insert the schema elements into the catalog + validate_or_insert_schema( + write.tables().map(|(_id, data)| (TABLE_NAME, data)), + &schema, + &mut *repos, + ) + .await + .expect("populating schema elements failed"); + + drop(repos); // Don't you love this testing-only deadlock bug? #3859 + + // Apply the write + buf.apply(DmlOperation::Write(write)) + .await + .expect("failed to apply write to buffer"); + + // And finally return the namespace + buf.partitions().next().unwrap() + } + + /// A complete integration test of the persistence system components. 
+ #[tokio::test] + async fn test_persist_integration() { + maybe_start_logging(); + + let object_storage: Arc<dyn ObjectStore> = Arc::new(InMemory::default()); + let storage = ParquetStorage::new(Arc::clone(&object_storage), StorageId::from("iox")); + let metrics = Arc::new(metric::Registry::default()); + let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metrics))); + let ingest_state = Arc::new(IngestState::default()); + + // Initialise the persist system. + let handle = PersistHandle::new( + 1, + 2, + Arc::clone(&ingest_state), + Arc::clone(&EXEC), + storage, + Arc::clone(&catalog), + &metrics, + ); + assert!(ingest_state.read().is_ok()); + + // Generate a partition with data + let partition = partition_with_write(Arc::clone(&catalog)).await; + let table_id = partition.lock().table_id(); + let partition_id = partition.lock().partition_id(); + let namespace_id = partition.lock().namespace_id(); + assert_matches!(partition.lock().sort_key(), SortKeyState::Provided(None)); + + // Transition it to "persisting". + let data = partition + .lock() + .mark_persisting() + .expect("partition with write should transition to persisting"); + + // Enqueue the persist job + let notify = handle.enqueue(Arc::clone(&partition), data).await; + assert!(ingest_state.read().is_ok()); + + // Wait for the persist to complete. + notify + .with_timeout(Duration::from_secs(10)) + .await + .expect("timeout waiting for completion notification") + .expect("worker task failed"); + + // Assert the partition persistence count increased, an indication that + // mark_persisted() was called. + assert_eq!(partition.lock().completed_persistence_count(), 1); + + // Assert the sort key was also updated + assert_matches!(partition.lock().sort_key(), SortKeyState::Provided(Some(p)) => { + assert_eq!(p.to_columns().collect::<Vec<_>>(), &["region", "time"]); + }); + + // Ensure a file was made visible in the catalog + let files = catalog + .repositories() + .await + .parquet_files() + .list_by_partition_not_to_delete(partition_id) + .await + .expect("query for parquet files failed"); + + // Validate a single file was inserted with the expected properties. + let (object_store_id, file_size_bytes) = assert_matches!(&*files, &[ParquetFile { + namespace_id: got_namespace_id, + table_id: got_table_id, + partition_id: got_partition_id, + object_store_id, + max_sequence_number, + row_count, + compaction_level, + file_size_bytes, + .. + }] => + { + assert_eq!(got_namespace_id, namespace_id); + assert_eq!(got_table_id, table_id); + assert_eq!(got_partition_id, partition_id); + + assert_eq!(row_count, 1); + assert_eq!(compaction_level, CompactionLevel::Initial); + + assert_eq!(max_sequence_number.get(), 0); // Unused, dummy value + + (object_store_id, file_size_bytes) + } + ); + + // Validate the file exists in object storage. + let files: Vec<ObjectMeta> = object_storage + .list(None) + .await + .expect("listing object storage failed") + .try_collect::<Vec<_>>() + .await + .expect("failed to list object store files"); + + assert_matches!( + &*files, + &[ObjectMeta { + ref location, + size, + .. + }] => { + let want_path = format!("{object_store_id}.parquet"); + assert!(location.as_ref().ends_with(&want_path)); + assert_eq!(size, file_size_bytes as usize); + } + ) + } +}
7b3fa4320924e68cfa225f80b593cf0b7cc4e57c
Dom Dwyer
2022-10-21 17:39:22
disable incremental snapshot generation
This commit removes the on-demand, incremental snapshot generation driven by queries. This functionality is "on hold" due to concerns documented in: https://github.com/influxdata/influxdb_iox/issues/5805 Incremental snapshots will be introduced alongside incremental compactions of those same snapshots.
null
refactor: disable incremental snapshot generation This commit removes the on-demand, incremental snapshot generation driven by queries. This functionality is "on hold" due to concerns documented in: https://github.com/influxdata/influxdb_iox/issues/5805 Incremental snapshots will be introduced alongside incremental compactions of those same snapshots.
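One consequence visible in the diff below is the `Queryable` trait change: a `Buffering` state that materialises Arrow data on demand (via `to_arrow(Selection::All)`) has no stored snapshot slice it could lend out, so `get_query_data` switches from returning a borrowed slice to returning owned handles. A simplified sketch of that signature, with types reduced to the essentials rather than the crate's full trait:

```rust
use std::{fmt::Debug, sync::Arc};

use arrow::record_batch::RecordBatch;

// Simplified sketch of the changed trait: returning owned, cheaply clonable
// Arc<RecordBatch> handles lets a state either hand back stored snapshots
// (Snapshot/Persisting) or build batches on the fly (Buffering), which a
// borrowed `&[Arc<RecordBatch>]` cannot express.
trait Queryable: Debug {
    fn get_query_data(&self) -> Vec<Arc<RecordBatch>>;
}
```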
diff --git a/ingester/src/data/partition/buffer/mutable_buffer.rs b/ingester/src/data/partition/buffer/mutable_buffer.rs index cd607a37bd..6b8ed74ae3 100644 --- a/ingester/src/data/partition/buffer/mutable_buffer.rs +++ b/ingester/src/data/partition/buffer/mutable_buffer.rs @@ -50,4 +50,8 @@ impl Buffer { pub(super) fn is_empty(&self) -> bool { self.buffer.is_none() } + + pub(super) fn buffer(&self) -> Option<&MutableBatch> { + self.buffer.as_ref() + } } diff --git a/ingester/src/data/partition/buffer/state_machine.rs b/ingester/src/data/partition/buffer/state_machine.rs index 24c73a1538..278d0384f9 100644 --- a/ingester/src/data/partition/buffer/state_machine.rs +++ b/ingester/src/data/partition/buffer/state_machine.rs @@ -6,14 +6,11 @@ use data_types::SequenceNumber; use mutable_batch::MutableBatch; mod buffering; -mod buffering_with_snapshot; mod persisting; mod snapshot; pub(in crate::data::partition::buffer) use buffering::*; -pub(in crate::data::partition::buffer) use buffering_with_snapshot::*; pub(crate) use persisting::*; -pub(in crate::data::partition::buffer) use snapshot::*; use crate::data::SequenceNumberRange; @@ -54,19 +51,14 @@ impl<A, B> Transition<A, B> { /// lifecycle within a partition buffer: /// /// ```text -/// ┌──────────────┐ -/// │ Buffering │ -/// └───────┬──────┘ -/// │ -/// ▼ -/// ┌ ─ ─ ─ ─ ─ ─ ─ ┌ ─ ─ ─ ─ ─ ─ ─ -/// ┌─────▶ Snapshot ├─────▶ Persisting │ -/// │ └ ─ ─ ─ ┬ ─ ─ ─ └ ─ ─ ─ ─ ─ ─ ─ -/// │ │ -/// │ ▼ -/// │ ┌───────────────────────┐ -/// └──│ BufferingWithSnapshot │ -/// └───────────────────────┘ +/// ┌──────────────┐ +/// │ Buffering │ +/// └───────┬──────┘ +/// │ +/// ▼ +/// ┌ ─ ─ ─ ─ ─ ─ ─ ┌ ─ ─ ─ ─ ─ ─ ─ +/// Snapshot ├─────▶ Persisting │ +/// └ ─ ─ ─ ─ ─ ─ ─ └ ─ ─ ─ ─ ─ ─ ─ /// ``` /// /// Boxes with dashed lines indicate immutable, queryable states that contain @@ -131,13 +123,14 @@ where /// Returns the current buffer data. /// /// This is always a cheap method call. - fn get_query_data(&self) -> &[Arc<RecordBatch>] { + fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { self.state.get_query_data() } } #[cfg(test)] mod tests { + use snapshot::*; use std::ops::Deref; use arrow_util::assert_batches_eq; @@ -158,17 +151,14 @@ mod tests { // Write some data to a buffer. buffer .write( - lp_to_mutable_batch(r#"bananas,tag=platanos great=true 668563242000000042"#).1, + lp_to_mutable_batch( + r#"bananas,tag=platanos great=true,how_much=42 668563242000000042"#, + ) + .1, SequenceNumber::new(0), ) .expect("write to empty buffer should succeed"); - // Snapshot the buffer into an immutable, queryable data format. - let buffer: BufferState<Snapshot> = match buffer.snapshot() { - Transition::Ok(v) => v, - Transition::Unchanged(_) => panic!("did not transition to snapshot state"), - }; - // Extract the queryable data from the buffer and validate it. 
// // Keep the data to validate they are ref-counted copies after further @@ -177,23 +167,19 @@ mod tests { let w1_data = buffer.get_query_data().to_owned(); let expected = vec![ - "+-------+----------+--------------------------------+", - "| great | tag | time |", - "+-------+----------+--------------------------------+", - "| true | platanos | 1991-03-10T00:00:42.000000042Z |", - "+-------+----------+--------------------------------+", + "+-------+----------+----------+--------------------------------+", + "| great | how_much | tag | time |", + "+-------+----------+----------+--------------------------------+", + "| true | 42 | platanos | 1991-03-10T00:00:42.000000042Z |", + "+-------+----------+----------+--------------------------------+", ]; assert_batches_eq!(&expected, &[w1_data[0].deref().clone()]); - // Transition the buffer into a mutable state in which it can accept - // writes. - let mut buffer: BufferState<BufferingWithSnapshot> = buffer.into_buffering(); - // Apply another write. buffer .write( lp_to_mutable_batch( - r#"bananas,tag=platanos great=true,how_much=1000 668563242000000042"#, + r#"bananas,tag=platanos great=true,how_much=1000 668563242000000043"#, ) .1, SequenceNumber::new(1), @@ -201,24 +187,23 @@ mod tests { .expect("write to empty buffer should succeed"); // Snapshot the buffer into an immutable, queryable data format. - let buffer: BufferState<Snapshot> = buffer.snapshot(); + let buffer: BufferState<Snapshot> = match buffer.snapshot() { + Transition::Ok(v) => v, + Transition::Unchanged(_) => panic!("did not transition to snapshot state"), + }; - // Verify the second write was buffered. + // Verify the writes are still queryable. let w2_data = buffer.get_query_data().to_owned(); let expected = vec![ "+-------+----------+----------+--------------------------------+", "| great | how_much | tag | time |", "+-------+----------+----------+--------------------------------+", - "| true | 1000 | platanos | 1991-03-10T00:00:42.000000042Z |", + "| true | 42 | platanos | 1991-03-10T00:00:42.000000042Z |", + "| true | 1000 | platanos | 1991-03-10T00:00:42.000000043Z |", "+-------+----------+----------+--------------------------------+", ]; - assert_batches_eq!(&expected, &[w2_data[1].deref().clone()]); - - // Verify the first write has not changed, and has not been - // re-ordered. - assert_eq!(w1_data, w2_data[..1]); - // Furthermore, ensure no data was actually copied - assert!(Arc::ptr_eq(&w1_data[0], &w2_data[0])); + assert_eq!(w2_data.len(), 1); + assert_batches_eq!(&expected, &[w2_data[0].deref().clone()]); // Ensure the same data is returned for a second read. { diff --git a/ingester/src/data/partition/buffer/state_machine/buffering.rs b/ingester/src/data/partition/buffer/state_machine/buffering.rs index 85ab01bbcd..5faac058fa 100644 --- a/ingester/src/data/partition/buffer/state_machine/buffering.rs +++ b/ingester/src/data/partition/buffer/state_machine/buffering.rs @@ -1,8 +1,15 @@ //! A write buffer. +use std::sync::Arc; + +use arrow::record_batch::RecordBatch; use mutable_batch::MutableBatch; +use schema::selection::Selection; -use crate::data::partition::buffer::{mutable_buffer::Buffer, traits::Writeable}; +use crate::data::partition::buffer::{ + mutable_buffer::Buffer, + traits::{Queryable, Writeable}, +}; use super::{snapshot::Snapshot, BufferState, Transition}; @@ -16,6 +23,29 @@ pub(crate) struct Buffering { buffer: Buffer, } +/// Implement on-demand querying of the buffered contents without storing the +/// generated snapshot. 
+/// +/// In the future this [`Queryable`] should NOT be implemented for +/// [`Buffering`], and instead snapshots should be incrementally generated and +/// compacted. See https://github.com/influxdata/influxdb_iox/issues/5805 for +/// context. +impl Queryable for Buffering { + fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + let data = self.buffer.buffer().map(|v| { + Arc::new( + v.to_arrow(Selection::All) + .expect("failed to snapshot buffer data"), + ) + }); + + match data { + Some(v) => vec![v], + None => vec![], + } + } +} + impl Writeable for Buffering { fn write(&mut self, batch: MutableBatch) -> Result<(), mutable_batch::Error> { self.buffer.buffer_write(batch) diff --git a/ingester/src/data/partition/buffer/state_machine/buffering_with_snapshot.rs b/ingester/src/data/partition/buffer/state_machine/buffering_with_snapshot.rs deleted file mode 100644 index 4b2af6d379..0000000000 --- a/ingester/src/data/partition/buffer/state_machine/buffering_with_snapshot.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! A mutable buffer, containing one or more snapshots. - -use std::sync::Arc; - -use arrow::record_batch::RecordBatch; -use mutable_batch::MutableBatch; - -use crate::data::partition::buffer::{mutable_buffer::Buffer, traits::Writeable}; - -use super::{snapshot::Snapshot, BufferState}; - -/// A mutable state that buffers incoming writes while holding at least one -/// previously generated buffer snapshot. -#[derive(Debug)] -pub(crate) struct BufferingWithSnapshot { - /// The buffer for incoming writes. - /// - /// NOTE: this buffer MAY be empty. - buffer: Buffer, - - /// Snapshots generated from previous buffer contents. - /// - /// INVARIANT: this array is always non-empty. - snapshots: Vec<Arc<RecordBatch>>, -} - -impl BufferingWithSnapshot { - pub(super) fn new(snapshots: Vec<Arc<RecordBatch>>) -> Self { - Self { - buffer: Buffer::default(), - snapshots, - } - } -} - -impl Writeable for BufferingWithSnapshot { - fn write(&mut self, batch: MutableBatch) -> Result<(), mutable_batch::Error> { - // TODO(5806): assert schema compatibility with existing snapshots - self.buffer.buffer_write(batch) - } -} - -impl BufferState<BufferingWithSnapshot> { - /// Snapshot the current buffer contents and transition to an immutable, - /// queryable state containing only snapshots. - /// - /// This call MAY be a NOP if the buffer has accrued no writes. - pub(crate) fn snapshot(self) -> BufferState<Snapshot> { - assert!(!self.state.snapshots.is_empty()); - - BufferState { - state: Snapshot::new( - self.state - .snapshots - .into_iter() - .chain(self.state.buffer.snapshot()) - .collect(), - ), - sequence_range: self.sequence_range, - } - } -} diff --git a/ingester/src/data/partition/buffer/state_machine/persisting.rs b/ingester/src/data/partition/buffer/state_machine/persisting.rs index 239747ff6b..925631e793 100644 --- a/ingester/src/data/partition/buffer/state_machine/persisting.rs +++ b/ingester/src/data/partition/buffer/state_machine/persisting.rs @@ -1,4 +1,4 @@ -//! A buffer in the "persisting" state, containing one or more snapshots. +//! A writfield1 buffer, with one or more snapshots. 
use std::sync::Arc; @@ -24,8 +24,8 @@ impl Persisting { } impl Queryable for Persisting { - fn get_query_data(&self) -> &[Arc<RecordBatch>] { - &self.snapshots + fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + self.snapshots.clone() } } diff --git a/ingester/src/data/partition/buffer/state_machine/snapshot.rs b/ingester/src/data/partition/buffer/state_machine/snapshot.rs index dd82501c29..85093254b9 100644 --- a/ingester/src/data/partition/buffer/state_machine/snapshot.rs +++ b/ingester/src/data/partition/buffer/state_machine/snapshot.rs @@ -1,5 +1,4 @@ -//! An immutable buffer, containing one or more snapshots in an efficient query -//! format. +//! A writfield1 buffer, with one or more snapshots. use std::sync::Arc; @@ -7,7 +6,7 @@ use arrow::record_batch::RecordBatch; use crate::data::partition::buffer::{state_machine::persisting::Persisting, traits::Queryable}; -use super::{buffering_with_snapshot::BufferingWithSnapshot, BufferState}; +use super::BufferState; /// An immutable, queryable FSM state containing at least one buffer snapshot. #[derive(Debug)] @@ -26,20 +25,12 @@ impl Snapshot { } impl Queryable for Snapshot { - fn get_query_data(&self) -> &[Arc<RecordBatch>] { - &self.snapshots + fn get_query_data(&self) -> Vec<Arc<RecordBatch>> { + self.snapshots.clone() } } impl BufferState<Snapshot> { - pub(crate) fn into_buffering(self) -> BufferState<BufferingWithSnapshot> { - assert!(!self.state.snapshots.is_empty()); - BufferState { - state: BufferingWithSnapshot::new(self.state.snapshots), - sequence_range: self.sequence_range, - } - } - pub(crate) fn into_persisting(self) -> BufferState<Persisting> { assert!(!self.state.snapshots.is_empty()); BufferState { diff --git a/ingester/src/data/partition/buffer/traits.rs b/ingester/src/data/partition/buffer/traits.rs index bff6dfda8f..24241910b6 100644 --- a/ingester/src/data/partition/buffer/traits.rs +++ b/ingester/src/data/partition/buffer/traits.rs @@ -13,5 +13,5 @@ pub(crate) trait Writeable: Debug { /// A state that can return the contents of the buffer as one or more /// [`RecordBatch`] instances. pub(crate) trait Queryable: Debug { - fn get_query_data(&self) -> &[Arc<RecordBatch>]; + fn get_query_data(&self) -> Vec<Arc<RecordBatch>>; }
45c8519a0b9f434ae234eeb09f073da08a7f8adb
Dom Dwyer
2023-03-15 15:10:37
assert macro import scopes
The assert_counter! and assert_histogram! macros use items in the metric crate, but the macros can be called from other crates/modules that may not have those items in scope.
null
refactor(metric): assert macro import scopes The assert_counter! and assert_histogram! macros use items in the metric crate, but the macros can be called from other crates/modules that may not have those items in scope.
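The underlying issue is standard `macro_rules!` path hygiene: an exported macro expands in the caller's crate, so any items it names must be spelled with a path the caller can resolve. The diff below does this by fully qualifying the `metric::` paths inside the macros. A toy sketch of the pattern follows; the macro name and body are illustrative, not the real `assert_counter!`:

```rust
// Illustrative macro, not the real assert_counter!: the expansion runs in
// the *caller's* crate, so `Metric`, `U64Counter` and `Attributes` are
// written as fully qualified `metric::` paths and the caller needs no
// matching `use` statements. (From inside the defining crate, `$crate::`
// paths are the other common way to get the same effect.)
#[macro_export]
macro_rules! fetch_counter {
    ($registry:expr, $name:expr) => {
        $registry
            .get_instrument::<metric::Metric<metric::U64Counter>>($name)
            .expect("failed to find metric with provided name")
            .get_observer(&metric::Attributes::from(&[]))
            .expect("failed to find metric with provided attributes")
            .fetch()
    };
}
```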
diff --git a/compactor2/src/components/commit/metrics.rs b/compactor2/src/components/commit/metrics.rs index fb1bf4d235..4a63cec426 100644 --- a/compactor2/src/components/commit/metrics.rs +++ b/compactor2/src/components/commit/metrics.rs @@ -305,7 +305,7 @@ where mod tests { use std::sync::Arc; - use metric::{assert_histogram, Attributes, Metric}; + use metric::{assert_histogram, Attributes}; use crate::components::commit::mock::{CommitHistoryEntry, MockCommit}; use iox_tests::ParquetFileBuilder; diff --git a/compactor2/src/components/partition_done_sink/metrics.rs b/compactor2/src/components/partition_done_sink/metrics.rs index cb00ef19f1..1999e8e0f4 100644 --- a/compactor2/src/components/partition_done_sink/metrics.rs +++ b/compactor2/src/components/partition_done_sink/metrics.rs @@ -83,7 +83,7 @@ where mod tests { use std::{collections::HashMap, sync::Arc}; - use metric::{assert_counter, Attributes, Metric}; + use metric::{assert_counter, Attributes}; use object_store::Error as ObjectStoreError; use crate::components::partition_done_sink::mock::MockPartitionDoneSink; diff --git a/compactor2/src/components/partition_filter/metrics.rs b/compactor2/src/components/partition_filter/metrics.rs index ccb2fa1326..a0291a63e7 100644 --- a/compactor2/src/components/partition_filter/metrics.rs +++ b/compactor2/src/components/partition_filter/metrics.rs @@ -85,7 +85,7 @@ where mod tests { use std::sync::Arc; - use metric::{assert_counter, Attributes, Metric}; + use metric::{assert_counter, Attributes}; use crate::{ components::partition_filter::has_files::HasFilesPartitionFilter, diff --git a/compactor2/src/components/partition_source/metrics.rs b/compactor2/src/components/partition_source/metrics.rs index aef81a4e26..372aa81509 100644 --- a/compactor2/src/components/partition_source/metrics.rs +++ b/compactor2/src/components/partition_source/metrics.rs @@ -64,7 +64,7 @@ where #[cfg(test)] mod tests { - use metric::{assert_counter, Attributes, Metric}; + use metric::{assert_counter, Attributes}; use crate::components::partition_source::mock::MockPartitionSource; use iox_tests::PartitionBuilder; diff --git a/compactor2/src/components/partitions_source/metrics.rs b/compactor2/src/components/partitions_source/metrics.rs index 40566fc74a..797460c563 100644 --- a/compactor2/src/components/partitions_source/metrics.rs +++ b/compactor2/src/components/partitions_source/metrics.rs @@ -69,7 +69,7 @@ where #[cfg(test)] mod tests { - use metric::{assert_counter, Attributes, Metric}; + use metric::assert_counter; use crate::components::partitions_source::mock::MockPartitionsSource; diff --git a/compactor2/src/components/split_or_compact/metrics.rs b/compactor2/src/components/split_or_compact/metrics.rs index ee8a2ef5c3..22407656d0 100644 --- a/compactor2/src/components/split_or_compact/metrics.rs +++ b/compactor2/src/components/split_or_compact/metrics.rs @@ -106,7 +106,7 @@ mod tests { use compactor2_test_utils::{create_overlapped_l0_l1_files_2, create_overlapped_l1_l2_files_2}; use data_types::CompactionLevel; - use metric::{assert_counter, assert_histogram, Attributes, Metric}; + use metric::{assert_counter, assert_histogram}; use crate::{ components::split_or_compact::{split_compact::SplitCompact, SplitOrCompact}, diff --git a/metric/src/counter.rs b/metric/src/counter.rs index 872a7a15fa..6e31585f4e 100644 --- a/metric/src/counter.rs +++ b/metric/src/counter.rs @@ -48,10 +48,10 @@ macro_rules! 
assert_counter { #[allow(unused)] let mut attr = None; $(attr = Some($attr);)* - let attr = attr.unwrap_or_else(|| Attributes::from(&[])); + let attr = attr.unwrap_or_else(|| metric::Attributes::from(&[])); let counter = $metrics - .get_instrument::<Metric<$counter>>($name) + .get_instrument::<metric::Metric<$counter>>($name) .expect("failed to find metric with provided name") .get_observer(&attr) .expect("failed to find metric with provided attributes") diff --git a/metric/src/histogram.rs b/metric/src/histogram.rs index 63fddb7912..4416c84978 100644 --- a/metric/src/histogram.rs +++ b/metric/src/histogram.rs @@ -106,10 +106,10 @@ macro_rules! assert_histogram { #[allow(unused)] let mut attr = None; $(attr = Some($attr);)* - let attr = attr.unwrap_or_else(|| Attributes::from(&[])); + let attr = attr.unwrap_or_else(|| metric::Attributes::from(&[])); let hist = $metrics - .get_instrument::<Metric<$hist>>($name) + .get_instrument::<metric::Metric<$hist>>($name) .expect("failed to find metric with provided name") .get_observer(&attr) .expect("failed to find metric with provided attributes")
6729b5681a1afc3ec183d08a5be54c9f9062b0fe
Marco Neumann
2023-05-23 16:27:11
re-transmit schema over flight if it changes (#7812)
* fix(ingester): re-transmit schema over flight if it changes Fixes https://github.com/influxdata/idpe/issues/17408 . So a `[Sendable]RecordBatchStream` contains `RecordBatch`es of the SAME schema. When the ingester crafts a response for a specific partition, this is also almost always the case; however, when there's a persist job running (I think) it may have multiple snapshots for a partition. These snapshots may have different schemas (since the ingester only creates columns if they contain any data). Now the current implementation munches all these snapshots into a single stream and hands them over to arrow flight, which has a high-perf encode routine (i.e. it does not re-check every single schema), so it sends the schema once and then sends the data for every batch (the data only, schema data is NOT repeated). On the receiver side (= querier) we decode that data and get confused why on earth some batches have a different column count compared to the schema. For the OG ingester I carefully crafted the response to ensure that we do not run into this problem, but apparently a number of rewrites and refactors broke that. So here is the fix: - remove the stream that isn't really a stream (and cannot error) - for each partition go over the `RecordBatch`es and chunk them according to the schema (because this check is likely cheaper than re-transmitting the schema for every `RecordBatch`) - adjust a bunch of testing code to cope with this * refactor: nicify code * test: adjust test
null
fix(ingester): re-transmit schema over flight if it changes (#7812) * fix(ingester): re-transmit schema over flight if it changes Fixes https://github.com/influxdata/idpe/issues/17408 . So a `[Sendable]RecordBatchStream` contains `RecordBatch`es of the SAME schema. When the ingester crafts a response for a specific partition, this is also almost always the case; however, when there's a persist job running (I think) it may have multiple snapshots for a partition. These snapshots may have different schemas (since the ingester only creates columns if they contain any data). Now the current implementation munches all these snapshots into a single stream and hands them over to arrow flight, which has a high-perf encode routine (i.e. it does not re-check every single schema), so it sends the schema once and then sends the data for every batch (the data only, schema data is NOT repeated). On the receiver side (= querier) we decode that data and get confused why on earth some batches have a different column count compared to the schema. For the OG ingester I carefully crafted the response to ensure that we do not run into this problem, but apparently a number of rewrites and refactors broke that. So here is the fix: - remove the stream that isn't really a stream (and cannot error) - for each partition go over the `RecordBatch`es and chunk them according to the schema (because this check is likely cheaper than re-transmitting the schema for every `RecordBatch`) - adjust a bunch of testing code to cope with this * refactor: nicify code * test: adjust test
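The fix described above boils down to grouping a partition's `RecordBatch`es into consecutive runs that share a schema, so each run can be encoded with its own schema message instead of assuming one schema for the whole partition. A minimal, self-contained sketch of that grouping; this is not the ingester's actual code and the function name is illustrative:

```rust
use arrow::record_batch::RecordBatch;

// Illustrative sketch: split a partition's batches into consecutive runs
// that share an identical schema. Comparing schemas locally like this is
// cheaper than re-transmitting the schema for every single batch.
fn chunk_by_schema(batches: Vec<RecordBatch>) -> Vec<Vec<RecordBatch>> {
    let mut runs: Vec<Vec<RecordBatch>> = Vec::new();
    for batch in batches {
        let same_schema = runs
            .last()
            .map_or(false, |run| run[0].schema() == batch.schema());
        if same_schema {
            // Unwrap is safe: `same_schema` implies at least one run exists.
            runs.last_mut().unwrap().push(batch);
        } else {
            runs.push(vec![batch]);
        }
    }
    runs
}
```

Each run could then be sent as its own Flight stream, so the schema is only repeated when it actually changes.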
diff --git a/ingester/Cargo.toml b/ingester/Cargo.toml index 8fe93aa97c..ed2dc28036 100644 --- a/ingester/Cargo.toml +++ b/ingester/Cargo.toml @@ -15,7 +15,6 @@ backoff = { version = "0.1.0", path = "../backoff" } bytes = "1.4.0" crossbeam-utils = "0.8.15" data_types = { version = "0.1.0", path = "../data_types" } -datafusion_util = { path = "../datafusion_util" } datafusion.workspace = true dml = { version = "0.1.0", path = "../dml" } flatbuffers = "23.1.21" diff --git a/ingester/src/buffer_tree/root.rs b/ingester/src/buffer_tree/root.rs index e18a669038..f78cfc4e18 100644 --- a/ingester/src/buffer_tree/root.rs +++ b/ingester/src/buffer_tree/root.rs @@ -232,7 +232,7 @@ mod tests { use assert_matches::assert_matches; use data_types::{PartitionId, PartitionKey}; use datafusion::{assert_batches_eq, assert_batches_sorted_eq}; - use futures::{StreamExt, TryStreamExt}; + use futures::StreamExt; use metric::{Attributes, Metric}; use super::*; @@ -360,10 +360,10 @@ mod tests { .query_exec(ARBITRARY_NAMESPACE_ID, ARBITRARY_TABLE_ID, vec![], None) .await .expect("query should succeed") - .into_record_batches() - .try_collect::<Vec<_>>() - .await - .expect("query failed"); + .into_partition_stream() + .flat_map(|ps| futures::stream::iter(ps.into_record_batches())) + .collect::<Vec<_>>() + .await; // Assert the contents of ARBITRARY_NAMESPACE_ID and ARBITRARY_TABLE_ID assert_batches_sorted_eq!( @@ -952,11 +952,7 @@ mod tests { let partition = partitions.pop().unwrap(); // Perform the partition read - let batches = datafusion::physical_plan::common::collect( - partition.into_record_batch_stream().unwrap(), - ) - .await - .expect("failed to collate query results"); + let batches = partition.into_record_batches(); // Assert the contents of p1 contains both the initial write, and the // 3rd write in a single RecordBatch. diff --git a/ingester/src/buffer_tree/table.rs b/ingester/src/buffer_tree/table.rs index 3bf805a8be..be7b586df8 100644 --- a/ingester/src/buffer_tree/table.rs +++ b/ingester/src/buffer_tree/table.rs @@ -6,7 +6,6 @@ use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; use data_types::{NamespaceId, PartitionKey, SequenceNumber, TableId}; -use datafusion_util::MemoryStream; use mutable_batch::MutableBatch; use parking_lot::Mutex; use schema::Projection; @@ -238,12 +237,10 @@ where Projection::Some(columns.as_ref()) }; - let data = Box::pin(MemoryStream::new( - data.project_selection(selection).into_iter().collect(), - )); - PartitionResponse::new(Some(data), id, completed_persistence_count) + let data = data.project_selection(selection).into_iter().collect(); + PartitionResponse::new(data, id, completed_persistence_count) } - None => PartitionResponse::new(None, id, completed_persistence_count), + None => PartitionResponse::new(vec![], id, completed_persistence_count), }; span.ok("read partition data"); diff --git a/ingester/src/query/partition_response.rs b/ingester/src/query/partition_response.rs index 43c5802749..9fd6e6c672 100644 --- a/ingester/src/query/partition_response.rs +++ b/ingester/src/query/partition_response.rs @@ -2,13 +2,14 @@ //! //! [`QueryResponse`]: super::response::QueryResponse +use arrow::record_batch::RecordBatch; use data_types::PartitionId; -use datafusion::physical_plan::SendableRecordBatchStream; /// Response data for a single partition. +#[derive(Debug)] pub(crate) struct PartitionResponse { /// Stream of snapshots. - batches: Option<SendableRecordBatchStream>, + batches: Vec<RecordBatch>, /// Partition ID. 
id: PartitionId, @@ -17,28 +18,9 @@ pub(crate) struct PartitionResponse { completed_persistence_count: u64, } -impl std::fmt::Debug for PartitionResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("PartitionResponse") - .field( - "batches", - &match self.batches { - Some(_) => "<SNAPSHOT STREAM>", - None => "<NO DATA>,", - }, - ) - .field("partition_id", &self.id) - .field( - "completed_persistence_count", - &self.completed_persistence_count, - ) - .finish() - } -} - impl PartitionResponse { pub(crate) fn new( - data: Option<SendableRecordBatchStream>, + data: Vec<RecordBatch>, id: PartitionId, completed_persistence_count: u64, ) -> Self { @@ -57,7 +39,7 @@ impl PartitionResponse { self.completed_persistence_count } - pub(crate) fn into_record_batch_stream(self) -> Option<SendableRecordBatchStream> { + pub(crate) fn into_record_batches(self) -> Vec<RecordBatch> { self.batches } } diff --git a/ingester/src/query/response.rs b/ingester/src/query/response.rs index e0b7f107bf..cc89d51f4a 100644 --- a/ingester/src/query/response.rs +++ b/ingester/src/query/response.rs @@ -2,10 +2,8 @@ //! //! [`QueryExec::query_exec()`]: super::QueryExec::query_exec() -use std::{future, pin::Pin}; +use std::pin::Pin; -use arrow::record_batch::RecordBatch; -use datafusion::error::DataFusionError; use futures::{Stream, StreamExt}; use super::partition_response::PartitionResponse; @@ -48,13 +46,4 @@ impl QueryResponse { pub(crate) fn into_partition_stream(self) -> impl Stream<Item = PartitionResponse> { self.partitions.0 } - - /// Reduce the [`QueryResponse`] to a stream of [`RecordBatch`]. - pub(crate) fn into_record_batches( - self, - ) -> impl Stream<Item = Result<RecordBatch, DataFusionError>> { - self.into_partition_stream() - .filter_map(|partition| future::ready(partition.into_record_batch_stream())) - .flatten() - } } diff --git a/ingester/src/query/result_instrumentation.rs b/ingester/src/query/result_instrumentation.rs index 21be8a2bfa..02f3a422a8 100644 --- a/ingester/src/query/result_instrumentation.rs +++ b/ingester/src/query/result_instrumentation.rs @@ -25,41 +25,34 @@ //! ┼ //! │ ┌ Observe ─╱┴╲─ ─ ─ ─ ─ ┐ //! ╔═══════════════════╗ -//! └ ─ ─ ─ ─ ─ ─ ▶ ║ PartitionResponse ║ │──────────────┐ -//! ╚═══════════════════╝ Injects -//! └ ─ ─ ─ ─ ─ ┬ ─ ─ ─ ─ ─ ┘ │ -//! │ ▼ -//! │ ┌───────────────────┐ -//! ┼ │BatchStreamRecorder│ -//! ┌ Observe ─╱┴╲─ ─ ─ ─ ─ ┐ └───────────────────┘ -//! ╔═══════════════════╗ │ -//! │ ║ RecordBatchStream ║ ├ ─ ─ ─ ─ ─ ─ ─ +//! └ ─ ─ ─ ─ ─ ─ ▶ ║ PartitionResponse ║ │ +//! ╚═══════════════════╝ +//! └ ─ ─ ─ ─ ─ ┬ ─ ─ ─ ─ ─ ┘ +//! │ +//! │ +//! ┼ +//! ┌ Observe ─╱┴╲─ ─ ─ ─ ─ ┐ +//! ╔═══════════════════╗ +//! │ ║ RecordBatchStream ║ │ //! ╚═══════════════════╝ //! └ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘ //! ``` //! //! The [`QueryMetricContext`] is injected into the [`QueryResponse`], recording -//! the lifetime of the [`QueryResponse`] itself, and further injecting -//! instances of [`BatchStreamRecorder`] into each [`PartitionResponse`] to -//! observe the per-partition stream of [`RecordBatch`] that are yielded from -//! it. +//! the lifetime of the [`QueryResponse`] itself, and observes the [`RecordBatch`]es +//! produced by each [`PartitionResponse`]. +//! +//! +//! 
[`RecordBatch`]: arrow::record_batch::RecordBatch use std::{ pin::Pin, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, - }, + sync::atomic::{AtomicUsize, Ordering}, task::{Context, Poll}, }; -use arrow::record_batch::RecordBatch; use async_trait::async_trait; use data_types::{NamespaceId, TableId}; -use datafusion::{ - error::DataFusionError, - physical_plan::{RecordBatchStream, SendableRecordBatchStream}, -}; use futures::Stream; use iox_time::{SystemProvider, Time, TimeProvider}; use metric::{DurationHistogram, Metric, U64Histogram, U64HistogramOptions}; @@ -82,24 +75,21 @@ use crate::query::{ /// /// Additionally the distribution of row, partition and [`RecordBatch`] counts /// are recorded. +/// +/// +/// [`RecordBatch`]: arrow::record_batch::RecordBatch #[derive(Debug, Clone)] pub(crate) struct QueryResultInstrumentation<T, P = SystemProvider> { inner: T, time_provider: P, /// A histogram to capture the consume time for a stream that was entirely - /// consumed (yielded [`Poll::Ready(None)`]) without ever observing an - /// [`Err`]. - completed_ok: DurationHistogram, + /// consumed (yielded [`Poll::Ready(None)`]). + completed: DurationHistogram, - /// As above but the stream returned at least one [`Err`] item; the stream - /// was still consumed to completion. - completed_err: DurationHistogram, - - /// Like [`Self::completed_ok`], but for a stream that was not consumed to + /// Like [`Self::completed`], but for a stream that was not consumed to /// completion (dropped before returning [`Poll::Ready(None)`])]). - aborted_ok: DurationHistogram, - aborted_err: DurationHistogram, + aborted: DurationHistogram, // Histograms to capture the distribution of row/batch/partition // counts per query at the end of the query. @@ -173,10 +163,8 @@ impl<T> QueryResultInstrumentation<T> { Self { inner, time_provider: Default::default(), - completed_ok: duration.recorder(&[("request", "complete"), ("has_error", "false")]), - completed_err: duration.recorder(&[("request", "complete"), ("has_error", "true")]), - aborted_ok: duration.recorder(&[("request", "incomplete"), ("has_error", "false")]), - aborted_err: duration.recorder(&[("request", "incomplete"), ("has_error", "true")]), + completed: duration.recorder(&[("request", "complete")]), + aborted: duration.recorder(&[("request", "incomplete")]), row_hist, record_batch_hist, partition_hist, @@ -193,10 +181,8 @@ impl<T, P> QueryResultInstrumentation<T, P> { QueryResultInstrumentation { inner: self.inner, time_provider, - completed_ok: self.completed_ok, - completed_err: self.completed_err, - aborted_ok: self.aborted_ok, - aborted_err: self.aborted_err, + completed: self.completed, + aborted: self.aborted, row_hist: self.row_hist, record_batch_hist: self.record_batch_hist, partition_hist: self.partition_hist, @@ -230,10 +216,8 @@ where stream.into_partition_stream(), started_at, self.time_provider.clone(), - self.completed_ok.clone(), - self.completed_err.clone(), - self.aborted_ok.clone(), - self.aborted_err.clone(), + self.completed.clone(), + self.aborted.clone(), self.row_hist.clone(), self.record_batch_hist.clone(), self.partition_hist.clone(), @@ -248,10 +232,11 @@ where /// Once the last [`PartitionResponse`] is consumed to completion, this type is /// dropped and the metrics it has gathered are emitted at drop time. 
/// -/// This type is responsible for decorating all [`PartitionResponse`] yielded -/// from the result stream with [`BatchStreamRecorder`] instances, in turn -/// capturing the statistics of each [`RecordBatch`] in the -/// [`PartitionResponse`]. +/// This type is responsible for capturing the statistics of each [`RecordBatch`] +/// in the [`PartitionResponse`]. +/// +/// +/// [`RecordBatch`]: arrow::record_batch::RecordBatch #[pin_project(PinnedDrop)] #[derive(Debug)] struct QueryMetricContext<S, P = SystemProvider> @@ -264,8 +249,13 @@ where #[pin] inner: S, - /// The metric state shared with child [`BatchStreamRecorder`] instances. - state: Arc<MetricState>, + /// Running counts of row, partition, and [`RecordBatch`] + /// returned for this query so far. + /// + /// + /// [`RecordBatch`]: arrow::record_batch::RecordBatch + row_count: AtomicUsize, + record_batch_count: AtomicUsize, /// The timestamp at which the read request began, inclusive of the work /// required to acquire the inner stream (which may involve fetching all the @@ -281,10 +271,8 @@ where partition_count: usize, /// The latency histograms faceted by completion/error state. - completed_ok: DurationHistogram, - completed_err: DurationHistogram, - aborted_ok: DurationHistogram, - aborted_err: DurationHistogram, + completed: DurationHistogram, + aborted: DurationHistogram, /// Row/record batch/partition count distribution histograms. row_hist: U64Histogram, @@ -301,10 +289,8 @@ where stream: S, started_at: Time, time_provider: P, - completed_ok: DurationHistogram, - completed_err: DurationHistogram, - aborted_ok: DurationHistogram, - aborted_err: DurationHistogram, + completed: DurationHistogram, + aborted: DurationHistogram, row_hist: U64Histogram, record_batch_hist: U64Histogram, partition_hist: U64Histogram, @@ -314,15 +300,14 @@ where time_provider, started_at, completed_at: None, - completed_ok, - completed_err, - aborted_ok, - aborted_err, + completed, + aborted, row_hist, record_batch_hist, partition_hist, partition_count: 0, - state: Default::default(), + row_count: Default::default(), + record_batch_count: Default::default(), } } } @@ -348,16 +333,15 @@ where // And wrap the underlying stream of RecordBatch for this // partition with a metric observer. - let record_stream = p.into_record_batch_stream().map(|s| { - Box::pin(BatchStreamRecorder::new(s, Arc::clone(this.state))) - as SendableRecordBatchStream - }); - - Poll::Ready(Some(PartitionResponse::new( - record_stream, - id, - persist_count, - ))) + let data = p.into_record_batches(); + this.row_count.fetch_add( + data.iter().map(|batch| batch.num_rows()).sum::<usize>(), + Ordering::Relaxed, + ); + this.record_batch_count + .fetch_add(data.len(), Ordering::Relaxed); + + Poll::Ready(Some(PartitionResponse::new(data, id, persist_count))) } Poll::Ready(None) => { // Record the wall clock timestamp of the stream end. @@ -380,9 +364,8 @@ where { fn drop(self: Pin<&mut Self>) { // Record the captured metrics. - let did_observe_error = self.state.did_observe_error.load(Ordering::Relaxed); - let row_count = self.state.row_count.load(Ordering::Relaxed) as u64; - let record_batch_count = self.state.record_batch_count.load(Ordering::Relaxed) as u64; + let row_count = self.row_count.load(Ordering::Relaxed) as u64; + let record_batch_count = self.record_batch_count.load(Ordering::Relaxed) as u64; let partition_count = self.partition_count; // Record the row/record batch/partition counts for this query. 
@@ -395,10 +378,8 @@ where // // If completed_at is None, the stream was aborted before completion. let hist = match self.completed_at { - Some(_) if !did_observe_error => &self.completed_ok, - Some(_) => &self.completed_err, - None if !did_observe_error => &self.aborted_ok, - None => &self.aborted_err, + Some(_) => &self.completed, + None => &self.aborted, }; // Record the duration, either up to the time of stream completion, or @@ -415,104 +396,28 @@ where match self.completed_at { Some(_) => debug!( ?duration, - did_observe_error, - row_count, - record_batch_count, - partition_count, - "completed streaming query results", + row_count, record_batch_count, partition_count, "completed streaming query results", ), None => debug!( ?duration, - did_observe_error, - row_count, - record_batch_count, - partition_count, - "aborted streaming query results", + row_count, record_batch_count, partition_count, "aborted streaming query results", ), }; } } -/// State shared between the parent [`QueryMetricContext`] and all of the child -/// [`BatchStreamRecorder`] it has instantiated. +/// State for every call query (used to aggregate data that will later be written into histograms). #[derive(Debug, Default)] struct MetricState { - /// True if at least one [`Result`] yielded by this result stream so far has - /// been an [`Err`]. - // - /// This is used to select the correct success/error histogram which records - /// the operation duration. - did_observe_error: AtomicBool, - /// Running counts of row, partition, and [`RecordBatch`] /// returned for this query so far. + /// + /// + /// [`RecordBatch`]: arrow::record_batch::RecordBatch row_count: AtomicUsize, record_batch_count: AtomicUsize, } -/// Capture row/[`RecordBatch`]/error statistics. -/// -/// Inspects each [`RecordBatch`] yielded in the result stream, scoped to a -/// single [`PartitionResponse`]. -#[pin_project] -struct BatchStreamRecorder { - #[pin] - inner: SendableRecordBatchStream, - shared_state: Arc<MetricState>, -} - -impl BatchStreamRecorder { - fn new(stream: SendableRecordBatchStream, shared_state: Arc<MetricState>) -> Self { - Self { - inner: stream, - shared_state, - } - } -} - -impl RecordBatchStream for BatchStreamRecorder { - fn schema(&self) -> arrow::datatypes::SchemaRef { - self.inner.schema() - } -} - -impl Stream for BatchStreamRecorder { - type Item = Result<RecordBatch, DataFusionError>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { - let this = self.project(); - - let res = this.inner.poll_next(cx); - match &res { - Poll::Ready(Some(Ok(batch))) => { - // Record the count statistics in this batch. - this.shared_state - .row_count - .fetch_add(batch.num_rows(), Ordering::Relaxed); - this.shared_state - .record_batch_count - .fetch_add(1, Ordering::Relaxed); - } - Poll::Ready(Some(Err(_e))) => { - // Record that at least one poll returned an error. - this.shared_state - .did_observe_error - .store(true, Ordering::Relaxed); - } - Poll::Ready(None) => {} - Poll::Pending => {} - } - - res - } - - fn size_hint(&self) -> (usize, Option<usize>) { - // Impl the default size_hint() so this wrapper doesn't mask the size - // hint from the inner stream, if any. 
- self.inner.size_hint() - } -} - #[cfg(test)] mod tests { use std::{sync::Arc, time::Duration}; @@ -523,7 +428,6 @@ mod tests { use arrow::array::{Float32Array, Int64Array}; use data_types::PartitionId; - use datafusion::physical_plan::stream::RecordBatchStreamAdapter; use futures::{stream, StreamExt}; use iox_time::MockProvider; use metric::{assert_histogram, Attributes}; @@ -540,7 +444,7 @@ mod tests { // Construct a stream with no batches. let stream = PartitionStream::new(stream::iter([PartitionResponse::new( - None, + vec![], PartitionId::new(42), 42, )])); @@ -560,11 +464,7 @@ mod tests { // Drain the query results, moving past any errors, and collecting the // final set of all Ok record batches for comparison. - let _batches = response - .into_record_batches() - .filter_map(|v| async { v.ok() }) - .collect::<Vec<_>>() - .await; + let _partitions = response.into_partition_stream().collect::<Vec<_>>().await; assert_histogram!( metrics, @@ -591,7 +491,7 @@ mod tests { metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]), + labels = Attributes::from(&[("request", "complete")]), samples = 1, sum = TIME_STEP, ); @@ -599,21 +499,7 @@ mod tests { metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]), + labels = Attributes::from(&[("request", "incomplete")]), samples = 0, ); } @@ -659,11 +545,7 @@ mod tests { // Drain the query results, moving past any errors, and collecting the // final set of all Ok record batches for comparison. 
- let _batches = response - .into_record_batches() - .filter_map(|v| async { v.ok() }) - .collect::<Vec<_>>() - .await; + let _partitions = response.into_partition_stream().collect::<Vec<_>>().await; assert_histogram!( metrics, @@ -690,7 +572,7 @@ mod tests { metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]), + labels = Attributes::from(&[("request", "complete")]), samples = 1, sum = TIME_STEP, ); @@ -698,21 +580,7 @@ mod tests { metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]), + labels = Attributes::from(&[("request", "incomplete")]), samples = 0, ); } @@ -784,31 +652,17 @@ mod tests { metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]), + labels = Attributes::from(&[("request", "complete")]), samples = 0, ); assert_histogram!( metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]), + labels = Attributes::from(&[("request", "incomplete")]), samples = 1, sum = TIME_STEP, // It was recorded as an incomplete request ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]), - samples = 0, - ); } /// A query result which is dropped after partially reading the data should @@ -849,234 +703,25 @@ mod tests { // Now the response has been created, advance the clock mock_time.inc(TIME_STEP); - let mut response = response.into_record_batches(); - let got = response - .next() - .await - .expect("should yield first batch") - .expect("mock doesn't return error"); - drop(response); - - assert_histogram!( - metrics, - U64Histogram, - "ingester_query_result_row", - samples = 1, - sum = got.num_rows() as u64, - ); - assert_histogram!( - metrics, - U64Histogram, - "ingester_query_result_record_batch", - samples = 1, - sum = 1, - ); - assert_histogram!( - metrics, - U64Histogram, - "ingester_query_result_partition", - samples = 1, - sum = 1, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]), - samples = 1, - sum = TIME_STEP, // It was recorded as an incomplete request - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = 
Attributes::from(&[("request", "incomplete"), ("has_error", "true")]), - samples = 0, - ); - } - - /// A query result which is dropped when observing an error should record - /// the various count statistics from any yielded batches and categorise the - /// result as having observed an error. - #[tokio::test] - async fn test_multi_partition_stream_with_error_abort() { - let metrics = metric::Registry::default(); - - // Construct the set of partitions and their record batches - let (ok_batch, schema) = make_batch!( - Int64Array("c" => vec![1, 2, 3, 4, 5]), - ); - - let stream = Box::pin(RecordBatchStreamAdapter::new( - schema, - stream::iter([ - Ok(ok_batch.clone()), - Err(DataFusionError::Internal("bananas".to_string())), - Ok(ok_batch), - ]), - )) as SendableRecordBatchStream; - - let stream = PartitionStream::new(stream::iter([PartitionResponse::new( - Some(stream), - PartitionId::new(1), - 42, - )])); - - let mock_time = Arc::new(MockProvider::new(Time::MIN)); - let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream))); - let layer = QueryResultInstrumentation::new(mock_inner, &metrics) - .with_time_provider(Arc::clone(&mock_time)); - - let response = layer - .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None) - .await - .expect("query should succeed"); - - // Now the response has been created, advance the clock - mock_time.inc(TIME_STEP); - - let mut response = response.into_record_batches(); - let got = response - .next() - .await - .expect("should yield first batch") - .expect("mock doesn't return error"); - - response - .next() - .await - .expect("more results should be available") - .expect_err("this batch should be an error"); - - // Drop the rest of the batches after observing an error. + let mut response = response.into_partition_stream(); + let got = response.next().await.expect("should yield first batch"); drop(response); - assert_histogram!( - metrics, - U64Histogram, - "ingester_query_result_row", - samples = 1, - sum = got.num_rows() as u64, - ); - assert_histogram!( - metrics, - U64Histogram, - "ingester_query_result_record_batch", - samples = 1, - sum = 1, - ); - assert_histogram!( - metrics, - U64Histogram, - "ingester_query_result_partition", - samples = 1, - sum = 1, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]), - samples = 1, - sum = TIME_STEP, // Recorded as an incomplete request with error - ); - } - - /// A query result which is consumed to completion even after observing an - /// error should be correctly catagorised. 
- #[tokio::test] - async fn test_multi_partition_stream_with_error_completion() { - let metrics = metric::Registry::default(); - - // Construct the set of partitions and their record batches - let (ok_batch, schema) = make_batch!( - Int64Array("c" => vec![1, 2, 3, 4, 5]), - ); - - let stream = Box::pin(RecordBatchStreamAdapter::new( - schema, - stream::iter([ - Ok(ok_batch.clone()), - Err(DataFusionError::Internal("bananas".to_string())), - Ok(ok_batch), - ]), - )) as SendableRecordBatchStream; - - let stream = PartitionStream::new(stream::iter([PartitionResponse::new( - Some(stream), - PartitionId::new(1), - 42, - )])); - - let mock_time = Arc::new(MockProvider::new(Time::MIN)); - let mock_inner = MockQueryExec::default().with_result(Ok(QueryResponse::new(stream))); - let layer = QueryResultInstrumentation::new(mock_inner, &metrics) - .with_time_provider(Arc::clone(&mock_time)); - - let response = layer - .query_exec(NAMESPACE_ID, TABLE_ID, vec![], None) - .await - .expect("query should succeed"); - - // Now the response has been created, advance the clock - mock_time.inc(TIME_STEP); - - // Drain the query results, moving past any errors, and collecting the - // final set of all Ok record batches for comparison. - let _batches = response - .into_record_batches() - .filter_map(|v| async { v.ok() }) - .collect::<Vec<_>>() - .await; + let batches = got.into_record_batches(); assert_histogram!( metrics, U64Histogram, "ingester_query_result_row", samples = 1, - sum = 10, // 5 + 5 + sum = batches.iter().map(|batch| batch.num_rows()).sum::<usize>() as u64, ); assert_histogram!( metrics, U64Histogram, "ingester_query_result_record_batch", samples = 1, - sum = 2, + sum = batches.len() as u64, ); assert_histogram!( metrics, @@ -1089,30 +734,16 @@ mod tests { metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "false")]), + labels = Attributes::from(&[("request", "complete")]), samples = 0, ); assert_histogram!( metrics, DurationHistogram, "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "complete"), ("has_error", "true")]), + labels = Attributes::from(&[("request", "incomplete")]), samples = 1, - sum = TIME_STEP, // Recorded as a complete request with error - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "false")]), - samples = 0, - ); - assert_histogram!( - metrics, - DurationHistogram, - "ingester_query_stream_duration", - labels = Attributes::from(&[("request", "incomplete"), ("has_error", "true")]), - samples = 0, + sum = TIME_STEP, // It was recorded as an incomplete request ); } } diff --git a/ingester/src/server/grpc/query.rs b/ingester/src/server/grpc/query.rs index aba97e47ce..a4b0f66d4b 100644 --- a/ingester/src/server/grpc/query.rs +++ b/ingester/src/server/grpc/query.rs @@ -306,29 +306,43 @@ fn encode_response( response.into_partition_stream().flat_map(move |partition| { let partition_id = partition.id(); let completed_persistence_count = partition.completed_persistence_count(); + + // prefix payload data w/ metadata for that particular partition let head = futures::stream::once(async move { encode_partition(partition_id, completed_persistence_count, ingester_id) }); - match partition.into_record_batch_stream() { - Some(stream) => { - let stream = stream.map_err(|e| FlightError::ExternalError(Box::new(e))); - - let tail = FlightDataEncoderBuilder::new().build(stream); 
- - head.chain(tail).boxed() - } - None => head.boxed(), + // An output vector of FlightDataEncoder streams, each entry stream with + // a differing schema. + // + // Optimized for the common case of there being a single consistent + // schema across all batches (1 stream). + let mut output = Vec::with_capacity(1); + + let mut batch_iter = partition.into_record_batches().into_iter().peekable(); + + // While there are more batches to process. + while let Some(schema) = batch_iter.peek().map(|v| v.schema()) { + output.push( + FlightDataEncoderBuilder::new().build(futures::stream::iter( + // Take all the RecordBatch with a matching schema + std::iter::from_fn(|| batch_iter.next_if(|v| v.schema() == schema)) + .map(Ok) + .collect::<Vec<Result<_, FlightError>>>(), + )), + ) } + + head.chain(futures::stream::iter(output).flatten()) }) } #[cfg(test)] mod tests { use arrow::array::{Float64Array, Int32Array}; - use arrow_flight::decode::FlightRecordBatchStream; + use arrow_flight::decode::{DecodedPayload, FlightRecordBatchStream}; + use assert_matches::assert_matches; use bytes::Bytes; - use datafusion_util::MemoryStream; use tonic::Code; use crate::{ @@ -375,30 +389,40 @@ mod tests { /// Regression test for https://github.com/influxdata/idpe/issues/17408 #[tokio::test] - #[should_panic( - expected = "Invalid argument error: number of columns(1) must match number of fields(2) in schema" - )] async fn test_chunks_with_different_schemas() { - let (batch1, _schema1) = make_batch!( + let ingester_id = IngesterId::new(); + let (batch1, schema1) = make_batch!( Float64Array("float" => vec![1.1, 2.2, 3.3]), Int32Array("int" => vec![1, 2, 3]), ); - let (batch2, _schema2) = make_batch!( - Int32Array("int" => vec![3, 4]), + let (batch2, schema2) = make_batch!( + Float64Array("float" => vec![4.4]), + Int32Array("int" => vec![4]), + ); + assert_eq!(schema1, schema2); + let (batch3, schema3) = make_batch!( + Int32Array("int" => vec![5, 6]), + ); + let (batch4, schema4) = make_batch!( + Float64Array("float" => vec![7.7]), + Int32Array("int" => vec![8]), ); + assert_eq!(schema1, schema4); let flight = FlightService::new( MockQueryExec::default().with_result(Ok(QueryResponse::new(PartitionStream::new( futures::stream::iter([PartitionResponse::new( - Some(Box::pin(MemoryStream::new(vec![ + vec![ batch1.clone(), batch2.clone(), - ]))), - PartitionId::new(1), - 1, + batch3.clone(), + batch4.clone(), + ], + PartitionId::new(2), + 42, )]), )))), - IngesterId::new(), + ingester_id, 100, &metric::Registry::default(), ); @@ -412,10 +436,82 @@ mod tests { .unwrap() .into_inner() .map_err(FlightError::Tonic); - let batch_stream = FlightRecordBatchStream::new_from_flight_data(response_stream); - let batches = batch_stream.try_collect::<Vec<_>>().await.unwrap(); - assert_eq!(batches.len(), 2); - assert_eq!(batches[0], batch1); - assert_eq!(batches[1], batch2); + let flight_decoder = + FlightRecordBatchStream::new_from_flight_data(response_stream).into_inner(); + let flight_data = flight_decoder.try_collect::<Vec<_>>().await.unwrap(); + assert_eq!(flight_data.len(), 8); + + // partition info + assert_matches!(flight_data[0].payload, DecodedPayload::None); + let md_actual = + proto::IngesterQueryResponseMetadata::decode(flight_data[0].app_metadata()).unwrap(); + let md_expected = proto::IngesterQueryResponseMetadata { + partition_id: 2, + ingester_uuid: ingester_id.to_string(), + completed_persistence_count: 42, + }; + assert_eq!(md_actual, md_expected); + + // first & second chunk + match &flight_data[1].payload { + 
DecodedPayload::Schema(actual) => { + assert_eq!(actual, &schema1); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } + match &flight_data[2].payload { + DecodedPayload::RecordBatch(actual) => { + assert_eq!(actual, &batch1); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } + match &flight_data[3].payload { + DecodedPayload::RecordBatch(actual) => { + assert_eq!(actual, &batch2); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } + + // third chunk + match &flight_data[4].payload { + DecodedPayload::Schema(actual) => { + assert_eq!(actual, &schema3); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } + match &flight_data[5].payload { + DecodedPayload::RecordBatch(actual) => { + assert_eq!(actual, &batch3); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } + + // forth chunk + match &flight_data[6].payload { + DecodedPayload::Schema(actual) => { + assert_eq!(actual, &schema4); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } + match &flight_data[7].payload { + DecodedPayload::RecordBatch(actual) => { + assert_eq!(actual, &batch4); + } + other => { + panic!("Unexpected payload: {other:?}"); + } + } } } diff --git a/ingester/src/test_util.rs b/ingester/src/test_util.rs index 2bda8c2721..8332d691fc 100644 --- a/ingester/src/test_util.rs +++ b/ingester/src/test_util.rs @@ -223,7 +223,6 @@ macro_rules! make_partition_stream { )+ ) => {{ use arrow::datatypes::Schema; - use datafusion::physical_plan::memory::MemoryStream; use $crate::query::{response::PartitionStream, partition_response::PartitionResponse}; use futures::stream; @@ -236,10 +235,10 @@ macro_rules! make_partition_stream { batches.push(batch); schema = Schema::try_merge([schema, (*this_schema).clone()]).expect("incompatible batch schemas"); )+ + drop(schema); - let batch = MemoryStream::try_new(batches, Arc::new(schema), None).unwrap(); PartitionResponse::new( - Some(Box::pin(batch)), + batches, $id, 42, ) diff --git a/ingester/tests/write.rs b/ingester/tests/write.rs index c046fa6ba9..e7f9a20bdb 100644 --- a/ingester/tests/write.rs +++ b/ingester/tests/write.rs @@ -80,7 +80,7 @@ async fn write_query() { let hist = ctx .get_metric::<DurationHistogram, _>( "ingester_query_stream_duration", - &[("request", "complete"), ("has_error", "false")], + &[("request", "complete")], ) .fetch(); assert_eq!(hist.sample_count(), 1);
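The encode_response change in the record above stops assuming one schema per partition: it walks the partition's RecordBatches with a peekable iterator and opens a fresh Flight encoder whenever the schema changes, so consecutive batches that share a schema travel in a single stream. A minimal sketch of that grouping pattern, using only the standard library's Peekable::next_if; the group_consecutive helper and the string stand-ins for Arrow schemas are illustrative, not code from the repository:

// Group consecutive items that share a key into runs, mirroring how the
// encoder groups RecordBatches by schema. `key` stands in for `batch.schema()`.
fn group_consecutive<T, K>(
    items: impl IntoIterator<Item = T>,
    key: impl Fn(&T) -> K,
) -> Vec<Vec<T>>
where
    K: PartialEq,
{
    let mut iter = items.into_iter().peekable();
    let mut runs = Vec::new();
    // Peek at the next item's key, then drain every following item whose key matches it.
    while let Some(k) = iter.peek().map(|item| key(item)) {
        let run: Vec<T> = std::iter::from_fn(|| iter.next_if(|item| key(item) == k)).collect();
        runs.push(run);
    }
    runs
}

fn main() {
    // "a" and "b" stand in for two different Arrow schemas.
    let batches = ["a", "a", "b", "a"];
    let runs = group_consecutive(batches, |b| *b);
    assert_eq!(runs, vec![vec!["a", "a"], vec!["b"], vec!["a"]]);
}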
87b553fe9d61eaa535b845a848e4caa318df801f
Dom Dwyer
2023-01-23 14:53:37
WARN logs w/ endpoint for unhealthy upstream
Changes the DEBUG log event to a WARN now that it includes the endpoint to which the event applies.
null
feat: WARN logs w/ endpoint for unhealthy upstream Changes the DEBUG log event to a WARN now that it includes the endpoint to which the event applies.
diff --git a/ioxd_router/src/lib.rs b/ioxd_router/src/lib.rs index f04f27a2a9..0d2b8841f4 100644 --- a/ioxd_router/src/lib.rs +++ b/ioxd_router/src/lib.rs @@ -262,11 +262,10 @@ pub async fn create_router2_server_type( // Hack to handle multiple ingester addresses separated by commas in potentially many uses of // the CLI arg let ingester_connections = router_config.ingester_addresses.join(","); - let ingester_connections = ingester_connections.split(',').map(|s| { - let endpoint = Endpoint::from_shared(format!("http://{s}")) + let ingester_connections = ingester_connections.split(',').map(|addr| { + let endpoint = Endpoint::from_shared(format!("http://{addr}")) .expect("invalid ingester connection address"); - - LazyConnector::new(endpoint) + (LazyConnector::new(endpoint), addr) }); // Initialise the DML handler that sends writes to the ingester using the RPC write path. diff --git a/router/src/dml_handlers/rpc_write.rs b/router/src/dml_handlers/rpc_write.rs index 4b76432b1d..c45c32333b 100644 --- a/router/src/dml_handlers/rpc_write.rs +++ b/router/src/dml_handlers/rpc_write.rs @@ -17,7 +17,7 @@ use hashbrown::HashMap; use mutable_batch::MutableBatch; use mutable_batch_pb::encode::encode_write; use observability_deps::tracing::*; -use std::{fmt::Debug, time::Duration}; +use std::{fmt::Debug, sync::Arc, time::Duration}; use thiserror::Error; use trace::ctx::SpanContext; @@ -71,12 +71,17 @@ pub struct RpcWrite<C> { impl<C> RpcWrite<C> { /// Initialise a new [`RpcWrite`] that sends requests to an arbitrary /// downstream Ingester, using a round-robin strategy. - pub fn new(endpoints: impl IntoIterator<Item = C>) -> Self + pub fn new<N>(endpoints: impl IntoIterator<Item = (C, N)>) -> Self where C: Send + Sync + Debug, + N: Into<Arc<str>>, { Self { - endpoints: Balancer::new(endpoints.into_iter().map(CircuitBreakingClient::new)), + endpoints: Balancer::new( + endpoints + .into_iter() + .map(|(client, name)| CircuitBreakingClient::new(client, name.into())), + ), } } } @@ -219,7 +224,7 @@ mod tests { // Init the write handler with a mock client to capture the rpc calls. let client = Arc::new(MockWriteClient::default()); - let handler = RpcWrite::new([Arc::clone(&client)]); + let handler = RpcWrite::new([(Arc::clone(&client), "mock client")]); // Drive the RPC writer let got = handler @@ -271,7 +276,10 @@ mod tests { .with_ret([Err(RpcWriteError::Upstream(tonic::Status::internal("")))]), ); let client2 = Arc::new(MockWriteClient::default()); - let handler = RpcWrite::new([Arc::clone(&client1), Arc::clone(&client2)]); + let handler = RpcWrite::new([ + (Arc::clone(&client1), "client1"), + (Arc::clone(&client2), "client2"), + ]); // Drive the RPC writer let got = handler diff --git a/router/src/dml_handlers/rpc_write/balancer.rs b/router/src/dml_handlers/rpc_write/balancer.rs index 1ad1fc4d57..64a3644604 100644 --- a/router/src/dml_handlers/rpc_write/balancer.rs +++ b/router/src/dml_handlers/rpc_write/balancer.rs @@ -101,13 +101,15 @@ mod tests { // two returns a unhealthy state, one is healthy. 
let circuit_err_1 = Arc::new(MockCircuitBreaker::default()); circuit_err_1.set_usable(false); - let client_err_1 = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_err_1)); + let client_err_1 = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_err_1)); let circuit_err_2 = Arc::new(MockCircuitBreaker::default()); circuit_err_2.set_usable(false); - let client_err_2 = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_err_2)); + let client_err_2 = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_err_2)); assert_eq!(circuit_err_1.ok_count(), 0); assert_eq!(circuit_err_2.ok_count(), 0); @@ -128,17 +130,19 @@ mod tests { // two returns a unhealthy state, one is healthy. let circuit_err_1 = Arc::new(MockCircuitBreaker::default()); circuit_err_1.set_usable(false); - let client_err_1 = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_err_1)); + let client_err_1 = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_err_1)); let circuit_err_2 = Arc::new(MockCircuitBreaker::default()); circuit_err_2.set_usable(false); - let client_err_2 = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_err_2)); + let client_err_2 = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_err_2)); let circuit_ok = Arc::new(MockCircuitBreaker::default()); circuit_ok.set_usable(true); - let client_ok = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) + let client_ok = CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") .with_circuit_breaker(Arc::clone(&circuit_ok)); assert_eq!(circuit_ok.ok_count(), 0); @@ -232,18 +236,21 @@ mod tests { // two returns a healthy state, one is unhealthy. 
let circuit_err = Arc::new(MockCircuitBreaker::default()); circuit_err.set_usable(false); - let client_err = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_err)); + let client_err = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_err)); let circuit_ok_1 = Arc::new(MockCircuitBreaker::default()); circuit_ok_1.set_usable(true); - let client_ok_1 = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_ok_1)); + let client_ok_1 = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_ok_1)); let circuit_ok_2 = Arc::new(MockCircuitBreaker::default()); circuit_ok_2.set_usable(true); - let client_ok_2 = CircuitBreakingClient::new(Arc::new(MockWriteClient::default())) - .with_circuit_breaker(Arc::clone(&circuit_ok_2)); + let client_ok_2 = + CircuitBreakingClient::new(Arc::new(MockWriteClient::default()), "bananas") + .with_circuit_breaker(Arc::clone(&circuit_ok_2)); let balancer = Balancer::new([client_err, client_ok_1, client_ok_2]); diff --git a/router/src/dml_handlers/rpc_write/circuit_breaker.rs b/router/src/dml_handlers/rpc_write/circuit_breaker.rs index 80b0ecd05d..0da517bace 100644 --- a/router/src/dml_handlers/rpc_write/circuit_breaker.rs +++ b/router/src/dml_handlers/rpc_write/circuit_breaker.rs @@ -159,6 +159,11 @@ pub(crate) struct CircuitBreaker { /// A task to reset the request count at intervals of [`ERROR_WINDOW`]. reset_task: JoinHandle<()>, + + /// A string description of the endpoint this [`CircuitBreaker`] models. + /// + /// Used for logging context only. + endpoint: Arc<str>, } #[derive(Debug, Default)] @@ -172,8 +177,8 @@ struct ProbeState { probes_started: u64, } -impl Default for CircuitBreaker { - fn default() -> Self { +impl CircuitBreaker { + pub(crate) fn new(endpoint: impl Into<Arc<str>>) -> Self { let requests = Arc::new(RequestCounter::default()); let s = Self { requests: Arc::clone(&requests), @@ -186,13 +191,12 @@ impl Default for CircuitBreaker { reset_closed_state_counters(&requests); } }), + endpoint: endpoint.into(), }; s.set_healthy(); s } -} -impl CircuitBreaker { /// Force-set the state of the circuit breaker to "closed" / healthy. pub(crate) fn set_healthy(&self) { self.requests.set(NUM_PROBES as u32, 0); @@ -265,7 +269,7 @@ impl CircuitBreaker { match guard.probe_window_started_at { // It is time to begin probing again. Some(p) if now.duration_since(p) > PROBE_INTERVAL => { - debug!("remote unavailable, probing"); + warn!(endpoint=%self.endpoint, "remote unavailable, probing"); // It should be impossible to have allowed more than NUM_PROBES // requests through since the last time `guard.probes_started` @@ -292,7 +296,10 @@ impl CircuitBreaker { // If there have already been the configured number of probes, // do not allow more. if guard.probes_started >= NUM_PROBES { - debug!("remote unavailable, probes exhausted"); + debug!( + endpoint=%self.endpoint, + "probes exhausted" + ); return false; } } @@ -313,6 +320,7 @@ impl CircuitBreaker { debug!( nth_probe = guard.probes_started, max_probes = NUM_PROBES, + endpoint=%self.endpoint, "sending probe" ); @@ -456,7 +464,7 @@ mod tests { /// Return a new [`CircuitBreaker`] with the reset ticker disabled. 
fn new_no_reset() -> CircuitBreaker { - let c = CircuitBreaker::default(); + let c = CircuitBreaker::new("bananas"); c.reset_task.abort(); c } @@ -595,7 +603,7 @@ mod tests { /// ERROR_WINDOW periods from changing the circuit to open/unhealthy. #[tokio::test] async fn test_periodic_counter_reset() { - let c = CircuitBreaker::default(); + let c = CircuitBreaker::new("bananas"); // Assert the circuit breaker as healthy. assert!(c.is_healthy()); diff --git a/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs b/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs index 188de23c48..b756c44549 100644 --- a/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs +++ b/router/src/dml_handlers/rpc_write/circuit_breaking_client.rs @@ -1,4 +1,4 @@ -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; use generated_types::influxdata::iox::ingester::v1::WriteRequest; @@ -36,8 +36,8 @@ pub(super) struct CircuitBreakingClient<T, C = CircuitBreaker> { } impl<T> CircuitBreakingClient<T> { - pub(super) fn new(inner: T) -> Self { - let state = CircuitBreaker::default(); + pub(super) fn new(inner: T, endpoint: impl Into<Arc<str>>) -> Self { + let state = CircuitBreaker::new(endpoint); state.set_healthy(); Self { inner, state } } @@ -133,7 +133,7 @@ mod tests { #[tokio::test] async fn test_healthy() { let circuit_breaker = Arc::new(MockCircuitBreaker::default()); - let wrapper = CircuitBreakingClient::new(MockWriteClient::default()) + let wrapper = CircuitBreakingClient::new(MockWriteClient::default(), "bananas") .with_circuit_breaker(Arc::clone(&circuit_breaker)); circuit_breaker.set_usable(true); @@ -151,7 +151,7 @@ mod tests { MockWriteClient::default() .with_ret(vec![Ok(()), Err(RpcWriteError::DeletesUnsupported)]), ); - let wrapper = CircuitBreakingClient::new(Arc::clone(&mock_client)) + let wrapper = CircuitBreakingClient::new(Arc::clone(&mock_client), "bananas") .with_circuit_breaker(Arc::clone(&circuit_breaker)); assert_eq!(circuit_breaker.ok_count(), 0);
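The record above threads a human-readable endpoint through the circuit breaker purely so health-transition log events can say which upstream they refer to. A minimal sketch of that pattern, assuming the tracing and tracing-subscriber crates; the Upstream type, method name, and address below are illustrative, not the router's real API:

use std::sync::Arc;
use tracing::warn;

// Illustrative stand-in for the circuit-breaking client: it carries the
// endpoint string only so log events can identify the unhealthy upstream.
struct Upstream {
    endpoint: Arc<str>,
}

impl Upstream {
    fn new(endpoint: impl Into<Arc<str>>) -> Self {
        Self { endpoint: endpoint.into() }
    }

    // Emit a WARN with the endpoint attached as a structured field, in the
    // same `endpoint=%self.endpoint` style the diff uses.
    fn record_unhealthy(&self) {
        warn!(endpoint = %self.endpoint, "remote unavailable, probing");
    }
}

fn main() {
    tracing_subscriber::fmt::init(); // assumed dependency, only for the demo output
    Upstream::new("http://ingester-0:8082").record_unhealthy();
}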
fe48a685ece5be9bb1f40f30023803cf4e542df0
Stuart Carnie
2023-03-09 09:29:20
Move InfluxQL behaviour from iox_query to new crate (#7156)
* refactor: Break unnecessary dependencies from `iox_query` crate In the process, the test code has been simplified. * refactor: Move InfluxQL plan module to iox_query_influxql crate * refactor: Move remaining behaviour from iox_query to iox_query_influxql * chore: rustfmt 🙄 I was under the impression `clippy` would catch formatting
null
refactor: Move InfluxQL behaviour from iox_query to new crate (#7156) * refactor: Break unnecessary dependencies from `iox_query` crate In the process, the test code has been simplified. * refactor: Move InfluxQL plan module to iox_query_influxql crate * refactor: Move remaining behaviour from iox_query to iox_query_influxql * chore: rustfmt 🙄 I was under the impression `clippy` would catch formatting
diff --git a/Cargo.lock b/Cargo.lock index 191d27c33a..d2b427fb73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2946,29 +2946,23 @@ dependencies = [ "assert_matches", "async-trait", "chrono", - "chrono-tz", "data_types", "datafusion", "datafusion_util", "executor", "futures", - "generated_types", "hashbrown 0.13.2", "indexmap", - "influxdb_influxql_parser", "insta", "itertools", "object_store", "observability_deps", - "once_cell", "parking_lot 0.12.1", "parquet_file", "predicate", "query_functions", - "regex", "schema", "serde", - "serde_json", "snafu", "test_helpers", "tokio", @@ -2977,6 +2971,33 @@ dependencies = [ "workspace-hack", ] +[[package]] +name = "iox_query_influxql" +version = "0.1.0" +dependencies = [ + "arrow", + "assert_matches", + "chrono", + "chrono-tz", + "data_types", + "datafusion", + "datafusion_util", + "generated_types", + "influxdb_influxql_parser", + "insta", + "iox_query", + "itertools", + "observability_deps", + "once_cell", + "query_functions", + "regex", + "schema", + "serde", + "serde_json", + "test_helpers", + "workspace-hack", +] + [[package]] name = "iox_tests" version = "0.1.0" @@ -5073,6 +5094,7 @@ dependencies = [ "datafusion", "flightsql", "iox_query", + "iox_query_influxql", "metric", "parking_lot 0.12.1", "predicate", diff --git a/Cargo.toml b/Cargo.toml index 0236616fde..c4c116454f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ members = [ "iox_catalog", "iox_data_generator", "iox_query", + "iox_query_influxql", "iox_tests", "iox_time", "ioxd_common", diff --git a/iox_query/Cargo.toml b/iox_query/Cargo.toml index dd90967e42..0d72ee2376 100644 --- a/iox_query/Cargo.toml +++ b/iox_query/Cargo.toml @@ -19,26 +19,20 @@ arrow = { workspace = true, features = ["prettyprint"] } arrow_util = { path = "../arrow_util" } async-trait = "0.1" chrono = { version = "0.4", default-features = false } -chrono-tz = { version = "0.8" } data_types = { path = "../data_types" } datafusion = { workspace = true } datafusion_util = { path = "../datafusion_util" } executor = { path = "../executor"} futures = "0.3" -generated_types = { path = "../generated_types" } hashbrown = { workspace = true } indexmap = { version = "1.9", features = ["std"] } -influxdb_influxql_parser = { path = "../influxdb_influxql_parser" } itertools = "0.10.5" -once_cell = "1" object_store = "0.5.5" observability_deps = { path = "../observability_deps" } parking_lot = "0.12" parquet_file = { path = "../parquet_file" } query_functions = { path = "../query_functions"} -regex = "1" schema = { path = "../schema" } -serde_json = "1.0.93" snafu = "0.7" tokio = { version = "1.26", features = ["macros", "parking_lot"] } tokio-stream = "0.1" diff --git a/iox_query/src/frontend.rs b/iox_query/src/frontend.rs index a12c80bfd5..05f56e15f0 100644 --- a/iox_query/src/frontend.rs +++ b/iox_query/src/frontend.rs @@ -1,5 +1,4 @@ pub mod common; -pub mod influxql; pub mod influxrpc; pub mod reorg; pub mod sql; diff --git a/iox_query/src/plan.rs b/iox_query/src/plan.rs index af576a8266..693ff90cdc 100644 --- a/iox_query/src/plan.rs +++ b/iox_query/src/plan.rs @@ -1,4 +1,3 @@ pub mod fieldlist; -pub mod influxql; pub mod seriesset; pub mod stringset; diff --git a/iox_query/src/plan/influxql/test_utils.rs b/iox_query/src/plan/influxql/test_utils.rs deleted file mode 100644 index 9971ddb44d..0000000000 --- a/iox_query/src/plan/influxql/test_utils.rs +++ /dev/null @@ -1,217 +0,0 @@ -//! APIs for testing. 
-#![cfg(test)] - -use crate::plan::influxql::SchemaProvider; -use crate::test::{TestChunk, TestDatabase}; -use crate::QueryChunkMeta; -use datafusion::common::DataFusionError; -use datafusion::datasource::empty::EmptyTable; -use datafusion::datasource::provider_as_source; -use datafusion::logical_expr::TableSource; -use influxdb_influxql_parser::parse_statements; -use influxdb_influxql_parser::select::{Field, SelectStatement}; -use influxdb_influxql_parser::statement::Statement; -use predicate::rpc_predicate::QueryNamespaceMeta; -use schema::Schema; -use std::collections::HashMap; -use std::sync::Arc; - -/// Returns the first `Field` of the `SELECT` statement. -pub(crate) fn get_first_field(s: &str) -> Field { - parse_select(s).fields.head().unwrap().clone() -} - -/// Returns the InfluxQL [`SelectStatement`] for the specified SQL, `s`. -pub(crate) fn parse_select(s: &str) -> SelectStatement { - let statements = parse_statements(s).unwrap(); - match statements.first() { - Some(Statement::Select(sel)) => *sel.clone(), - _ => panic!("expected SELECT statement"), - } -} - -/// Module which provides a test database and schema for InfluxQL tests. -pub(crate) mod database { - use super::*; - - /// Return a set of chunks that make up the test database. - /// - /// ## NOTE - /// The chunks returned by this function start numbering their - /// IDs from 1,000,000. A caller may wish to merge additional chunks - /// by using IDs less than 1,000,000. - pub(crate) fn chunks() -> Vec<Arc<TestChunk>> { - let mut chunk_id = 1_000_000; - let mut next_chunk_id = || { - chunk_id += 1; - chunk_id - }; - - vec![ - Arc::new( - TestChunk::new("cpu") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("host") - .with_tag_column("region") - .with_tag_column("cpu") - .with_f64_field_column("usage_user") - .with_f64_field_column("usage_system") - .with_f64_field_column("usage_idle") - .with_one_row_of_data(), - ), - Arc::new( - TestChunk::new("disk") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("host") - .with_tag_column("region") - .with_tag_column("device") - .with_i64_field_column("bytes_used") - .with_i64_field_column("bytes_free") - .with_one_row_of_data(), - ), - Arc::new( - TestChunk::new("diskio") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("host") - .with_tag_column("region") - .with_tag_column("status") - .with_i64_field_column("bytes_read") - .with_i64_field_column("bytes_written") - .with_f64_field_column("read_utilization") - .with_f64_field_column("write_utilization") - .with_bool_field_column("is_local") - .with_one_row_of_data(), - ), - // Schemas for testing merged schemas - Arc::new( - TestChunk::new("temp_01") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("shared_tag0") - .with_tag_column("shared_tag1") - .with_f64_field_column("shared_field0") - .with_f64_field_column("field_f64") - .with_i64_field_column("field_i64") - .with_u64_field_column_no_stats("field_u64") - .with_string_field_column_with_stats("field_str", None, None) - .with_one_row_of_data(), - ), - Arc::new( - TestChunk::new("temp_02") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("shared_tag0") - .with_tag_column("shared_tag1") - .with_i64_field_column("shared_field0") - .with_one_row_of_data(), - ), - Arc::new( - TestChunk::new("temp_03") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("shared_tag0") - 
.with_tag_column("shared_tag1") - .with_string_field_column_with_stats("shared_field0", None, None) - .with_one_row_of_data(), - ), - // Schemas for testing clashing column names when merging across measurements - Arc::new( - TestChunk::new("merge_00") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("col0") - .with_f64_field_column("col1") - .with_bool_field_column("col2") - .with_string_field_column_with_stats("col3", None, None) - .with_one_row_of_data(), - ), - Arc::new( - TestChunk::new("merge_01") - .with_id(next_chunk_id()) - .with_quiet() - .with_time_column() - .with_tag_column("col1") - .with_f64_field_column("col0") - .with_bool_field_column("col3") - .with_string_field_column_with_stats("col2", None, None) - .with_one_row_of_data(), - ), - ] - } -} - -pub(crate) struct MockSchemaProvider { - chunks: Vec<Arc<TestChunk>>, -} - -impl Default for MockSchemaProvider { - fn default() -> Self { - let chunks = database::chunks(); - Self { chunks } - } -} - -impl SchemaProvider for MockSchemaProvider { - fn get_table_provider( - &self, - _name: &str, - ) -> crate::exec::context::Result<Arc<dyn TableSource>> { - unimplemented!() - } - - fn table_names(&self) -> Vec<&'_ str> { - self.chunks.iter().map(|x| x.table_name()).collect() - } - - fn table_schema(&self, name: &str) -> Option<Schema> { - let c = self.chunks.iter().find(|x| x.table_name() == name)?; - Some(c.schema().clone()) - } -} - -pub(crate) struct TestDatabaseAdapter { - tables: HashMap<String, (Arc<dyn TableSource>, Schema)>, -} - -impl SchemaProvider for TestDatabaseAdapter { - fn get_table_provider(&self, name: &str) -> crate::exec::context::Result<Arc<dyn TableSource>> { - self.tables - .get(name) - .map(|(t, _)| Arc::clone(t)) - .ok_or_else(|| DataFusionError::Plan(format!("measurement does not exist: {name}"))) - } - - fn table_names(&self) -> Vec<&'_ str> { - self.tables.keys().map(|k| k.as_str()).collect::<Vec<_>>() - } - - fn table_schema(&self, name: &str) -> Option<Schema> { - self.tables.get(name).map(|(_, s)| s.clone()) - } -} - -impl TestDatabaseAdapter { - pub(crate) fn new(db: &TestDatabase) -> Self { - let table_names = db.table_names(); - let mut res = Self { - tables: HashMap::with_capacity(table_names.len()), - }; - for table in table_names { - let schema = db.table_schema(&table).unwrap(); - let s = Arc::new(EmptyTable::new(schema.as_arrow())); - res.tables.insert(table, (provider_as_source(s), schema)); - } - - res - } -} diff --git a/iox_query_influxql/Cargo.toml b/iox_query_influxql/Cargo.toml new file mode 100644 index 0000000000..af95837ebf --- /dev/null +++ b/iox_query_influxql/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "iox_query_influxql" +version.workspace = true +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +arrow = { workspace = true, features = ["prettyprint"] } +chrono = { version = "0.4", default-features = false } +chrono-tz = { version = "0.8" } +data_types = { path = "../data_types" } +datafusion = { workspace = true } +datafusion_util = { path = "../datafusion_util" } +generated_types = { path = "../generated_types" } +influxdb_influxql_parser = { path = "../influxdb_influxql_parser" } +iox_query = { path = "../iox_query" } +itertools = "0.10.5" +observability_deps = { path = "../observability_deps" } +once_cell = "1" +query_functions = { path = "../query_functions"} +regex = "1" +schema = { path = "../schema" } +serde_json = "1.0.93" +workspace-hack = { version = "0.1", path = "../workspace-hack" } 
+ +[dev-dependencies] # In alphabetical order +test_helpers = { path = "../test_helpers" } +assert_matches = "1" +insta = { version = "1", features = ["yaml"] } +serde = { version = "1.0", features = ["derive"] } diff --git a/iox_query_influxql/src/frontend/mod.rs b/iox_query_influxql/src/frontend/mod.rs new file mode 100644 index 0000000000..5e480858b4 --- /dev/null +++ b/iox_query_influxql/src/frontend/mod.rs @@ -0,0 +1 @@ +pub mod planner; diff --git a/iox_query/src/frontend/influxql.rs b/iox_query_influxql/src/frontend/planner.rs similarity index 96% rename from iox_query/src/frontend/influxql.rs rename to iox_query_influxql/src/frontend/planner.rs index 5eb70f28fb..b95fc64a30 100644 --- a/iox_query/src/frontend/influxql.rs +++ b/iox_query_influxql/src/frontend/planner.rs @@ -6,9 +6,7 @@ use std::fmt::Debug; use std::ops::Deref; use std::sync::Arc; -use crate::exec::context::IOxSessionContext; -use crate::plan::influxql; -use crate::plan::influxql::{InfluxQLToLogicalPlan, SchemaProvider}; +use crate::plan::{parse_regex, InfluxQLToLogicalPlan, SchemaProvider}; use datafusion::common::Statistics; use datafusion::datasource::provider_as_source; use datafusion::execution::context::TaskContext; @@ -23,6 +21,7 @@ use influxdb_influxql_parser::common::MeasurementName; use influxdb_influxql_parser::parse_statements; use influxdb_influxql_parser::statement::Statement; use influxdb_influxql_parser::visit::{Visitable, Visitor}; +use iox_query::exec::IOxSessionContext; use observability_deps::tracing::debug; use schema::Schema; @@ -106,7 +105,7 @@ impl ExecutionPlan for SchemaExec { } } -/// This struct can create plans for running SQL queries against databases +/// Create plans for running InfluxQL queries against databases #[derive(Debug, Default)] pub struct InfluxQLQueryPlanner {} @@ -122,11 +121,10 @@ impl InfluxQLQueryPlanner { query: &str, ctx: &IOxSessionContext, ) -> Result<Arc<dyn ExecutionPlan>> { - let ctx = ctx.child_ctx("query"); debug!(text=%query, "planning InfluxQL query"); let statement = self.query_to_statement(query)?; - let logical_plan = self.statement_to_plan(statement, &ctx).await?; + let logical_plan = self.statement_to_plan(statement, ctx).await?; let input = ctx.create_physical_plan(&logical_plan).await?; @@ -225,7 +223,7 @@ fn find_all_measurements(stmt: &Statement, tables: &[String]) -> Result<HashSet< } } MeasurementName::Regex(re) => { - let re = influxql::parse_regex(re)?; + let re = parse_regex(re)?; self.1 .iter() diff --git a/iox_query_influxql/src/lib.rs b/iox_query_influxql/src/lib.rs new file mode 100644 index 0000000000..dc53de2c64 --- /dev/null +++ b/iox_query_influxql/src/lib.rs @@ -0,0 +1,14 @@ +//! 
Contains the IOx InfluxQL query planner +#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] +#![warn( + missing_debug_implementations, + clippy::explicit_iter_loop, + clippy::use_self, + clippy::clone_on_ref_ptr, + clippy::future_not_send, + clippy::todo, + clippy::dbg_macro +)] + +pub mod frontend; +pub mod plan; diff --git a/iox_query/src/plan/influxql/expr_type_evaluator.rs b/iox_query_influxql/src/plan/expr_type_evaluator.rs similarity index 97% rename from iox_query/src/plan/influxql/expr_type_evaluator.rs rename to iox_query_influxql/src/plan/expr_type_evaluator.rs index 89bc51ba25..60b0ed7958 100644 --- a/iox_query/src/plan/influxql/expr_type_evaluator.rs +++ b/iox_query_influxql/src/plan/expr_type_evaluator.rs @@ -1,6 +1,6 @@ -use crate::plan::influxql::field::field_by_name; -use crate::plan::influxql::field_mapper::map_type; -use crate::plan::influxql::SchemaProvider; +use crate::plan::field::field_by_name; +use crate::plan::field_mapper::map_type; +use crate::plan::SchemaProvider; use datafusion::common::{DataFusionError, Result}; use influxdb_influxql_parser::common::{MeasurementName, QualifiedMeasurementName}; use influxdb_influxql_parser::expression::{Expr, VarRefDataType}; @@ -148,8 +148,8 @@ impl<'a> TypeEvaluator<'a> { #[cfg(test)] mod test { - use crate::plan::influxql::expr_type_evaluator::evaluate_type; - use crate::plan::influxql::test_utils::{parse_select, MockSchemaProvider}; + use crate::plan::expr_type_evaluator::evaluate_type; + use crate::plan::test_utils::{parse_select, MockSchemaProvider}; use assert_matches::assert_matches; use influxdb_influxql_parser::expression::VarRefDataType; diff --git a/iox_query/src/plan/influxql/field.rs b/iox_query_influxql/src/plan/field.rs similarity index 97% rename from iox_query/src/plan/influxql/field.rs rename to iox_query_influxql/src/plan/field.rs index 678fc32647..add93810c2 100644 --- a/iox_query/src/plan/influxql/field.rs +++ b/iox_query_influxql/src/plan/field.rs @@ -84,8 +84,8 @@ fn binary_expr_name(expr: &Expr) -> String { #[cfg(test)] mod test { - use crate::plan::influxql::field::{field_by_name, field_name}; - use crate::plan::influxql::test_utils::{get_first_field, parse_select}; + use crate::plan::field::{field_by_name, field_name}; + use crate::plan::test_utils::{get_first_field, parse_select}; use assert_matches::assert_matches; #[test] diff --git a/iox_query/src/plan/influxql/field_mapper.rs b/iox_query_influxql/src/plan/field_mapper.rs similarity index 94% rename from iox_query/src/plan/influxql/field_mapper.rs rename to iox_query_influxql/src/plan/field_mapper.rs index d50e387df2..751b315b4c 100644 --- a/iox_query/src/plan/influxql/field_mapper.rs +++ b/iox_query_influxql/src/plan/field_mapper.rs @@ -1,7 +1,7 @@ #![allow(dead_code)] -use crate::plan::influxql::var_ref::field_type_to_var_ref_data_type; -use crate::plan::influxql::SchemaProvider; +use crate::plan::var_ref::field_type_to_var_ref_data_type; +use crate::plan::SchemaProvider; use datafusion::common::Result; use influxdb_influxql_parser::expression::VarRefDataType; use schema::InfluxColumnType; @@ -51,7 +51,7 @@ pub(crate) fn map_type( #[cfg(test)] mod test { use super::*; - use crate::plan::influxql::test_utils::MockSchemaProvider; + use crate::plan::test_utils::MockSchemaProvider; use assert_matches::assert_matches; #[test] diff --git a/iox_query/src/plan/influxql.rs b/iox_query_influxql/src/plan/mod.rs similarity index 89% rename from iox_query/src/plan/influxql.rs rename to iox_query_influxql/src/plan/mod.rs index 
f1cf7f7cef..d40197a594 100644 --- a/iox_query/src/plan/influxql.rs +++ b/iox_query_influxql/src/plan/mod.rs @@ -12,4 +12,4 @@ mod var_ref; pub use planner::InfluxQLToLogicalPlan; pub use planner::SchemaProvider; -pub use util::parse_regex; +pub(crate) use util::parse_regex; diff --git a/iox_query/src/plan/influxql/planner.rs b/iox_query_influxql/src/plan/planner.rs similarity index 97% rename from iox_query/src/plan/influxql/planner.rs rename to iox_query_influxql/src/plan/planner.rs index 24bfa1b373..1ab406f9f5 100644 --- a/iox_query/src/plan/influxql/planner.rs +++ b/iox_query_influxql/src/plan/planner.rs @@ -1,13 +1,10 @@ -use crate::plan::influxql::planner_rewrite_expression::{rewrite_conditional, rewrite_expr}; -use crate::plan::influxql::planner_time_range_expression::time_range_to_df_expr; -use crate::plan::influxql::rewriter::rewrite_statement; -use crate::plan::influxql::util::{binary_operator_to_df_operator, Schemas}; -use crate::plan::influxql::var_ref::{ - column_type_to_var_ref_data_type, var_ref_data_type_to_data_type, -}; -use crate::DataFusionError; +use crate::plan::planner_rewrite_expression::{rewrite_conditional, rewrite_expr}; +use crate::plan::planner_time_range_expression::time_range_to_df_expr; +use crate::plan::rewriter::rewrite_statement; +use crate::plan::util::{binary_operator_to_df_operator, Schemas}; +use crate::plan::var_ref::{column_type_to_var_ref_data_type, var_ref_data_type_to_data_type}; use arrow::datatypes::DataType; -use datafusion::common::{DFSchema, DFSchemaRef, Result, ScalarValue, ToDFSchema}; +use datafusion::common::{DFSchema, DFSchemaRef, DataFusionError, Result, ScalarValue, ToDFSchema}; use datafusion::logical_expr::expr_rewriter::{normalize_col, ExprRewritable, ExprRewriter}; use datafusion::logical_expr::logical_plan::builder::project; use datafusion::logical_expr::logical_plan::Analyze; @@ -1020,63 +1017,44 @@ fn find_expr(cond: &ConditionalExpression) -> Result<&IQLExpr> { #[cfg(test)] mod test { use super::*; - use crate::exec::Executor; - use crate::plan::influxql::test_utils; - use crate::plan::influxql::test_utils::{parse_select, TestDatabaseAdapter}; - use crate::test::{TestChunk, TestDatabase}; + use crate::plan::test_utils::{parse_select, MockSchemaProvider}; use influxdb_influxql_parser::parse_statements; use insta::assert_snapshot; + use schema::SchemaBuilder; fn logical_plan(sql: &str) -> Result<LogicalPlan> { let mut statements = parse_statements(sql).unwrap(); - // index of columns in the above chunk: [bar, foo, i64_field, i64_field_2, time] - let executor = Arc::new(Executor::new_testing()); - let test_db = TestDatabase::new(Arc::clone(&executor)); - test_db.add_chunk( - "my_partition_key", - Arc::new( - TestChunk::new("data") - .with_quiet() - .with_id(0) - .with_tag_column("foo") - .with_tag_column("bar") - .with_f64_field_column("f64_field") - .with_f64_field_column("mixedCase") - .with_f64_field_column("with space") - .with_i64_field_column("i64_field") - .with_string_field_column_with_stats("str_field", None, None) - .with_bool_field_column("bool_field") - // InfluxQL is case sensitive - .with_bool_field_column("TIME") - .with_time_column() - .with_one_row_of_data(), - ), - ); - - // Table with tags and all field types - test_db.add_chunk( - "my_partition_key", - Arc::new( - TestChunk::new("all_types") - .with_quiet() - .with_id(1) - .with_tag_column("tag0") - .with_tag_column("tag1") - .with_f64_field_column("f64_field") - .with_i64_field_column("i64_field") - .with_string_field_column_with_stats("str_field", None, None) 
- .with_bool_field_column("bool_field") - .with_u64_field_column_no_stats("u64_field") - .with_time_column() - .with_one_row_of_data(), - ), - ); - - test_utils::database::chunks().iter().for_each(|c| { - test_db.add_chunk("my_partition_key", Arc::clone(c)); - }); - - let sp = TestDatabaseAdapter::new(&test_db); + let mut sp = MockSchemaProvider::default(); + sp.add_schemas(vec![ + SchemaBuilder::new() + .measurement("data") + .timestamp() + .tag("foo") + .tag("bar") + .influx_field("f64_field", InfluxFieldType::Float) + .influx_field("mixedCase", InfluxFieldType::Float) + .influx_field("with space", InfluxFieldType::Float) + .influx_field("i64_field", InfluxFieldType::Integer) + .influx_field("str_field", InfluxFieldType::String) + .influx_field("bool_field", InfluxFieldType::Boolean) + // InfluxQL is case sensitive + .influx_field("TIME", InfluxFieldType::Boolean) + .build() + .unwrap(), + // Table with tags and all field types + SchemaBuilder::new() + .measurement("all_types") + .timestamp() + .tag("tag0") + .tag("tag1") + .influx_field("f64_field", InfluxFieldType::Float) + .influx_field("i64_field", InfluxFieldType::Integer) + .influx_field("str_field", InfluxFieldType::String) + .influx_field("bool_field", InfluxFieldType::Boolean) + .influx_field("u64_field", InfluxFieldType::UInteger) + .build() + .unwrap(), + ]); let planner = InfluxQLToLogicalPlan::new(&sp); diff --git a/iox_query/src/plan/influxql/planner_rewrite_expression.rs b/iox_query_influxql/src/plan/planner_rewrite_expression.rs similarity index 99% rename from iox_query/src/plan/influxql/planner_rewrite_expression.rs rename to iox_query_influxql/src/plan/planner_rewrite_expression.rs index 781d89a6c0..1e3bf8f1e6 100644 --- a/iox_query/src/plan/influxql/planner_rewrite_expression.rs +++ b/iox_query_influxql/src/plan/planner_rewrite_expression.rs @@ -121,7 +121,7 @@ //! [`Reduce`]: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4850-L4852 //! [`EvalBool`]: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4181-L4183 //! [`Eval`]: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4137 -use crate::plan::influxql::util::Schemas; +use crate::plan::util::Schemas; use arrow::datatypes::DataType; use datafusion::common::{Result, ScalarValue}; use datafusion::logical_expr::expr_rewriter::{ExprRewritable, ExprRewriter}; @@ -130,7 +130,7 @@ use datafusion::logical_expr::{ }; /// Rewrite the expression tree and return a boolean result. -pub(super) fn rewrite_conditional(expr: Expr, schemas: &Schemas) -> Result<Expr> { +pub(in crate::plan) fn rewrite_conditional(expr: Expr, schemas: &Schemas) -> Result<Expr> { let expr = expr.rewrite(&mut RewriteAndCoerce { schemas })?; Ok(match expr { Expr::Literal(ScalarValue::Null) => lit(false), @@ -140,7 +140,7 @@ pub(super) fn rewrite_conditional(expr: Expr, schemas: &Schemas) -> Result<Expr> /// Rewrite the expression tree and return a result or `NULL` if some of the operands are /// incompatible. 
-pub(super) fn rewrite_expr(expr: Expr, schemas: &Schemas) -> Result<Expr> { +pub(in crate::plan) fn rewrite_expr(expr: Expr, schemas: &Schemas) -> Result<Expr> { expr.rewrite(&mut RewriteAndCoerce { schemas }) } diff --git a/iox_query/src/plan/influxql/planner_time_range_expression.rs b/iox_query_influxql/src/plan/planner_time_range_expression.rs similarity index 99% rename from iox_query/src/plan/influxql/planner_time_range_expression.rs rename to iox_query_influxql/src/plan/planner_time_range_expression.rs index 36e33d8df5..573dbfb385 100644 --- a/iox_query/src/plan/influxql/planner_time_range_expression.rs +++ b/iox_query_influxql/src/plan/planner_time_range_expression.rs @@ -1,5 +1,5 @@ -use crate::plan::influxql::timestamp::parse_timestamp; -use crate::plan::influxql::util::binary_operator_to_df_operator; +use crate::plan::timestamp::parse_timestamp; +use crate::plan::util::binary_operator_to_df_operator; use datafusion::common::{DataFusionError, Result, ScalarValue}; use datafusion::logical_expr::{binary_expr, lit, now, BinaryExpr, Expr as DFExpr, Operator}; use influxdb_influxql_parser::expression::BinaryOperator; @@ -40,7 +40,7 @@ type ExprResult = Result<DFExpr>; /// [`Reduce`]: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L4850-L4852 /// [conditionExpr]: https://github.com/influxdata/influxql/blob/1ba470371ec093d57a726b143fe6ccbacf1b452b/ast.go#L5751-L5756 /// [`TZ`]: https://docs.influxdata.com/influxdb/v1.8/query_language/explore-data/#the-time-zone-clause -pub(super) fn time_range_to_df_expr(expr: &Expr, tz: Option<chrono_tz::Tz>) -> ExprResult { +pub(in crate::plan) fn time_range_to_df_expr(expr: &Expr, tz: Option<chrono_tz::Tz>) -> ExprResult { let df_expr = reduce_expr(expr, tz)?; // Attempt to coerce the final expression into a timestamp diff --git a/iox_query/src/plan/influxql/rewriter.rs b/iox_query_influxql/src/plan/rewriter.rs similarity index 98% rename from iox_query/src/plan/influxql/rewriter.rs rename to iox_query_influxql/src/plan/rewriter.rs index bd25cace33..a49ef51ba9 100644 --- a/iox_query/src/plan/influxql/rewriter.rs +++ b/iox_query_influxql/src/plan/rewriter.rs @@ -1,9 +1,9 @@ #![allow(dead_code)] -use crate::plan::influxql::expr_type_evaluator::evaluate_type; -use crate::plan::influxql::field::field_name; -use crate::plan::influxql::field_mapper::{field_and_dimensions, FieldTypeMap, TagSet}; -use crate::plan::influxql::{util, SchemaProvider}; +use crate::plan::expr_type_evaluator::evaluate_type; +use crate::plan::field::field_name; +use crate::plan::field_mapper::{field_and_dimensions, FieldTypeMap, TagSet}; +use crate::plan::{util, SchemaProvider}; use datafusion::common::{DataFusionError, Result}; use influxdb_influxql_parser::common::{MeasurementName, QualifiedMeasurementName}; use influxdb_influxql_parser::expression::walk::{walk_expr, walk_expr_mut}; @@ -490,8 +490,8 @@ pub(crate) fn rewrite_statement( #[cfg(test)] mod test { - use crate::plan::influxql::rewriter::{has_wildcards, rewrite_statement}; - use crate::plan::influxql::test_utils::{parse_select, MockSchemaProvider}; + use crate::plan::rewriter::{has_wildcards, rewrite_statement}; + use crate::plan::test_utils::{parse_select, MockSchemaProvider}; use test_helpers::assert_contains; #[test] diff --git a/iox_query_influxql/src/plan/test_utils.rs b/iox_query_influxql/src/plan/test_utils.rs new file mode 100644 index 0000000000..45e22ac9d6 --- /dev/null +++ b/iox_query_influxql/src/plan/test_utils.rs @@ -0,0 +1,173 @@ +//! APIs for testing. 
+#![cfg(test)] + +use crate::plan::SchemaProvider; +use datafusion::common::{DataFusionError, Result as DataFusionResult}; +use datafusion::datasource::empty::EmptyTable; +use datafusion::datasource::provider_as_source; +use datafusion::logical_expr::TableSource; +use influxdb_influxql_parser::parse_statements; +use influxdb_influxql_parser::select::{Field, SelectStatement}; +use influxdb_influxql_parser::statement::Statement; +use itertools::Itertools; +use schema::{Schema, SchemaBuilder}; +use std::collections::HashMap; +use std::sync::Arc; + +/// Returns the first `Field` of the `SELECT` statement. +pub(crate) fn get_first_field(s: &str) -> Field { + parse_select(s).fields.head().unwrap().clone() +} + +/// Returns the InfluxQL [`SelectStatement`] for the specified SQL, `s`. +pub(crate) fn parse_select(s: &str) -> SelectStatement { + let statements = parse_statements(s).unwrap(); + match statements.first() { + Some(Statement::Select(sel)) => *sel.clone(), + _ => panic!("expected SELECT statement"), + } +} + +/// Module which provides a test database and schema for InfluxQL tests. +pub(crate) mod database { + use super::*; + use schema::InfluxFieldType; + + /// Return a set of schemas that make up the test database. + pub(crate) fn schemas() -> Vec<Schema> { + vec![ + SchemaBuilder::new() + .measurement("cpu") + .timestamp() + .tag("host") + .tag("region") + .tag("cpu") + .influx_field("usage_user", InfluxFieldType::Float) + .influx_field("usage_system", InfluxFieldType::Float) + .influx_field("usage_idle", InfluxFieldType::Float) + .build() + .unwrap(), + SchemaBuilder::new() + .measurement("disk") + .timestamp() + .tag("host") + .tag("region") + .tag("device") + .influx_field("bytes_used", InfluxFieldType::Integer) + .influx_field("bytes_free", InfluxFieldType::Integer) + .build() + .unwrap(), + SchemaBuilder::new() + .measurement("diskio") + .timestamp() + .tag("host") + .tag("region") + .tag("status") + .influx_field("bytes_read", InfluxFieldType::Integer) + .influx_field("bytes_written", InfluxFieldType::Integer) + .influx_field("read_utilization", InfluxFieldType::Float) + .influx_field("write_utilization", InfluxFieldType::Float) + .influx_field("is_local", InfluxFieldType::Boolean) + .build() + .unwrap(), + // Schemas for testing merged schemas + SchemaBuilder::new() + .measurement("temp_01") + .timestamp() + .tag("shared_tag0") + .tag("shared_tag1") + .influx_field("shared_field0", InfluxFieldType::Float) + .influx_field("field_f64", InfluxFieldType::Float) + .influx_field("field_i64", InfluxFieldType::Integer) + .influx_field("field_u64", InfluxFieldType::UInteger) + .influx_field("field_str", InfluxFieldType::String) + .build() + .unwrap(), + SchemaBuilder::new() + .measurement("temp_02") + .timestamp() + .tag("shared_tag0") + .tag("shared_tag1") + .influx_field("shared_field0", InfluxFieldType::Integer) + .build() + .unwrap(), + SchemaBuilder::new() + .measurement("temp_03") + .timestamp() + .tag("shared_tag0") + .tag("shared_tag1") + .influx_field("shared_field0", InfluxFieldType::String) + .build() + .unwrap(), + // Schemas for testing clashing column names when merging across measurements + SchemaBuilder::new() + .measurement("merge_00") + .timestamp() + .tag("col0") + .influx_field("col1", InfluxFieldType::Float) + .influx_field("col2", InfluxFieldType::Boolean) + .influx_field("col3", InfluxFieldType::String) + .build() + .unwrap(), + SchemaBuilder::new() + .measurement("merge_01") + .timestamp() + .tag("col1") + .influx_field("col0", InfluxFieldType::Float) + 
.influx_field("col3", InfluxFieldType::Boolean) + .influx_field("col2", InfluxFieldType::String) + .build() + .unwrap(), + ] + } +} + +pub(crate) struct MockSchemaProvider { + tables: HashMap<String, (Arc<dyn TableSource>, Schema)>, +} + +impl Default for MockSchemaProvider { + fn default() -> Self { + let mut res = Self { + tables: HashMap::new(), + }; + res.add_schemas(database::schemas()); + res + } +} + +impl MockSchemaProvider { + pub(crate) fn add_schema(&mut self, schema: Schema) { + let schema = schema.sort_fields_by_name(); + + let table_name = schema.measurement().unwrap().clone(); + let s = Arc::new(EmptyTable::new(schema.as_arrow())); + self.tables + .insert(table_name, (provider_as_source(s), schema)); + } + + pub(crate) fn add_schemas(&mut self, schemas: impl IntoIterator<Item = Schema>) { + schemas.into_iter().for_each(|s| self.add_schema(s)); + } +} + +impl SchemaProvider for MockSchemaProvider { + fn get_table_provider(&self, name: &str) -> DataFusionResult<Arc<dyn TableSource>> { + self.tables + .get(name) + .map(|(t, _)| Arc::clone(t)) + .ok_or_else(|| DataFusionError::Plan(format!("measurement does not exist: {name}"))) + } + + fn table_names(&self) -> Vec<&'_ str> { + self.tables + .keys() + .map(|k| k.as_str()) + .sorted() + .collect::<Vec<_>>() + } + + fn table_schema(&self, name: &str) -> Option<Schema> { + self.tables.get(name).map(|(_, s)| s.clone()) + } +} diff --git a/iox_query/src/plan/influxql/timestamp.rs b/iox_query_influxql/src/plan/timestamp.rs similarity index 100% rename from iox_query/src/plan/influxql/timestamp.rs rename to iox_query_influxql/src/plan/timestamp.rs diff --git a/iox_query/src/plan/influxql/util.rs b/iox_query_influxql/src/plan/util.rs similarity index 78% rename from iox_query/src/plan/influxql/util.rs rename to iox_query_influxql/src/plan/util.rs index d23e84f989..d2d1871a03 100644 --- a/iox_query/src/plan/influxql/util.rs +++ b/iox_query_influxql/src/plan/util.rs @@ -6,7 +6,7 @@ use query_functions::clean_non_meta_escapes; use schema::Schema; use std::sync::Arc; -pub(super) fn binary_operator_to_df_operator(op: BinaryOperator) -> Operator { +pub(in crate::plan) fn binary_operator_to_df_operator(op: BinaryOperator) -> Operator { match op { BinaryOperator::Add => Operator::Plus, BinaryOperator::Sub => Operator::Minus, @@ -20,7 +20,7 @@ pub(super) fn binary_operator_to_df_operator(op: BinaryOperator) -> Operator { } /// Return the IOx schema for the specified DataFusion schema. -pub(super) fn schema_from_df(schema: &DFSchema) -> Result<Schema> { +pub(in crate::plan) fn schema_from_df(schema: &DFSchema) -> Result<Schema> { let s: Arc<arrow::datatypes::Schema> = Arc::new(schema.into()); s.try_into().map_err(|err| { DataFusionError::Internal(format!( @@ -30,13 +30,13 @@ pub(super) fn schema_from_df(schema: &DFSchema) -> Result<Schema> { } /// Container for both the DataFusion and equivalent IOx schema. -pub(super) struct Schemas { - pub(super) df_schema: DFSchemaRef, - pub(super) iox_schema: Schema, +pub(in crate::plan) struct Schemas { + pub(in crate::plan) df_schema: DFSchemaRef, + pub(in crate::plan) iox_schema: Schema, } impl Schemas { - pub(super) fn new(df_schema: &DFSchemaRef) -> Result<Self> { + pub(in crate::plan) fn new(df_schema: &DFSchemaRef) -> Result<Self> { Ok(Self { df_schema: Arc::clone(df_schema), iox_schema: schema_from_df(df_schema)?, @@ -45,7 +45,7 @@ impl Schemas { } /// Sanitize an InfluxQL regular expression and create a compiled [`regex::Regex`]. 
-pub fn parse_regex(re: &Regex) -> Result<regex::Regex> { +pub(crate) fn parse_regex(re: &Regex) -> Result<regex::Regex> { let pattern = clean_non_meta_escapes(re.as_str()); regex::Regex::new(&pattern).map_err(|e| { DataFusionError::External(format!("invalid regular expression '{re}': {e}").into()) diff --git a/iox_query/src/plan/influxql/var_ref.rs b/iox_query_influxql/src/plan/var_ref.rs similarity index 100% rename from iox_query/src/plan/influxql/var_ref.rs rename to iox_query_influxql/src/plan/var_ref.rs diff --git a/service_common/Cargo.toml b/service_common/Cargo.toml index 44a8f205cc..f0bb132820 100644 --- a/service_common/Cargo.toml +++ b/service_common/Cargo.toml @@ -10,6 +10,7 @@ async-trait = "0.1.66" bytes = "1.4" datafusion = { workspace = true } iox_query = { path = "../iox_query" } +iox_query_influxql = { path = "../iox_query_influxql" } flightsql = { path = "../flightsql" } metric = { path = "../metric" } parking_lot = "0.12" diff --git a/service_common/src/planner.rs b/service_common/src/planner.rs index 8044d4a308..17be02b0bb 100644 --- a/service_common/src/planner.rs +++ b/service_common/src/planner.rs @@ -12,7 +12,7 @@ use iox_query::{ }; pub use datafusion::error::{DataFusionError as Error, Result}; -use iox_query::frontend::influxql::InfluxQLQueryPlanner; +use iox_query_influxql::frontend::planner::InfluxQLQueryPlanner; use predicate::rpc_predicate::InfluxRpcPredicate; /// Query planner that plans queries on a separate threadpool.
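The planner tests in this diff register each measurement with the `MockSchemaProvider` as an `EmptyTable`-backed `TableSource` keyed by measurement name, which the planner then scans. Below is a rough, standalone sketch of that wiring using plain arrow/DataFusion types in place of the internal `schema::SchemaBuilder` and `SchemaProvider` trait; the measurement name and columns are illustrative, and the struct layout is a simplification rather than the crate's actual test utility.

```rust
use std::{collections::HashMap, sync::Arc};

use arrow::datatypes::{DataType, Field, Schema, TimeUnit};
use datafusion::{
    datasource::{empty::EmptyTable, provider_as_source},
    error::Result,
    logical_expr::{LogicalPlanBuilder, TableSource},
};

fn main() -> Result<()> {
    // An illustrative "cpu" measurement: timestamp, one tag, one field.
    let schema = Arc::new(Schema::new(vec![
        Field::new(
            "time",
            DataType::Timestamp(TimeUnit::Nanosecond, None),
            false,
        ),
        Field::new("host", DataType::Utf8, true),
        Field::new("usage_user", DataType::Float64, true),
    ]));

    // Mirror the mock provider: expose the schema as an empty table source.
    let mut tables: HashMap<String, Arc<dyn TableSource>> = HashMap::new();
    tables.insert(
        "cpu".to_string(),
        provider_as_source(Arc::new(EmptyTable::new(Arc::clone(&schema)))),
    );

    // A planner resolves the measurement name to its source and scans it.
    let source = Arc::clone(tables.get("cpu").expect("measurement registered"));
    let plan = LogicalPlanBuilder::scan("cpu", source, None)?.build()?;
    println!("{}", plan.display_indent());
    Ok(())
}
```

An `EmptyTable` is enough for this purpose because the tests only exercise planning, never execution, so no row data ever needs to be produced.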
bef29a683775aaa4b9e2cd66073c1115b64e4c44
Dom Dwyer
2023-03-03 17:14:03
segment file reference counting
Reference count rotated WAL segment files, tracking the number of unpersisted operations in each and deleting each file once all the data within it has been persisted to object storage. This commit contains the implementation of the reference counting logic, and is currently unused. A follow-up PR will wire this into the various places needed to feed it the necessary information. Part of https://github.com/influxdata/influxdb_iox/issues/6566.
null
feat(wal): segment file reference counting Reference count rotated WAL segment files, tracking the number of unpersisted operations in each and deleting each file once all the data within it has been persisted to object storage. This commit contains the implementation of the reference counting logic, and is currently unused. A follow-up PR will wire this into the various places needed to feed it the necessary information. Part of https://github.com/influxdata/influxdb_iox/issues/6566.
diff --git a/ingester2/src/persist/completion_observer.rs b/ingester2/src/persist/completion_observer.rs index b52189b4a9..762df271cc 100644 --- a/ingester2/src/persist/completion_observer.rs +++ b/ingester2/src/persist/completion_observer.rs @@ -35,7 +35,7 @@ pub struct CompletedPersist { impl CompletedPersist { /// Construct a new completion notification. - pub(super) fn new( + pub(crate) fn new( namespace_id: NamespaceId, table_id: TableId, partition_id: PartitionId, diff --git a/ingester2/src/wal/mod.rs b/ingester2/src/wal/mod.rs index 343f6c0a76..1359a7800e 100644 --- a/ingester2/src/wal/mod.rs +++ b/ingester2/src/wal/mod.rs @@ -4,6 +4,7 @@ //! [`DmlSink`]: crate::dml_sink::DmlSink //! [`DmlOperation`]: dml::DmlOperation +pub(crate) mod reference_tracker; pub(crate) mod rotate_task; mod traits; pub(crate) mod wal_sink; diff --git a/ingester2/src/wal/reference_tracker.rs b/ingester2/src/wal/reference_tracker.rs new file mode 100644 index 0000000000..f5664deb42 --- /dev/null +++ b/ingester2/src/wal/reference_tracker.rs @@ -0,0 +1,646 @@ +//! A WAL file reference tracker, responsible for deleting files that contain +//! entirely persisted data. + +use std::{fmt::Debug, sync::Arc}; + +use async_trait::async_trait; +use data_types::{ + sequence_number_set::{self, SequenceNumberSet}, + SequenceNumber, +}; +use hashbrown::HashMap; +use observability_deps::tracing::{debug, info, warn}; +use tokio::{ + select, + sync::mpsc::{self, error::TrySendError}, +}; +use wal::SegmentId; + +use crate::persist::completion_observer::CompletedPersist; + +/// An abstraction defining the ability of an implementer to delete WAL segment +/// files by ID. +#[async_trait] +pub(crate) trait WalFileDeleter: Debug + Send + Sync + 'static { + /// Delete the WAL segment with the specified [`SegmentId`], or panic if + /// deletion fails. + async fn delete_file(&self, id: SegmentId); +} + +#[async_trait] +impl WalFileDeleter for Arc<wal::Wal> { + async fn delete_file(&self, id: SegmentId) { + self.delete(id).await.expect("failed to drop wal segment"); + } +} + +/// A WAL file reference-count tracker handle. +/// +/// The [`WalReferenceHandle`] feeds three inputs to the [`WalReferenceActor`]: +/// +/// * The [`SequenceNumberSet`] and ID of rotated out WAL segment files +/// * The [`SequenceNumberSet`] of each completed persistence task +/// * All [`SequenceNumber`] of writes that failed to buffer +/// +/// ```text +/// ┌ Write Processing ─ ─ ─ ─ ─ ─ ─ ─ ─ +/// │ +/// │ ┌────────────┐ ┌─────────────┐ +/// │ WAL Rotate │ │ WAL DmlSink │ │ +/// │ └────────────┘ └─────────────┘ +/// │ │ │ +/// │ IDs in │ +/// rotated Failed │ +/// │ segment write IDs +/// file │ │ +/// │ │ │ +/// ─ ─ ─ ─ ─│─ ─ ─ ─ ─ ─ ─ ─ ┼ ─ ─ ─ ─ ┘ +/// ▼ ▼ +/// ┌────────────────────────────────────┐ +/// │ │ +/// │ WalReferenceActor │─ ─▶ Delete Files +/// │ │ +/// └────────────────────────────────────┘ +/// ▲ +/// │ +/// ┌ Persist System ─│─ ─ ─ ─ ─ ─ ─ ─ ─ +/// │ │ +/// │ ┌──────────────────┐ +/// │ Completed │ │ +/// │ │ Persistence │ +/// │ Observer │ │ +/// │ └──────────────────┘ +/// ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘ +/// ``` +/// +/// Using these three streams of information, the [`WalReferenceActor`] computes +/// the number of unpersisted operations referenced in each WAL segment file, +/// and updates this count as more persist operations complete. 
+/// +/// Once all the operations in a given WAL file have been observed as persisted +/// (or failed to apply), the WAL file is no longer required (all data it +/// contains is durable in the object store) and it is deleted. +/// +/// The [`WalReferenceActor`] is tolerant of out-of-order events - that is, a +/// "persisted" event can be received and processed before the WAL file the data +/// is in is known. This is necessary to handle "hot partition persistence" +/// where data is persisted before the WAL file is rotated. +/// +/// The [`WalReferenceActor`] gracefully stops once all [`WalReferenceHandle`] +/// instances to it are dropped. +#[derive(Debug, Clone)] +pub(crate) struct WalReferenceHandle { + /// A stream of newly rotated segment files and the set of + /// [`SequenceNumber`] within them. + file_tx: mpsc::Sender<(SegmentId, SequenceNumberSet)>, + + /// A steam of persist notifications - the [`SequenceNumberSet`] of the + /// persisted data that is now durable in object storage, and which no + /// longer requires WAL entries for. + persist_tx: mpsc::Sender<Arc<CompletedPersist>>, + + /// A stream of [`SequenceNumber`] identifying operations that have been (or + /// will be) added to the WAL, but failed to buffer/complete. These should + /// be treated as if they were "persisted", as they will never be persisted, + /// and are not expected to remain durable (user did not get an ACK). + unbuffered_tx: mpsc::Sender<SequenceNumber>, +} + +impl WalReferenceHandle { + /// Construct a new [`WalReferenceActor`] and [`WalReferenceHandle`] pair. + /// + /// The returned [`WalReferenceActor`] SHOULD be + /// [`WalReferenceActor::run()`] before the handle is used to avoid + /// potential deadlocks. + pub(crate) fn new<T>(wal: T) -> (Self, WalReferenceActor<T>) + where + T: WalFileDeleter, + { + let (file_tx, file_rx) = mpsc::channel(5); + let (persist_tx, persist_rx) = mpsc::channel(50); + let (unbuffered_tx, unbuffered_rx) = mpsc::channel(50); + + let actor = WalReferenceActor { + wal, + persisted: SequenceNumberSet::default(), + wal_files: HashMap::with_capacity(3), + file_rx, + persist_rx, + unbuffered_rx, + }; + + ( + Self { + file_tx, + persist_tx, + unbuffered_tx, + }, + actor, + ) + } + + /// Enqueue a new file rotation event, providing the [`SegmentId`] of the + /// WAL file and the [`SequenceNumberSet`] the WAL segment contains. + pub(crate) async fn enqueue_rotated_file(&self, segment_id: SegmentId, set: SequenceNumberSet) { + Self::send(&self.file_tx, (segment_id, set)).await + } + + /// Enqueue a persist completion notification for newly persisted data. + pub(crate) async fn enqueue_persist_notification(&self, note: Arc<CompletedPersist>) { + Self::send(&self.persist_tx, note).await + } + + /// Enqueue a notification that a write appearing in some WAL segment will + /// not be buffered/persisted (either the active, not-yet-rotated segment or + /// a prior, already-rotated segment). + /// + /// This can happen when a write is added to the WAL segment and + /// subsequently fails to be applied to the in-memory buffer. It is + /// important to track these unusual cases to ensure the WAL file is not + /// kept forever due to an outstanding reference, waiting for the unbuffered + /// write to be persisted (which it never will). + pub(crate) async fn enqueue_unbuffered_write(&self, id: SequenceNumber) { + Self::send(&self.unbuffered_tx, id).await + } + + /// Send `val` over `chan`, logging a warning if `chan` is at capacity. 
+ async fn send<T>(chan: &mpsc::Sender<T>, val: T) + where + T: Debug + Send, + { + match chan.try_send(val) { + Ok(()) => {} + Err(TrySendError::Full(val)) => { + warn!(?val, "notification buffer is full"); + chan.send(val).await.expect("wal reference actor stopped"); + } + Err(TrySendError::Closed(_)) => panic!("wal reference actor stopped"), + } + } +} + +/// A WAL file reference-count tracker. +/// +/// See [`WalReferenceHandle`]. +#[derive(Debug)] +pub(crate) struct WalReferenceActor<T = Arc<wal::Wal>> { + wal: T, + + /// The set of IDs of persisted data that do not yet appear in + /// `wal_segments`, the set of WAL files rotated out of active use. This is + /// an intermediate buffer necessary to tolerate out-of-order persist + /// notifications w.r.t file notifications. + /// + /// IDs that appear in this set are most likely part of the active WAL + /// segment file and should be reconciled when it rotates. + persisted: SequenceNumberSet, + + /// The set of closed WAL segment files, and the set of unpersisted + /// [`SequenceNumber`] they contain. + /// + /// These [`SequenceNumberSet`] are slowly drained / have IDs removed in + /// response to persisted data notifications. Once the set is of length 0, + /// the file can be deleted as all the entries the file contains has been + /// persisted. + /// + /// Invariant: sets in this map are always non-empty. + wal_files: HashMap<wal::SegmentId, SequenceNumberSet>, + + file_rx: mpsc::Receiver<(SegmentId, SequenceNumberSet)>, + persist_rx: mpsc::Receiver<Arc<CompletedPersist>>, + unbuffered_rx: mpsc::Receiver<SequenceNumber>, +} + +impl<T> WalReferenceActor<T> +where + T: WalFileDeleter, +{ + /// Execute the actor task. + /// + /// This task exits once the sender side of the input channels have been + /// dropped. + pub(crate) async fn run(mut self) { + loop { + select! { + // Prefer polling the channels in the specified order. + // + // By consuming file_rx first, there's a greater chance that + // subsequent persist/ignore events can be applied directly to + // the file sets, rather than having to wait in the intermediate + // "persisted" set, reducing memory utilisation. + biased; + + Some((id, f)) = self.file_rx.recv() => self.handle_new_file(id, f).await, + Some(p) = self.persist_rx.recv() => self.handle_persisted(p).await, + Some(i) = self.unbuffered_rx.recv() => self.handle_unbuffered(i).await, + else => break + } + } + + debug!("stopping wal reference counter task"); + } + + /// Track a newly rotated WAL segment, with the given [`SegmentId`] and + /// containing the operations specified in [`SequenceNumberSet`]. + /// + /// This method tolerates an empty `set`. + async fn handle_new_file(&mut self, segment_id: SegmentId, mut set: SequenceNumberSet) { + debug!( + %segment_id, + sequence_number_set = ?set, + "notified of new segment file" + ); + + // Clear the overlap between the "persisted" set, and this new file from + // both. + let n = clear_intersection(&mut self.persisted, &mut set); + if n > 0 { + debug!(n, "released previously persisted IDs"); + } + + // If the file set is now completely empty, it can be immediately + // deleted. + if set.is_empty() { + debug!(n, "immediately dropping empty segment file"); + return delete_file(&self.wal, segment_id).await; + } + + // Otherwise, retain this file for later persist notifications. + // + // Run-optimise the bitmap to minimise memory utilisation of this set. 
+ // This is a relatively fast operation, and the file sets are expected + // to be highly suitable for RLE compression due to the monotonic + // sequence number assignments. + set.run_optimise(); + + // Insert the file set into the files being tracked + assert!(!set.is_empty()); // Invariant: sets in file map are never empty + assert!( + self.wal_files.insert(segment_id, set).is_none(), + "duplicate segment ID" + ); + } + + /// Process a persistence completion notification, decreasing the reference + /// counts against tracked WAL files, and holding any remaining IDs (in the + /// untracked active WAL segment) in a temporary "persisted" buffer. + async fn handle_persisted(&mut self, note: Arc<CompletedPersist>) { + debug!( + namespace_id = %note.namespace_id(), + table_id = %note.table_id(), + partition_id = %note.partition_id(), + sequence_number_set = ?note.sequence_numbers(), + "notified of persisted data" + ); + + self.remove(note.owned_sequence_numbers()).await; + } + + /// Handle a write that has been added to the WAL, but that did not complete + /// / buffer. + /// + /// Because the write was added to the WAL, its ID will be part of the WAL + /// file's [`SequenceNumberSet`], but because the write was not buffered, it + /// will never be persisted and therefore the WAL set will always have an + /// outstanding reference unless it is accounted for here. + async fn handle_unbuffered(&mut self, id: SequenceNumber) { + debug!(sequence_number = id.get(), "notified of unbuffered write"); + + // Delegate to the same code as persisted by presenting this ID as a set + // - the same behaviour is required. + let mut set = SequenceNumberSet::with_capacity(1); + set.add(id); + + self.remove(set).await; + } + + /// Remove the intersection of `set` from all the sets in `self` (file sets, + /// and the untracked / "persisted" buffer set). + /// + /// Deletes all WAL files that are no longer referenced / have unpersisted + /// entries. + async fn remove(&mut self, mut set: SequenceNumberSet) { + // First remove this set from the "persisted" / file-less set. + let n = clear_intersection(&mut set, &mut self.persisted); + if n > 0 { + debug!(n, "released previously persisted IDs"); + } + + if set.is_empty() { + debug!(n, "fully matched previously persisted IDs"); + return; + } + + // And then walk the WAL file sets. + let mut remove_ids = Vec::with_capacity(0); + for (id, file_set) in self.wal_files.iter_mut() { + // Invariant: files in the file set always have at least 1 reference + assert!(!file_set.is_empty()); + + // Early exit the loop if possible. + if set.is_empty() { + break; + } + + // Clear the intersection of both sets. + let n = clear_intersection(&mut set, file_set); + if n == 0 { + continue; + } + + debug!(n, segment_id=%id, "matched file IDs"); + + // At least 1 element was removed from the file set, it may now be + // empty. + if file_set.is_empty() { + remove_ids.push(*id); + } + } + + // Union whatever IDs remain with the file-less persisted set. + if !set.is_empty() { + debug!(n = set.len(), "retaining file-less IDs"); + self.persisted.add_set(&set); + } + + // And delete any newly empty files + for id in remove_ids { + let file_set = self + .wal_files + .remove(&id) + .expect("id was obtained during iter"); + + // Invariant: the file being removed always has no references. + assert!(file_set.is_empty()); + + delete_file(&self.wal, id).await + } + } +} + +/// Remove the intersection of `a` and `b`, from both `a` and `b`, and return +/// the cardinality of the intersection. 
+fn clear_intersection(a: &mut SequenceNumberSet, b: &mut SequenceNumberSet) -> usize { + let intersection = sequence_number_set::intersect(a, b); + + a.remove_set(&intersection); + b.remove_set(&intersection); + + intersection.len() as _ +} + +/// Delete the specified WAL segment from `wal`, and log it at info. +async fn delete_file<T>(wal: &T, id: SegmentId) +where + T: WalFileDeleter, +{ + info!( + %id, + "deleted fully-persisted wal segment" + ); + + wal.delete_file(id).await +} + +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use assert_matches::assert_matches; + use data_types::{NamespaceId, PartitionId, TableId}; + use futures::Future; + use parking_lot::Mutex; + use test_helpers::timeout::FutureTimeout; + use tokio::sync::Notify; + + use super::*; + + /// A mock file deleter that records the IDs it was asked to delete. + #[derive(Debug, Default)] + struct MockWalDeleter { + notify: Notify, + calls: Mutex<Vec<SegmentId>>, + } + + impl MockWalDeleter { + /// Return the set of [`SegmentId`] that have been deleted. + fn calls(&self) -> Vec<SegmentId> { + self.calls.lock().clone() + } + /// Return a future that completes when a file is subsequently deleted, + /// or panics if no file is deleted within 5 seconds. + fn waker(&self) -> impl Future<Output = ()> + '_ { + self.notify + .notified() + .with_timeout_panic(Duration::from_secs(5)) + } + } + + #[async_trait] + impl WalFileDeleter for Arc<MockWalDeleter> { + async fn delete_file(&self, id: SegmentId) { + self.calls.lock().push(id); + self.notify.notify_waiters(); + } + } + + /// Return a [`SequenceNumberSet`] containing `vals`. + fn new_set<T>(vals: T) -> SequenceNumberSet + where + T: IntoIterator<Item = i64>, + { + vals.into_iter().map(SequenceNumber::new).collect() + } + + /// Return a persist completion notification with the given + /// [`SequenceNumberSet`] values. + fn new_note<T>(vals: T) -> Arc<CompletedPersist> + where + T: IntoIterator<Item = i64>, + { + Arc::new(CompletedPersist::new( + NamespaceId::new(1), + TableId::new(2), + PartitionId::new(3), + new_set(vals), + )) + } + + /// Test in-order notifications: + /// + /// * WAL file is rotated and the tracker notified + /// * Multiple persists complete, and an unbuffered notification, draining + /// the references to the file + /// * The file is deleted when refs == 0 + /// * Dropping the handle stops the actor + #[tokio::test] + async fn test_rotate_persist_delete() { + const SEGMENT_ID: SegmentId = SegmentId::new(42); + + let wal = Arc::new(MockWalDeleter::default()); + let (handle, actor) = WalReferenceHandle::new(Arc::clone(&wal)); + + let actor_task = tokio::spawn(actor.run()); + + // Add a file with IDs 1 through 5 + handle + .enqueue_rotated_file(SEGMENT_ID, new_set([1, 2, 3, 4, 5])) + .await; + + // Submit a persist notification that removes refs 1 & 2. + handle.enqueue_persist_notification(new_note([1, 2])).await; + + // Ensure the file was not deleted + assert!(wal.calls().is_empty()); + + // Enqueue a unbuffered notification (out of order) + handle + .enqueue_unbuffered_write(SequenceNumber::new(5)) + .await; + + // Ensure the file was not deleted + assert!(wal.calls().is_empty()); + + // Finally release the last IDs + let waker = wal.waker(); + handle.enqueue_persist_notification(new_note([3, 4])).await; + + // Wait for it to be processed + waker.await; + + // Validate the correct ID was deleted + assert_matches!(wal.calls().as_slice(), &[v] if v == SEGMENT_ID); + + // Assert clean shutdown behaviour. 
+ drop(handle); + actor_task + .with_timeout_panic(Duration::from_secs(5)) + .await + .expect("actor task should stop cleanly") + } + + /// Test in-order notifications: + /// + /// * Multiple persists complete + /// * A WAL file notification is received containing a subset of the + /// already persisted IDs + /// * The file is deleted because refs == 0 + /// * A WAL file notification for a superset of the remaining persisted + /// IDs + /// * The remaining references are persisted/unbuffered + /// * The second WAL file is deleted + /// * Dropping the handle stops the actor + #[tokio::test] + async fn test_persist_all_rotate_delete() { + const SEGMENT_ID_1: SegmentId = SegmentId::new(42); + const SEGMENT_ID_2: SegmentId = SegmentId::new(24); + + let wal = Arc::new(MockWalDeleter::default()); + let (handle, actor) = WalReferenceHandle::new(Arc::clone(&wal)); + + let actor_task = tokio::spawn(actor.run()); + + // Submit a persist notification for the entire set of IDs [1,2,3,4] in + // the upcoming first WAL, and partially the second WAL + handle.enqueue_persist_notification(new_note([2])).await; + handle.enqueue_persist_notification(new_note([1])).await; + handle.enqueue_persist_notification(new_note([3, 4])).await; + + // Add a file with IDs 1, 2, 3 + let waker = wal.waker(); + handle + .enqueue_rotated_file(SEGMENT_ID_1, new_set([1, 2, 3])) + .await; + + // Wait for it to be processed + waker.await; + + // Validate the correct ID was deleted + assert_matches!(wal.calls().as_slice(), &[v] if v == SEGMENT_ID_1); + + // Enqueue the second WAL, covering 4 + handle + .enqueue_rotated_file(SEGMENT_ID_2, new_set([4, 5, 6])) + .await; + + // At this point, the second WAL still has references outstanding (5, 6) + // and should not have been deleted. + assert_eq!(wal.calls().len(), 1); + + // Release one of the remaining two refs + handle.enqueue_persist_notification(new_note([6])).await; + + // Still no deletion + assert_eq!(wal.calls().len(), 1); + + // And finally release the last ref via an unbuffered notification + let waker = wal.waker(); + handle + .enqueue_unbuffered_write(SequenceNumber::new(5)) + .await; + waker.await; + + // Validate the correct ID was deleted + assert_matches!(wal.calls().as_slice(), &[a, b] => { + assert_eq!(a, SEGMENT_ID_1); + assert_eq!(b, SEGMENT_ID_2); + }); + + // Assert clean shutdown behaviour. + drop(handle); + actor_task + .with_timeout_panic(Duration::from_secs(5)) + .await + .expect("actor task should stop cleanly") + } + + #[tokio::test] + async fn test_empty_file_set() { + const SEGMENT_ID: SegmentId = SegmentId::new(42); + + let wal = Arc::new(MockWalDeleter::default()); + let (handle, actor) = WalReferenceHandle::new(Arc::clone(&wal)); + + let actor_task = tokio::spawn(actor.run()); + + // Notifying the actor of a WAL file with no operations in it should not + // cause a panic, and should cause the file to be immediately deleted. + let waker = wal.waker(); + handle + .enqueue_rotated_file(SEGMENT_ID, SequenceNumberSet::default()) + .await; + + // Wait for the file deletion. + waker.await; + assert_matches!(wal.calls().as_slice(), &[v] if v == SEGMENT_ID); + + // Assert clean shutdown behaviour. 
+ drop(handle); + actor_task + .with_timeout_panic(Duration::from_secs(5)) + .await + .expect("actor task should stop cleanly") + } + + #[tokio::test] + #[should_panic(expected = "duplicate segment ID")] + async fn test_duplicate_segment_ids() { + let wal = Arc::new(MockWalDeleter::default()); + let (handle, actor) = WalReferenceHandle::new(Arc::clone(&wal)); + + // Enqueuing a notification before the actor is running should succeed + // because of the channel buffer capacity. + handle + .enqueue_rotated_file(SegmentId::new(42), new_set([1, 2])) + .with_timeout_panic(Duration::from_secs(5)) + .await; + + handle + .enqueue_rotated_file(SegmentId::new(42), new_set([3, 4])) + .with_timeout_panic(Duration::from_secs(5)) + .await; + + // This should panic after processing the second file. + actor.run().with_timeout_panic(Duration::from_secs(5)).await; + } +}
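The tests above drive the actor through rotation, persist, and unbuffered-write notifications, including the out-of-order "hot partition" case where data persists before its WAL file is rotated. As a minimal, synchronous illustration of the bookkeeping those notifications feed (not the actor itself, which is async, channel-driven, and backed by the roaring-bitmap `SequenceNumberSet`), a toy tracker with plain standard-library sets behaves the same way: IDs persisted before their segment rotates wait in a buffer, rotated segments are reconciled against that buffer, and a segment becomes deletable once its set drains to empty. All names and types below are illustrative.

```rust
use std::collections::{BTreeSet, HashMap};

/// Toy, single-threaded stand-in for the WAL reference tracker described above.
#[derive(Debug, Default)]
struct ToyReferenceTracker {
    /// IDs persisted before their segment was rotated (out-of-order events).
    persisted: BTreeSet<u64>,
    /// Rotated segments and the sequence numbers still awaiting persistence.
    segments: HashMap<u32, BTreeSet<u64>>,
    /// Segments whose contents are fully persisted and may be deleted from disk.
    deletable: Vec<u32>,
}

impl ToyReferenceTracker {
    /// Track a newly rotated segment and the sequence numbers it contains.
    fn track_rotated_file(&mut self, segment_id: u32, mut ids: BTreeSet<u64>) {
        // Reconcile with persist notifications that arrived before rotation.
        ids.retain(|id| !self.persisted.remove(id));
        if ids.is_empty() {
            self.deletable.push(segment_id);
        } else {
            self.segments.insert(segment_id, ids);
        }
    }

    /// Mark IDs as persisted (or as unbuffered writes that will never persist).
    fn mark_persisted(&mut self, ids: impl IntoIterator<Item = u64>) {
        for id in ids {
            let matched = self.segments.values_mut().any(|set| set.remove(&id));
            if !matched {
                // Must belong to the active, not-yet-rotated segment: buffer it.
                self.persisted.insert(id);
            }
        }
        // Any segment whose set drained to empty is now safe to delete.
        let drained: Vec<u32> = self
            .segments
            .iter()
            .filter(|(_, set)| set.is_empty())
            .map(|(id, _)| *id)
            .collect();
        for id in drained {
            self.segments.remove(&id);
            self.deletable.push(id);
        }
    }
}

fn main() {
    let mut tracker = ToyReferenceTracker::default();

    // Hot-partition persistence: ID 4 persists before its segment is rotated.
    tracker.mark_persisted([4]);

    // Segment 1 still has unpersisted IDs, so it is retained...
    tracker.track_rotated_file(1, BTreeSet::from([1, 2, 3]));
    // ...while segment 2 only contained the already-persisted ID 4.
    tracker.track_rotated_file(2, BTreeSet::from([4]));
    assert_eq!(tracker.deletable, vec![2]);

    // Once the rest persists, segment 1 becomes deletable too.
    tracker.mark_persisted([1, 2, 3]);
    assert_eq!(tracker.deletable, vec![2, 1]);
    println!("segments safe to delete: {:?}", tracker.deletable);
}
```

The ordering in `main` mirrors the out-of-order scenario the actor's tests exercise: a persist notification can arrive before its segment is known, and the intermediate buffer reconciles the two once the rotation notification lands.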
535ff5f0c884d1511d6b46e6334f9b209a18a9fa
Marco Neumann
2023-08-17 13:59:04
extract InfluxRPC-specific code to `iox_query_influxrpc` (part 1) (#8508)
* refactor: replace test usage of `Predicate` * refactor: remove dead code * refactor: decouple reorg planning from InfluxRPC planning * refactor: move InfluxRPC-specific scan plan construction * refactor: move InfluxRPC-specific "missing columns" handling to `iox_query_influxql` ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: extract InfluxRPC-specific code to `iox_query_influxrpc` (part 1) (#8508) * refactor: replace test usage of `Predicate` * refactor: remove dead code * refactor: decouple reorg planning from InfluxRPC planning * refactor: move InfluxRPC-specific scan plan construction * refactor: move InfluxRPC-specific "missing columns" handling to `iox_query_influxql` --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index 0734ad3618..5502bb2eb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2925,6 +2925,7 @@ name = "iox_query_influxrpc" version = "0.1.0" dependencies = [ "arrow", + "arrow_util", "data_types", "datafusion", "datafusion_util", diff --git a/iox_query/src/frontend.rs b/iox_query/src/frontend.rs index f143585c4c..f3ac2892d0 100644 --- a/iox_query/src/frontend.rs +++ b/iox_query/src/frontend.rs @@ -1,4 +1,3 @@ -pub mod common; pub mod reorg; pub mod sql; @@ -6,12 +5,11 @@ pub mod sql; mod test { use std::sync::Arc; - use arrow_util::assert_batches_eq; use datafusion::physical_plan::{ metrics::{self, MetricValue}, ExecutionPlan, ExecutionPlanVisitor, }; - use datafusion_util::{test_collect_partition, test_execute_partition}; + use datafusion_util::test_execute_partition; use futures::StreamExt; use schema::{merge::SchemaMerger, sort::SortKey, Schema}; @@ -19,8 +17,8 @@ mod test { exec::{split::StreamSplitExec, Executor, ExecutorType}, frontend::reorg::ReorgPlanner, provider::{DeduplicateExec, RecordBatchesExec}, - test::{format_execution_plan, TestChunk}, - QueryChunk, ScanPlanBuilder, + test::TestChunk, + QueryChunk, }; /// A macro to asserts the contents of the extracted metrics is reasonable @@ -57,129 +55,6 @@ mod test { }; } - #[tokio::test] - async fn test_scan_plan_deduplication() { - test_helpers::maybe_start_logging(); - // Create 2 overlapped chunks - let (schema, chunks) = get_test_overlapped_chunks(); - - // Build a logical plan with deduplication - let scan_plan = ScanPlanBuilder::new(Arc::from("t"), &schema) - .with_chunks(chunks) - .build() - .unwrap(); - let logical_plan = scan_plan.plan_builder.build().unwrap(); - - // Build physical plan - let executor = Executor::new_testing(); - let physical_plan = executor - .new_context(ExecutorType::Reorg) - .create_physical_plan(&logical_plan) - .await - .unwrap(); - - insta::assert_yaml_snapshot!( - format_execution_plan(&physical_plan), - @r###" - --- - - " ProjectionExec: expr=[field_int@1 as field_int, field_int2@2 as field_int2, tag1@3 as tag1, time@4 as time]" - - " DeduplicateExec: [tag1@3 ASC,time@4 ASC]" - - " SortPreservingMergeExec: [tag1@3 ASC,time@4 ASC,__chunk_order@0 ASC]" - - " SortExec: expr=[tag1@3 ASC,time@4 ASC,__chunk_order@0 ASC]" - - " RecordBatchesExec: batches_groups=2 batches=2 total_rows=9" - "### - ); - - // Verify output data - // Since data is merged due to deduplication, the two input chunks will be merged into one output chunk - assert_eq!( - physical_plan.output_partitioning().partition_count(), - 1, - "{:?}", - physical_plan.output_partitioning() - ); - let batches0 = test_collect_partition(Arc::clone(&physical_plan), 0).await; - // Data is sorted on tag1 & time. 
One row is removed due to deduplication - let expected = vec![ - "+-----------+------------+------+--------------------------------+", - "| field_int | field_int2 | tag1 | time |", - "+-----------+------------+------+--------------------------------+", - "| 100 | | AL | 1970-01-01T00:00:00.000000050Z |", - "| 70 | | CT | 1970-01-01T00:00:00.000000100Z |", - "| 1000 | | MT | 1970-01-01T00:00:00.000001Z |", - "| 5 | | MT | 1970-01-01T00:00:00.000005Z |", - "| 10 | | MT | 1970-01-01T00:00:00.000007Z |", - "| 70 | 70 | UT | 1970-01-01T00:00:00.000220Z |", - "| 50 | 50 | VT | 1970-01-01T00:00:00.000210Z |", // other row with the same tag1 and time is removed - "| 1000 | 1000 | WA | 1970-01-01T00:00:00.000028Z |", - "+-----------+------------+------+--------------------------------+", - ]; - assert_batches_eq!(&expected, &batches0); - } - - #[tokio::test] - async fn test_scan_plan_without_deduplication() { - test_helpers::maybe_start_logging(); - // Create 2 overlapped chunks - let (schema, chunks) = get_test_chunks(); - - // Build a logical plan without deduplication - let scan_plan = ScanPlanBuilder::new(Arc::from("t"), &schema) - .with_chunks(chunks) - // force it to not deduplicate - .enable_deduplication(false) - .build() - .unwrap(); - let logical_plan = scan_plan.plan_builder.build().unwrap(); - - // Build physical plan - let executor = Executor::new_testing(); - let physical_plan = executor - .new_context(ExecutorType::Reorg) - .create_physical_plan(&logical_plan) - .await - .unwrap(); - - // Verify output data: 2 input chunks are pushed out as 2 output chunks - assert_eq!( - physical_plan.output_partitioning().partition_count(), - 2, - "{:?}", - physical_plan.output_partitioning() - ); - // - // First chunk has 5 rows - let batches0 = test_collect_partition(Arc::clone(&physical_plan), 0).await; - // Data is not sorted on anything - let expected = vec![ - "+-----------+------------+------+--------------------------------+", - "| field_int | field_int2 | tag1 | time |", - "+-----------+------------+------+--------------------------------+", - "| 1000 | | MT | 1970-01-01T00:00:00.000001Z |", - "| 10 | | MT | 1970-01-01T00:00:00.000007Z |", - "| 70 | | CT | 1970-01-01T00:00:00.000000100Z |", - "| 100 | | AL | 1970-01-01T00:00:00.000000050Z |", - "| 5 | | MT | 1970-01-01T00:00:00.000005Z |", - "+-----------+------------+------+--------------------------------+", - ]; - assert_batches_eq!(&expected, &batches0); - // - // Second chunk has 4 rows with duplicates - let batches1 = test_collect_partition(Arc::clone(&physical_plan), 1).await; - // Data is not sorted on anything - let expected = vec![ - "+-----------+------------+------+-----------------------------+", - "| field_int | field_int2 | tag1 | time |", - "+-----------+------------+------+-----------------------------+", - "| 1000 | 1000 | WA | 1970-01-01T00:00:00.000028Z |", - "| 10 | 10 | VT | 1970-01-01T00:00:00.000210Z |", // duplicate 1 - "| 70 | 70 | UT | 1970-01-01T00:00:00.000220Z |", - "| 50 | 50 | VT | 1970-01-01T00:00:00.000210Z |", // duplicate 2 - "+-----------+------------+------+-----------------------------+", - ]; - assert_batches_eq!(&expected, &batches1); - } - #[tokio::test] async fn test_metrics() { test_helpers::maybe_start_logging(); @@ -337,8 +212,8 @@ mod test { extractor.inner } - fn test_chunks(overlapped: bool) -> (Schema, Vec<Arc<dyn QueryChunk>>) { - let max_time = if overlapped { 70000 } else { 7000 }; + fn get_test_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { + let max_time = 7000; let chunk1 = 
Arc::new( TestChunk::new("t") .with_order(1) @@ -371,12 +246,4 @@ mod test { (schema, vec![chunk1, chunk2]) } - - fn get_test_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { - test_chunks(false) - } - - fn get_test_overlapped_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { - test_chunks(true) - } } diff --git a/iox_query/src/frontend/common.rs b/iox_query/src/frontend/common.rs deleted file mode 100644 index 953e746831..0000000000 --- a/iox_query/src/frontend/common.rs +++ /dev/null @@ -1,193 +0,0 @@ -use std::sync::Arc; - -use datafusion::{ - catalog::TableReference, common::tree_node::TreeNode, datasource::provider_as_source, - logical_expr::LogicalPlanBuilder, -}; -use observability_deps::tracing::trace; -use predicate::Predicate; -use schema::Schema; -use snafu::{ResultExt, Snafu}; - -use crate::{ - provider::{ChunkTableProvider, ProviderBuilder}, - util::MissingColumnsToNull, - QueryChunk, -}; - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display( - "gRPC planner got error adding chunk for table {}: {}", - table_name, - source - ))] - CreatingProvider { - table_name: String, - source: crate::provider::Error, - }, - - #[snafu(display( - "Internal gRPC planner rewriting predicate for {}: {}", - table_name, - source - ))] - RewritingFilterPredicate { - table_name: String, - source: datafusion::error::DataFusionError, - }, - - #[snafu(display("Error building plan: {}", source))] - BuildingPlan { - source: datafusion::error::DataFusionError, - }, -} - -pub(crate) type Result<T, E = Error> = std::result::Result<T, E>; - -/// Represents scanning one or more [`QueryChunk`]s. -pub struct ScanPlan { - pub plan_builder: LogicalPlanBuilder, - pub provider: Arc<ChunkTableProvider>, -} - -impl std::fmt::Debug for ScanPlan { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ScanPlan") - .field("plan_builder", &"<...>") - .field("provider", &self.provider) - .finish() - } -} - -impl ScanPlan { - /// Return the schema of the source (the merged schema across all tables) - pub fn schema(&self) -> &Schema { - self.provider.iox_schema() - } -} - -/// Builder for [`ScanPlan`]s which scan the data 1 or more [`QueryChunk`] for -/// IOx's custom query frontends (InfluxRPC and Reorg at the time of -/// writing). -/// -/// The created plan looks like: -/// -/// ```text -/// Filter(predicate) [optional] -/// Scan -/// ``` -/// -/// NOTE: This function assumes the chunks have already been "pruned" -/// based on statistics and will not attempt to prune them -/// further. Some frontends like influxrpc or the reorg planner manage -/// (and thus prune) their own chunklist. 
- -#[derive(Debug)] -pub struct ScanPlanBuilder<'a> { - table_name: Arc<str>, - /// The schema of the resulting table (any chunks that don't have - /// all the necessary columns will be extended appropriately) - table_schema: &'a Schema, - chunks: Vec<Arc<dyn QueryChunk>>, - predicate: Option<&'a Predicate>, - /// Do deduplication - deduplication: bool, -} - -impl<'a> ScanPlanBuilder<'a> { - pub fn new(table_name: Arc<str>, table_schema: &'a Schema) -> Self { - Self { - table_name, - table_schema, - chunks: vec![], - predicate: None, - // always do deduplication in query - deduplication: true, - } - } - - /// Adds `chunks` to the list of Chunks to scan - pub fn with_chunks(mut self, chunks: impl IntoIterator<Item = Arc<dyn QueryChunk>>) -> Self { - self.chunks.extend(chunks.into_iter()); - self - } - - /// Sets the predicate - pub fn with_predicate(mut self, predicate: &'a Predicate) -> Self { - assert!(self.predicate.is_none()); - self.predicate = Some(predicate); - self - } - - /// Deduplication - pub fn enable_deduplication(mut self, deduplication: bool) -> Self { - self.deduplication = deduplication; - self - } - - /// Creates a `ScanPlan` from the specified chunks - pub fn build(self) -> Result<ScanPlan> { - let Self { - table_name, - chunks, - table_schema, - predicate, - deduplication, - } = self; - - assert!(!chunks.is_empty(), "no chunks provided"); - - // Prepare the plan for the table - let mut builder = ProviderBuilder::new(Arc::clone(&table_name), table_schema.clone()) - .with_enable_deduplication(deduplication); - - for chunk in chunks { - builder = builder.add_chunk(chunk); - } - - let provider = builder.build().context(CreatingProviderSnafu { - table_name: table_name.as_ref(), - })?; - - let provider = Arc::new(provider); - let source = provider_as_source(Arc::clone(&provider) as _); - - // Scan all columns (DataFusion optimizer will prune this - // later if possible) - let projection = None; - - // Do not parse the tablename as a SQL identifer, but use as is - let table_ref = TableReference::bare(table_name.to_string()); - let mut plan_builder = - LogicalPlanBuilder::scan(table_ref, source, projection).context(BuildingPlanSnafu)?; - - // Use a filter node to add general predicates + timestamp - // range, if any - if let Some(predicate) = predicate { - if let Some(filter_expr) = predicate.filter_expr() { - // Rewrite expression so it only refers to columns in this chunk - let schema = provider.iox_schema(); - trace!(%table_name, ?filter_expr, "Adding filter expr"); - let mut rewriter = MissingColumnsToNull::new(schema); - let filter_expr = - filter_expr - .rewrite(&mut rewriter) - .context(RewritingFilterPredicateSnafu { - table_name: table_name.as_ref(), - })?; - - trace!(?filter_expr, "Rewritten filter_expr"); - - plan_builder = plan_builder - .filter(filter_expr) - .context(BuildingPlanSnafu)?; - } - } - - Ok(ScanPlan { - plan_builder, - provider, - }) - } -} diff --git a/iox_query/src/frontend/reorg.rs b/iox_query/src/frontend/reorg.rs index 9caa76b70a..cd8838b4c2 100644 --- a/iox_query/src/frontend/reorg.rs +++ b/iox_query/src/frontend/reorg.rs @@ -3,17 +3,17 @@ use std::sync::Arc; use datafusion::{ - logical_expr::{LogicalPlan, LogicalPlanBuilder}, + logical_expr::LogicalPlan, prelude::{col, lit_timestamp_nano}, }; use observability_deps::tracing::debug; use schema::{sort::SortKey, Schema, TIME_COLUMN_NAME}; -use crate::{exec::make_stream_split, util::logical_sort_key_exprs, QueryChunk}; +use crate::{ + exec::make_stream_split, provider::ProviderBuilder, 
util::logical_sort_key_exprs, QueryChunk, +}; use snafu::{ResultExt, Snafu}; -use super::common::ScanPlanBuilder; - #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Chunk schema not compatible for compact plan: {}", source))] @@ -24,11 +24,6 @@ pub enum Error { source: datafusion::error::DataFusionError, }, - #[snafu(display("Reorg planner got error building scan: {}", source))] - BuildingScan { - source: crate::frontend::common::Error, - }, - #[snafu(display( "Reorg planner got error adding creating scan for {}: {}", table_name, @@ -36,7 +31,7 @@ pub enum Error { ))] CreatingScan { table_name: String, - source: super::common::Error, + source: crate::provider::Error, }, } pub type Result<T, E = Error> = std::result::Result<T, E>; @@ -79,20 +74,27 @@ impl ReorgPlanner { where I: IntoIterator<Item = Arc<dyn QueryChunk>>, { - let scan_plan = ScanPlanBuilder::new(table_name, schema) - .with_chunks(chunks) - .build() - .context(BuildingScanSnafu)?; + let mut builder = ProviderBuilder::new(Arc::clone(&table_name), schema.clone()) + .with_enable_deduplication(true); + + for chunk in chunks { + builder = builder.add_chunk(chunk); + } - let plan = scan_plan.plan_builder.build()?; + let provider = builder.build().context(CreatingScanSnafu { + table_name: table_name.as_ref(), + })?; + let plan_builder = Arc::new(provider) + .into_logical_plan_builder() + .context(BuildingPlanSnafu)?; let sort_expr = logical_sort_key_exprs(&output_sort_key); - let plan = LogicalPlanBuilder::from(plan) + let plan = plan_builder .sort(sort_expr) .context(BuildingPlanSnafu)? .build() .context(BuildingPlanSnafu)?; - debug!(table_name=scan_plan.provider.table_name(), plan=%plan.display_indent_schema(), + debug!(table_name=table_name.as_ref(), plan=%plan.display_indent_schema(), "created compact plan for table"); Ok(plan) @@ -179,13 +181,21 @@ impl ReorgPlanner { panic!("Split plan does not accept empty split_times"); } - let scan_plan = ScanPlanBuilder::new(table_name, schema) - .with_chunks(chunks) - .build() - .context(BuildingScanSnafu)?; - let plan = scan_plan.plan_builder.build().context(BuildingPlanSnafu)?; + let mut builder = ProviderBuilder::new(Arc::clone(&table_name), schema.clone()) + .with_enable_deduplication(true); + + for chunk in chunks { + builder = builder.add_chunk(chunk); + } + + let provider = builder.build().context(CreatingScanSnafu { + table_name: table_name.as_ref(), + })?; + let plan_builder = Arc::new(provider) + .into_logical_plan_builder() + .context(BuildingPlanSnafu)?; let sort_expr = logical_sort_key_exprs(&output_sort_key); - let plan = LogicalPlanBuilder::from(plan) + let plan = plan_builder .sort(sort_expr) .context(BuildingPlanSnafu)? .build() @@ -213,7 +223,7 @@ impl ReorgPlanner { } let plan = make_stream_split(plan, split_exprs); - debug!(table_name=scan_plan.provider.table_name(), plan=%plan.display_indent_schema(), + debug!(table_name=table_name.as_ref(), plan=%plan.display_indent_schema(), "created split plan for table"); Ok(plan) diff --git a/iox_query/src/lib.rs b/iox_query/src/lib.rs index 5dbd434a18..e678801e9a 100644 --- a/iox_query/src/lib.rs +++ b/iox_query/src/lib.rs @@ -44,7 +44,6 @@ pub mod pruning; pub mod statistics; pub mod util; -pub use frontend::common::ScanPlanBuilder; pub use query_functions::group_by::{Aggregate, WindowDuration}; /// The name of the virtual column that represents the chunk order. 
diff --git a/iox_query/src/provider.rs b/iox_query/src/provider.rs index 336f4cb83b..e20196bf24 100644 --- a/iox_query/src/provider.rs +++ b/iox_query/src/provider.rs @@ -8,16 +8,17 @@ use arrow::{ error::ArrowError, }; use datafusion::{ - datasource::TableProvider, + datasource::{provider_as_source, TableProvider}, error::{DataFusionError, Result as DataFusionResult}, execution::context::SessionState, - logical_expr::{TableProviderFilterPushDown, TableType}, + logical_expr::{LogicalPlanBuilder, TableProviderFilterPushDown, TableType}, optimizer::utils::{conjunction, split_conjunction}, physical_plan::{ expressions::col as physical_col, filter::FilterExec, projection::ProjectionExec, ExecutionPlan, }, prelude::Expr, + sql::TableReference, }; use observability_deps::tracing::trace; use schema::{sort::SortKey, Schema}; @@ -177,6 +178,22 @@ impl ChunkTableProvider { pub fn deduplication(&self) -> bool { self.deduplication } + + /// Convert into a logical plan builder. + pub fn into_logical_plan_builder( + self: Arc<Self>, + ) -> Result<LogicalPlanBuilder, DataFusionError> { + let table_name = self.table_name().to_owned(); + let source = provider_as_source(self as _); + + // Scan all columns (DataFusion optimizer will prune this + // later if possible) + let projection = None; + + // Do not parse the tablename as a SQL identifer, but use as is + let table_ref = TableReference::bare(table_name); + LogicalPlanBuilder::scan(table_ref, source, projection) + } } #[async_trait] @@ -306,10 +323,10 @@ mod test { use super::*; use crate::{ exec::IOxSessionContext, + pruning::retention_expr, test::{format_execution_plan, TestChunk}, }; use datafusion::prelude::{col, lit}; - use predicate::Predicate; #[tokio::test] async fn provider_scan_default() { @@ -516,10 +533,7 @@ mod test { #[tokio::test] async fn provider_scan_retention() { let table_name = "t"; - let pred = Predicate::default() - .with_retention(100) - .filter_expr() - .unwrap(); + let pred = retention_expr(100); let chunk1 = Arc::new( TestChunk::new(table_name) .with_id(1) diff --git a/iox_query/src/util.rs b/iox_query/src/util.rs index 51d1bb335c..28371db745 100644 --- a/iox_query/src/util.rs +++ b/iox_query/src/util.rs @@ -2,38 +2,34 @@ use std::{ cmp::{max, min}, - convert::TryInto, sync::Arc, }; use arrow::{ - array::TimestampNanosecondArray, - compute::SortOptions, - datatypes::{DataType, Schema as ArrowSchema}, + array::TimestampNanosecondArray, compute::SortOptions, datatypes::Schema as ArrowSchema, record_batch::RecordBatch, }; use data_types::TimestampMinMax; use datafusion::{ self, - common::{tree_node::TreeNodeRewriter, DFSchema, ToDFSchema}, + common::ToDFSchema, datasource::{provider_as_source, MemTable}, - error::{DataFusionError, Result as DatafusionResult}, + error::DataFusionError, execution::context::ExecutionProps, - logical_expr::{BinaryExpr, ExprSchemable, LogicalPlan, LogicalPlanBuilder}, + logical_expr::{LogicalPlan, LogicalPlanBuilder}, optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext}, physical_expr::create_physical_expr, physical_plan::{ expressions::{col as physical_col, PhysicalSortExpr}, ColumnStatistics, ExecutionPlan, PhysicalExpr, Statistics, }, - prelude::{binary_expr, lit, Column, Expr}, + prelude::{Column, Expr}, scalar::ScalarValue, }; use itertools::Itertools; use observability_deps::tracing::trace; -use predicate::rpc_predicate::{FIELD_COLUMN_NAME, MEASUREMENT_COLUMN_NAME}; use schema::{sort::SortKey, InfluxColumnType, Schema, TIME_COLUMN_NAME}; use snafu::{ensure, OptionExt, ResultExt, 
Snafu}; @@ -143,98 +139,6 @@ pub fn df_physical_expr( create_physical_expr(&expr, df_schema.as_ref(), schema.as_ref(), &props) } -/// Rewrites the provided expr such that references to any column that -/// are not present in `schema` become null. -/// -/// So for example, if the predicate is -/// -/// `(STATE = 'CA') OR (READING >0)` -/// -/// but the schema only has `STATE` (and not `READING`), then the -/// predicate is rewritten to -/// -/// `(STATE = 'CA') OR (NULL >0)` -/// -/// This matches the Influx data model where any value that is not -/// explicitly specified is implicitly NULL. Since different chunks -/// and measurements can have different subsets of the columns, only -/// parts of the predicate make sense. -/// See comments on 'is_null_column' -#[derive(Debug)] -pub struct MissingColumnsToNull<'a> { - schema: &'a Schema, - df_schema: DFSchema, -} - -impl<'a> MissingColumnsToNull<'a> { - pub fn new(schema: &'a Schema) -> Self { - let df_schema: DFSchema = schema - .as_arrow() - .as_ref() - .clone() - .try_into() - .expect("Create DF Schema"); - - Self { schema, df_schema } - } - - /// Returns true if `expr` is a `Expr::Column` reference to a - /// column that doesn't exist in this schema - fn is_null_column(&self, expr: &Expr) -> bool { - if let Expr::Column(column) = &expr { - if column.name != MEASUREMENT_COLUMN_NAME && column.name != FIELD_COLUMN_NAME { - return self.schema.find_index_of(&column.name).is_none(); - } - } - false - } - - /// Rewrites an arg like col if col refers to a non existent - /// column into a null literal with "type" of `other_arg`, if possible - fn rewrite_op_arg(&self, arg: Expr, other_arg: &Expr) -> DatafusionResult<Expr> { - if self.is_null_column(&arg) { - let other_datatype = match other_arg.get_type(&self.df_schema) { - Ok(other_datatype) => other_datatype, - Err(_) => { - // the other arg is also unknown and will be - // rewritten, default to Int32 (sins due to - // https://github.com/apache/arrow-datafusion/issues/1179) - DataType::Int32 - } - }; - - let scalar: ScalarValue = (&other_datatype).try_into()?; - Ok(Expr::Literal(scalar)) - } else { - Ok(arg) - } - } -} - -impl<'a> TreeNodeRewriter for MissingColumnsToNull<'a> { - type N = Expr; - - fn mutate(&mut self, expr: Expr) -> DatafusionResult<Expr> { - // Ideally this would simply find all Expr::Columns and - // replace them with a constant NULL value. 
However, doing do - // is blocked on DF bug - // https://github.com/apache/arrow-datafusion/issues/1179 - // - // Until then, we need to know what type of expr the column is - // being compared with, so workaround by finding the datatype of the other arg - match expr { - Expr::BinaryExpr(BinaryExpr { left, op, right }) => { - let left = self.rewrite_op_arg(*left, &right)?; - let right = self.rewrite_op_arg(*right, &left)?; - Ok(binary_expr(left, op, right)) - } - Expr::IsNull(expr) if self.is_null_column(&expr) => Ok(lit(true)), - Expr::IsNotNull(expr) if self.is_null_column(&expr) => Ok(lit(false)), - expr => Ok(expr), - } - } -} - /// Return min and max for column `time` of the given set of record batches pub fn compute_timenanosecond_min_max<'a, I>(batches: I) -> Result<TimestampMinMax> where @@ -326,127 +230,11 @@ pub fn create_basic_summary( #[cfg(test)] mod tests { - use arrow::datatypes::DataType; - use datafusion::{ - common::tree_node::TreeNode, - prelude::{col, lit}, - scalar::ScalarValue, - }; + use datafusion::scalar::ScalarValue; use schema::{builder::SchemaBuilder, InfluxFieldType}; use super::*; - #[test] - fn test_missing_colums_to_null() { - let schema = SchemaBuilder::new() - .tag("tag") - .field("str", DataType::Utf8) - .unwrap() - .field("int", DataType::Int64) - .unwrap() - .field("uint", DataType::UInt64) - .unwrap() - .field("float", DataType::Float64) - .unwrap() - .field("bool", DataType::Boolean) - .unwrap() - .build() - .unwrap(); - - // The fact that these need to be typed is due to - // https://github.com/apache/arrow-datafusion/issues/1179 - let utf8_null = Expr::Literal(ScalarValue::Utf8(None)); - let int32_null = Expr::Literal(ScalarValue::Int32(None)); - - // no rewrite - let expr = lit(1); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // tag != str (no rewrite) - let expr = col("tag").not_eq(col("str")); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // tag == str (no rewrite) - let expr = col("tag").eq(col("str")); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // int < 5 (no rewrite, int part of schema) - let expr = col("int").lt(lit(5)); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // unknown < 5 --> NULL < 5 (unknown not in schema) - let expr = col("unknown").lt(lit(5)); - let expected = int32_null.clone().lt(lit(5)); - assert_rewrite(&schema, expr, expected); - - // 5 < unknown --> 5 < NULL (unknown not in schema) - let expr = lit(5).lt(col("unknown")); - let expected = lit(5).lt(int32_null.clone()); - assert_rewrite(&schema, expr, expected); - - // _measurement < 5 --> _measurement < 5 (special column) - let expr = col("_measurement").lt(lit(5)); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // _field < 5 --> _field < 5 (special column) - let expr = col("_field").lt(lit(5)); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // _field < 5 OR col("unknown") < 5 --> _field < 5 OR (NULL < 5) - let expr = col("_field").lt(lit(5)).or(col("unknown").lt(lit(5))); - let expected = col("_field").lt(lit(5)).or(int32_null.clone().lt(lit(5))); - assert_rewrite(&schema, expr, expected); - - // unknown < unknown2 --> NULL < NULL (both unknown columns) - let expr = col("unknown").lt(col("unknown2")); - let expected = int32_null.clone().lt(int32_null); - assert_rewrite(&schema, expr, expected); - - // int < 5 OR unknown != "foo" - let expr = 
col("int").lt(lit(5)).or(col("unknown").not_eq(lit("foo"))); - let expected = col("int").lt(lit(5)).or(utf8_null.not_eq(lit("foo"))); - assert_rewrite(&schema, expr, expected); - - // int IS NULL - let expr = col("int").is_null(); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // unknown IS NULL --> true - let expr = col("unknown").is_null(); - let expected = lit(true); - assert_rewrite(&schema, expr, expected); - - // int IS NOT NULL - let expr = col("int").is_not_null(); - let expected = expr.clone(); - assert_rewrite(&schema, expr, expected); - - // unknown IS NOT NULL --> false - let expr = col("unknown").is_not_null(); - let expected = lit(false); - assert_rewrite(&schema, expr, expected); - } - - fn assert_rewrite(schema: &Schema, expr: Expr, expected: Expr) { - let mut rewriter = MissingColumnsToNull::new(schema); - let rewritten_expr = expr - .clone() - .rewrite(&mut rewriter) - .expect("Rewrite successful"); - - assert_eq!( - &rewritten_expr, &expected, - "Mismatch rewriting\nInput: {expr}\nRewritten: {rewritten_expr}\nExpected: {expected}" - ); - } - #[test] fn test_create_basic_summary_no_columns_no_rows() { let schema = SchemaBuilder::new().build().unwrap(); diff --git a/iox_query_influxrpc/Cargo.toml b/iox_query_influxrpc/Cargo.toml index 745e8ced1a..bf8cd300c8 100644 --- a/iox_query_influxrpc/Cargo.toml +++ b/iox_query_influxrpc/Cargo.toml @@ -21,6 +21,7 @@ snafu = "0.7" workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] # In alphabetical order +arrow_util = { path = "../arrow_util" } test_helpers = { path = "../test_helpers" } insta = { version = "1", features = ["yaml"] } tokio = { version = "1.32", features = ["macros", "parking_lot"] } diff --git a/iox_query_influxrpc/src/lib.rs b/iox_query_influxrpc/src/lib.rs index 721d489eca..75373fa632 100644 --- a/iox_query_influxrpc/src/lib.rs +++ b/iox_query_influxrpc/src/lib.rs @@ -36,7 +36,6 @@ use iox_query::{ field::FieldColumns, fieldlist::Field, make_non_null_checker, make_schema_pivot, stringset::StringSet, IOxSessionContext, }, - frontend::common::ScanPlanBuilder, plan::{ fieldlist::FieldListPlan, seriesset::{SeriesSetPlan, SeriesSetPlans}, @@ -62,6 +61,11 @@ use snafu::{ensure, OptionExt, ResultExt, Snafu}; use std::collections::{BTreeMap, HashSet as StdHashSet}; use std::{cmp::Reverse, collections::BTreeSet, sync::Arc}; +use crate::scan_plan::ScanPlanBuilder; + +mod missing_columns; +mod scan_plan; + const CONCURRENT_TABLE_JOBS: usize = 10; #[derive(Debug, Snafu)] @@ -139,9 +143,7 @@ pub enum Error { }, #[snafu(display("Error creating scan: {}", source))] - CreatingScan { - source: iox_query::frontend::common::Error, - }, + CreatingScan { source: crate::scan_plan::Error }, #[snafu(display( "gRPC planner got error casting aggregate {:?} for {}: {}", @@ -194,8 +196,8 @@ impl Error { } } -impl From<iox_query::frontend::common::Error> for Error { - fn from(source: iox_query::frontend::common::Error) -> Self { +impl From<crate::scan_plan::Error> for Error { + fn from(source: crate::scan_plan::Error) -> Self { Self::CreatingScan { source } } } diff --git a/iox_query_influxrpc/src/missing_columns.rs b/iox_query_influxrpc/src/missing_columns.rs new file mode 100644 index 0000000000..0254e6e4b5 --- /dev/null +++ b/iox_query_influxrpc/src/missing_columns.rs @@ -0,0 +1,226 @@ +use arrow::datatypes::DataType; +use datafusion::{ + common::{tree_node::TreeNodeRewriter, DFSchema}, + error::Result as DatafusionResult, + logical_expr::{BinaryExpr, ExprSchemable}, + 
prelude::{binary_expr, lit, Expr}, + scalar::ScalarValue, +}; +use predicate::rpc_predicate::{FIELD_COLUMN_NAME, MEASUREMENT_COLUMN_NAME}; +use schema::Schema; + +/// Rewrites the provided expr such that references to any column that +/// are not present in `schema` become null. +/// +/// So for example, if the predicate is +/// +/// `(STATE = 'CA') OR (READING >0)` +/// +/// but the schema only has `STATE` (and not `READING`), then the +/// predicate is rewritten to +/// +/// `(STATE = 'CA') OR (NULL >0)` +/// +/// This matches the Influx data model where any value that is not +/// explicitly specified is implicitly NULL. Since different chunks +/// and measurements can have different subsets of the columns, only +/// parts of the predicate make sense. +/// See comments on 'is_null_column' +#[derive(Debug)] +pub struct MissingColumnsToNull<'a> { + schema: &'a Schema, + df_schema: DFSchema, +} + +impl<'a> MissingColumnsToNull<'a> { + pub fn new(schema: &'a Schema) -> Self { + let df_schema: DFSchema = schema + .as_arrow() + .as_ref() + .clone() + .try_into() + .expect("Create DF Schema"); + + Self { schema, df_schema } + } + + /// Returns true if `expr` is a `Expr::Column` reference to a + /// column that doesn't exist in this schema + fn is_null_column(&self, expr: &Expr) -> bool { + if let Expr::Column(column) = &expr { + if column.name != MEASUREMENT_COLUMN_NAME && column.name != FIELD_COLUMN_NAME { + return self.schema.find_index_of(&column.name).is_none(); + } + } + false + } + + /// Rewrites an arg like col if col refers to a non existent + /// column into a null literal with "type" of `other_arg`, if possible + fn rewrite_op_arg(&self, arg: Expr, other_arg: &Expr) -> DatafusionResult<Expr> { + if self.is_null_column(&arg) { + let other_datatype = match other_arg.get_type(&self.df_schema) { + Ok(other_datatype) => other_datatype, + Err(_) => { + // the other arg is also unknown and will be + // rewritten, default to Int32 (sins due to + // https://github.com/apache/arrow-datafusion/issues/1179) + DataType::Int32 + } + }; + + let scalar: ScalarValue = (&other_datatype).try_into()?; + Ok(Expr::Literal(scalar)) + } else { + Ok(arg) + } + } +} + +impl<'a> TreeNodeRewriter for MissingColumnsToNull<'a> { + type N = Expr; + + fn mutate(&mut self, expr: Expr) -> DatafusionResult<Expr> { + // Ideally this would simply find all Expr::Columns and + // replace them with a constant NULL value. 
However, doing do + // is blocked on DF bug + // https://github.com/apache/arrow-datafusion/issues/1179 + // + // Until then, we need to know what type of expr the column is + // being compared with, so workaround by finding the datatype of the other arg + match expr { + Expr::BinaryExpr(BinaryExpr { left, op, right }) => { + let left = self.rewrite_op_arg(*left, &right)?; + let right = self.rewrite_op_arg(*right, &left)?; + Ok(binary_expr(left, op, right)) + } + Expr::IsNull(expr) if self.is_null_column(&expr) => Ok(lit(true)), + Expr::IsNotNull(expr) if self.is_null_column(&expr) => Ok(lit(false)), + expr => Ok(expr), + } + } +} + +#[cfg(test)] +mod tests { + use arrow::datatypes::DataType; + use datafusion::{ + common::tree_node::TreeNode, + prelude::{col, lit}, + scalar::ScalarValue, + }; + use schema::builder::SchemaBuilder; + + use super::*; + + #[test] + fn test_missing_colums_to_null() { + let schema = SchemaBuilder::new() + .tag("tag") + .field("str", DataType::Utf8) + .unwrap() + .field("int", DataType::Int64) + .unwrap() + .field("uint", DataType::UInt64) + .unwrap() + .field("float", DataType::Float64) + .unwrap() + .field("bool", DataType::Boolean) + .unwrap() + .build() + .unwrap(); + + // The fact that these need to be typed is due to + // https://github.com/apache/arrow-datafusion/issues/1179 + let utf8_null = Expr::Literal(ScalarValue::Utf8(None)); + let int32_null = Expr::Literal(ScalarValue::Int32(None)); + + // no rewrite + let expr = lit(1); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // tag != str (no rewrite) + let expr = col("tag").not_eq(col("str")); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // tag == str (no rewrite) + let expr = col("tag").eq(col("str")); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // int < 5 (no rewrite, int part of schema) + let expr = col("int").lt(lit(5)); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // unknown < 5 --> NULL < 5 (unknown not in schema) + let expr = col("unknown").lt(lit(5)); + let expected = int32_null.clone().lt(lit(5)); + assert_rewrite(&schema, expr, expected); + + // 5 < unknown --> 5 < NULL (unknown not in schema) + let expr = lit(5).lt(col("unknown")); + let expected = lit(5).lt(int32_null.clone()); + assert_rewrite(&schema, expr, expected); + + // _measurement < 5 --> _measurement < 5 (special column) + let expr = col("_measurement").lt(lit(5)); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // _field < 5 --> _field < 5 (special column) + let expr = col("_field").lt(lit(5)); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // _field < 5 OR col("unknown") < 5 --> _field < 5 OR (NULL < 5) + let expr = col("_field").lt(lit(5)).or(col("unknown").lt(lit(5))); + let expected = col("_field").lt(lit(5)).or(int32_null.clone().lt(lit(5))); + assert_rewrite(&schema, expr, expected); + + // unknown < unknown2 --> NULL < NULL (both unknown columns) + let expr = col("unknown").lt(col("unknown2")); + let expected = int32_null.clone().lt(int32_null); + assert_rewrite(&schema, expr, expected); + + // int < 5 OR unknown != "foo" + let expr = col("int").lt(lit(5)).or(col("unknown").not_eq(lit("foo"))); + let expected = col("int").lt(lit(5)).or(utf8_null.not_eq(lit("foo"))); + assert_rewrite(&schema, expr, expected); + + // int IS NULL + let expr = col("int").is_null(); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + 
// unknown IS NULL --> true + let expr = col("unknown").is_null(); + let expected = lit(true); + assert_rewrite(&schema, expr, expected); + + // int IS NOT NULL + let expr = col("int").is_not_null(); + let expected = expr.clone(); + assert_rewrite(&schema, expr, expected); + + // unknown IS NOT NULL --> false + let expr = col("unknown").is_not_null(); + let expected = lit(false); + assert_rewrite(&schema, expr, expected); + } + + fn assert_rewrite(schema: &Schema, expr: Expr, expected: Expr) { + let mut rewriter = MissingColumnsToNull::new(schema); + let rewritten_expr = expr + .clone() + .rewrite(&mut rewriter) + .expect("Rewrite successful"); + + assert_eq!( + &rewritten_expr, &expected, + "Mismatch rewriting\nInput: {expr}\nRewritten: {rewritten_expr}\nExpected: {expected}" + ); + } +} diff --git a/iox_query_influxrpc/src/scan_plan.rs b/iox_query_influxrpc/src/scan_plan.rs new file mode 100644 index 0000000000..ca9cde7418 --- /dev/null +++ b/iox_query_influxrpc/src/scan_plan.rs @@ -0,0 +1,280 @@ +use std::sync::Arc; + +use datafusion::{common::tree_node::TreeNode, logical_expr::LogicalPlanBuilder}; +use observability_deps::tracing::trace; +use predicate::Predicate; +use schema::Schema; +use snafu::{ResultExt, Snafu}; + +use iox_query::{ + provider::{ChunkTableProvider, ProviderBuilder}, + QueryChunk, +}; + +use crate::missing_columns::MissingColumnsToNull; + +#[derive(Debug, Snafu)] +pub enum Error { + #[snafu(display( + "gRPC planner got error adding chunk for table {}: {}", + table_name, + source + ))] + CreatingProvider { + table_name: String, + source: iox_query::provider::Error, + }, + + #[snafu(display( + "Internal gRPC planner rewriting predicate for {}: {}", + table_name, + source + ))] + RewritingFilterPredicate { + table_name: String, + source: datafusion::error::DataFusionError, + }, + + #[snafu(display("Error building plan: {}", source))] + BuildingPlan { + source: datafusion::error::DataFusionError, + }, +} + +pub(crate) type Result<T, E = Error> = std::result::Result<T, E>; + +/// Represents scanning one or more [`QueryChunk`]s. +pub struct ScanPlan { + pub plan_builder: LogicalPlanBuilder, + pub provider: Arc<ChunkTableProvider>, +} + +impl std::fmt::Debug for ScanPlan { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ScanPlan") + .field("plan_builder", &"<...>") + .field("provider", &self.provider) + .finish() + } +} + +impl ScanPlan { + /// Return the schema of the source (the merged schema across all tables) + pub fn schema(&self) -> &Schema { + self.provider.iox_schema() + } +} + +/// Builder for [`ScanPlan`]s which scan the data 1 or more [`QueryChunk`] for +/// IOx's custom query frontends (InfluxRPC and Reorg at the time of +/// writing). +/// +/// The created plan looks like: +/// +/// ```text +/// Filter(predicate) [optional] +/// Scan +/// ``` +/// +/// NOTE: This function assumes the chunks have already been "pruned" +/// based on statistics and will not attempt to prune them +/// further. Some frontends like influxrpc or the reorg planner manage +/// (and thus prune) their own chunklist. 
+ +#[derive(Debug)] +pub struct ScanPlanBuilder<'a> { + table_name: Arc<str>, + /// The schema of the resulting table (any chunks that don't have + /// all the necessary columns will be extended appropriately) + table_schema: &'a Schema, + chunks: Vec<Arc<dyn QueryChunk>>, + predicate: Option<&'a Predicate>, +} + +impl<'a> ScanPlanBuilder<'a> { + pub fn new(table_name: Arc<str>, table_schema: &'a Schema) -> Self { + Self { + table_name, + table_schema, + chunks: vec![], + predicate: None, + } + } + + /// Adds `chunks` to the list of Chunks to scan + pub fn with_chunks(mut self, chunks: impl IntoIterator<Item = Arc<dyn QueryChunk>>) -> Self { + self.chunks.extend(chunks.into_iter()); + self + } + + /// Sets the predicate + pub fn with_predicate(mut self, predicate: &'a Predicate) -> Self { + assert!(self.predicate.is_none()); + self.predicate = Some(predicate); + self + } + + /// Creates a `ScanPlan` from the specified chunks + pub fn build(self) -> Result<ScanPlan> { + let Self { + table_name, + chunks, + table_schema, + predicate, + } = self; + + assert!(!chunks.is_empty(), "no chunks provided"); + + // Prepare the plan for the table + let mut builder = ProviderBuilder::new(Arc::clone(&table_name), table_schema.clone()) + .with_enable_deduplication(true); + + for chunk in chunks { + builder = builder.add_chunk(chunk); + } + + let provider = builder.build().context(CreatingProviderSnafu { + table_name: table_name.as_ref(), + })?; + let provider = Arc::new(provider); + let mut plan_builder = Arc::clone(&provider) + .into_logical_plan_builder() + .context(BuildingPlanSnafu)?; + + // Use a filter node to add general predicates + timestamp + // range, if any + if let Some(predicate) = predicate { + if let Some(filter_expr) = predicate.filter_expr() { + // Rewrite expression so it only refers to columns in this chunk + let schema = provider.iox_schema(); + trace!(%table_name, ?filter_expr, "Adding filter expr"); + let mut rewriter = MissingColumnsToNull::new(schema); + let filter_expr = + filter_expr + .rewrite(&mut rewriter) + .context(RewritingFilterPredicateSnafu { + table_name: table_name.as_ref(), + })?; + + trace!(?filter_expr, "Rewritten filter_expr"); + + plan_builder = plan_builder + .filter(filter_expr) + .context(BuildingPlanSnafu)?; + } + } + + Ok(ScanPlan { + plan_builder, + provider, + }) + } +} + +#[cfg(test)] +mod tests { + use arrow_util::assert_batches_eq; + use datafusion_util::test_collect_partition; + use iox_query::{ + exec::{Executor, ExecutorType}, + test::{format_execution_plan, TestChunk}, + }; + use schema::merge::SchemaMerger; + + use super::*; + + #[tokio::test] + async fn test_scan_plan_deduplication() { + test_helpers::maybe_start_logging(); + // Create 2 overlapped chunks + let (schema, chunks) = get_test_overlapped_chunks(); + + // Build a logical plan with deduplication + let scan_plan = ScanPlanBuilder::new(Arc::from("t"), &schema) + .with_chunks(chunks) + .build() + .unwrap(); + let logical_plan = scan_plan.plan_builder.build().unwrap(); + + // Build physical plan + let executor = Executor::new_testing(); + let physical_plan = executor + .new_context(ExecutorType::Reorg) + .create_physical_plan(&logical_plan) + .await + .unwrap(); + + insta::assert_yaml_snapshot!( + format_execution_plan(&physical_plan), + @r###" + --- + - " ProjectionExec: expr=[field_int@1 as field_int, field_int2@2 as field_int2, tag1@3 as tag1, time@4 as time]" + - " DeduplicateExec: [tag1@3 ASC,time@4 ASC]" + - " SortPreservingMergeExec: [tag1@3 ASC,time@4 ASC,__chunk_order@0 ASC]" + - " 
SortExec: expr=[tag1@3 ASC,time@4 ASC,__chunk_order@0 ASC]" + - " RecordBatchesExec: batches_groups=2 batches=2 total_rows=9" + "### + ); + + // Verify output data + // Since data is merged due to deduplication, the two input chunks will be merged into one output chunk + assert_eq!( + physical_plan.output_partitioning().partition_count(), + 1, + "{:?}", + physical_plan.output_partitioning() + ); + let batches0 = test_collect_partition(Arc::clone(&physical_plan), 0).await; + // Data is sorted on tag1 & time. One row is removed due to deduplication + let expected = vec![ + "+-----------+------------+------+--------------------------------+", + "| field_int | field_int2 | tag1 | time |", + "+-----------+------------+------+--------------------------------+", + "| 100 | | AL | 1970-01-01T00:00:00.000000050Z |", + "| 70 | | CT | 1970-01-01T00:00:00.000000100Z |", + "| 1000 | | MT | 1970-01-01T00:00:00.000001Z |", + "| 5 | | MT | 1970-01-01T00:00:00.000005Z |", + "| 10 | | MT | 1970-01-01T00:00:00.000007Z |", + "| 70 | 70 | UT | 1970-01-01T00:00:00.000220Z |", + "| 50 | 50 | VT | 1970-01-01T00:00:00.000210Z |", // other row with the same tag1 and time is removed + "| 1000 | 1000 | WA | 1970-01-01T00:00:00.000028Z |", + "+-----------+------------+------+--------------------------------+", + ]; + assert_batches_eq!(&expected, &batches0); + } + + fn get_test_overlapped_chunks() -> (Schema, Vec<Arc<dyn QueryChunk>>) { + let max_time = 70000; + let chunk1 = Arc::new( + TestChunk::new("t") + .with_order(1) + .with_partition(1) + .with_time_column_with_stats(Some(50), Some(max_time)) + .with_tag_column_with_stats("tag1", Some("AL"), Some("MT")) + .with_i64_field_column("field_int") + .with_five_rows_of_data(), + ); + + // Chunk 2 has an extra field, and only 4 rows + let chunk2 = Arc::new( + TestChunk::new("t") + .with_order(2) + .with_partition(1) + .with_time_column_with_stats(Some(28000), Some(220000)) + .with_tag_column_with_stats("tag1", Some("UT"), Some("WA")) + .with_i64_field_column("field_int") + .with_i64_field_column("field_int2") + .with_may_contain_pk_duplicates(true) + .with_four_rows_of_data(), + ); + + let schema = SchemaMerger::new() + .merge(chunk1.schema()) + .unwrap() + .merge(chunk2.schema()) + .unwrap() + .build(); + + (schema, vec![chunk1, chunk2]) + } +}
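The `MissingColumnsToNull` rewriter relocated in this commit turns references to columns that are absent from a chunk's schema into NULL literals, so a predicate such as `(STATE = 'CA') OR (READING > 0)` still evaluates sensibly against a chunk that has no `READING` column. Below is a minimal, self-contained sketch of that idea on a toy expression type; `ToyExpr` and `rewrite` are illustrative names only, not the DataFusion `Expr`/`TreeNodeRewriter` code in the diff, and the sketch substitutes an untyped `Null` where the real rewriter picks a typed null `ScalarValue` based on the other operand (a workaround for apache/arrow-datafusion#1179).

```rust
use std::collections::HashSet;

// Toy stand-in for a predicate expression tree (illustrative only).
#[derive(Debug)]
enum ToyExpr {
    Column(String),
    Str(String),
    Int(i64),
    Null,
    Eq(Box<ToyExpr>, Box<ToyExpr>),
    Gt(Box<ToyExpr>, Box<ToyExpr>),
    Or(Box<ToyExpr>, Box<ToyExpr>),
}

/// Replace any column reference that is not in `schema_cols` with NULL,
/// mirroring the Influx data model where unspecified columns are implicitly NULL.
fn rewrite(expr: ToyExpr, schema_cols: &HashSet<&str>) -> ToyExpr {
    // Helper that rewrites a child expression and re-boxes it.
    let rw = |e: ToyExpr| Box::new(rewrite(e, schema_cols));
    match expr {
        ToyExpr::Column(name) if !schema_cols.contains(name.as_str()) => ToyExpr::Null,
        ToyExpr::Eq(l, r) => ToyExpr::Eq(rw(*l), rw(*r)),
        ToyExpr::Gt(l, r) => ToyExpr::Gt(rw(*l), rw(*r)),
        ToyExpr::Or(l, r) => ToyExpr::Or(rw(*l), rw(*r)),
        other => other,
    }
}

fn main() {
    // The schema only has STATE; READING is missing.
    let schema_cols: HashSet<&str> = ["STATE"].into_iter().collect();

    // (STATE = 'CA') OR (READING > 0)
    let pred = ToyExpr::Or(
        Box::new(ToyExpr::Eq(
            Box::new(ToyExpr::Column("STATE".into())),
            Box::new(ToyExpr::Str("CA".into())),
        )),
        Box::new(ToyExpr::Gt(
            Box::new(ToyExpr::Column("READING".into())),
            Box::new(ToyExpr::Int(0)),
        )),
    );

    // Prints the rewritten form, equivalent to (STATE = 'CA') OR (NULL > 0).
    println!("{:?}", rewrite(pred, &schema_cols));
}
```

The same rewrite keeps `_measurement` and `_field` untouched in the real code, since those synthetic columns are resolved elsewhere in the InfluxRPC planner.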
d1a54cf0d479f27193e2b24478412ff4a5be1f05
Christopher M. Wolff
2023-03-01 15:32:57
allow no lower bound gap fill implementation (#7104)
* feat: allow no lower bound gap fill implementation * chore: clippy * refactor: code review feedback ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: allow no lower bound gap fill implementation (#7104) * feat: allow no lower bound gap fill implementation * chore: clippy * refactor: code review feedback --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/iox_query/src/exec/gapfill.rs b/iox_query/src/exec/gapfill.rs index 68cb17bb66..e862a57a16 100644 --- a/iox_query/src/exec/gapfill.rs +++ b/iox_query/src/exec/gapfill.rs @@ -48,24 +48,29 @@ pub(crate) struct GapFillParams { /// The origin argument from the call to DATE_BIN_GAPFILL pub origin: Expr, /// The time range of the time column inferred from predicates - /// in the overall query + /// in the overall query. The lower bound may be [`Bound::Unbounded`] + /// which implies that gap-filling should just start from the + /// first point in each series. pub time_range: Range<Bound<Expr>>, } impl GapFillParams { // Extract the expressions so they can be optimized. fn expressions(&self) -> Vec<Expr> { - vec![ + let mut exprs = vec![ self.stride.clone(), self.time_column.clone(), self.origin.clone(), - bound_extract(&self.time_range.start) - .unwrap_or_else(|| panic!("lower time bound is required")) - .clone(), + ]; + if let Some(start) = bound_extract(&self.time_range.start) { + exprs.push(start.clone()); + } + exprs.push( bound_extract(&self.time_range.end) .unwrap_or_else(|| panic!("upper time bound is required")) .clone(), - ] + ); + exprs } #[allow(clippy::wrong_self_convention)] // follows convention of UserDefinedLogicalNode @@ -100,6 +105,11 @@ impl GapFill { aggr_expr: Vec<Expr>, params: GapFillParams, ) -> Result<Self> { + if params.time_range.end == Bound::Unbounded { + return Err(DataFusionError::Internal( + "missing upper bound in GapFill time range".to_string(), + )); + } Ok(Self { input, group_expr, @@ -477,6 +487,7 @@ mod test { }; use observability_deps::tracing::debug; use schema::{InfluxColumnType, InfluxFieldType}; + use test_helpers::assert_error; fn schema() -> Schema { Schema::new(vec![ @@ -496,10 +507,10 @@ mod test { } #[test] - fn test_from_template() -> Result<()> { - let scan = table_scan()?; - let gapfill = GapFill::try_new( - Arc::new(scan.clone()), + fn test_try_new_errs() { + let scan = table_scan().unwrap(); + let result = GapFill::try_new( + Arc::new(scan), vec![col("loc"), col("time")], vec![col("temp")], GapFillParams { @@ -508,12 +519,23 @@ mod test { origin: lit_timestamp_nano(0), time_range: Range { start: Bound::Included(lit_timestamp_nano(1000)), - end: Bound::Excluded(lit_timestamp_nano(2000)), + end: Bound::Unbounded, }, }, - )?; + ); + + assert_error!(result, DataFusionError::Internal(ref msg) if msg == "missing upper bound in GapFill time range"); + } + + fn assert_gapfill_from_template_roundtrip(gapfill: &GapFill) { + let scan = table_scan().unwrap(); let exprs = gapfill.expressions(); - assert_eq!(8, exprs.len()); + let want_exprs = gapfill.group_expr.len() + + gapfill.aggr_expr.len() + + 3 // stride, time, origin + + bound_extract(&gapfill.params.time_range.start).iter().count() + + bound_extract(&gapfill.params.time_range.end).iter().count(); + assert_eq!(want_exprs, exprs.len()); let gapfill_ft = gapfill.from_template(&exprs, &[scan]); let gapfill_ft = gapfill_ft .as_any() @@ -522,7 +544,35 @@ mod test { assert_eq!(gapfill.group_expr, gapfill_ft.group_expr); assert_eq!(gapfill.aggr_expr, gapfill_ft.aggr_expr); assert_eq!(gapfill.params, gapfill_ft.params); - Ok(()) + } + + #[test] + fn test_from_template() { + for time_range in vec![ + Range { + start: Bound::Included(lit_timestamp_nano(1000)), + end: Bound::Excluded(lit_timestamp_nano(2000)), + }, + Range { + start: Bound::Unbounded, + end: Bound::Excluded(lit_timestamp_nano(2000)), + }, + ] { + let scan = table_scan().unwrap(); + let gapfill = GapFill::try_new( + 
Arc::new(scan.clone()), + vec![col("loc"), col("time")], + vec![col("temp")], + GapFillParams { + stride: lit(ScalarValue::IntervalDayTime(Some(60_000))), + time_column: col("time"), + origin: lit_timestamp_nano(0), + time_range, + }, + ) + .unwrap(); + assert_gapfill_from_template_roundtrip(&gapfill); + } } #[test] @@ -640,7 +690,7 @@ mod test { agg_cols: vec![vec![Some(10), Some(11)]], input_batch_size, }; - let params = get_params_ms(&batch, 25, 975, 1_125); + let params = get_params_ms(&batch, 25, Some(975), 1_125); let tc = TestCase { test_records: batch, output_batch_size, @@ -681,7 +731,7 @@ mod test { agg_cols: vec![], input_batch_size, }; - let params = get_params_ms(&batch, 25, 975, 1_125); + let params = get_params_ms(&batch, 25, Some(975), 1_125); let tc = TestCase { test_records: batch, output_batch_size, @@ -720,7 +770,7 @@ mod test { agg_cols: vec![vec![Some(10), Some(11), Some(20), Some(21)]], input_batch_size, }; - let params = get_params_ms(&records, 25, 975, 1_125); + let params = get_params_ms(&records, 25, Some(975), 1_125); let tc = TestCase { test_records: records, output_batch_size, @@ -789,7 +839,7 @@ mod test { ]], input_batch_size, }; - let params = get_params_ms(&records, 25, 975, 1_125); + let params = get_params_ms(&records, 25, Some(975), 1_125); let tc = TestCase { test_records: records, output_batch_size, @@ -872,7 +922,7 @@ mod test { ]], input_batch_size, }; - let params = get_params_ms(&records, 25, 975, 1_125); + let params = get_params_ms(&records, 25, Some(975), 1_125); let tc = TestCase { test_records: records, output_batch_size, @@ -909,6 +959,58 @@ mod test { Ok(()) } + #[tokio::test] + async fn test_gapfill_multi_group_cols_with_more_nulls() { + test_helpers::maybe_start_logging(); + for output_batch_size in [1, 2, 4, 8, 16, 32] { + for input_batch_size in [1, 2, 4, 8] { + let records = TestRecords { + group_cols: vec![vec![Some("a"), Some("b"), Some("b"), Some("b"), Some("b")]], + time_col: vec![ + Some(1_000), + None, // group b + None, + None, + None, + ], + agg_cols: vec![vec![ + Some(10), // group a + Some(90), // group b + Some(91), + Some(92), + Some(93), + ]], + input_batch_size, + }; + let params = get_params_ms(&records, 25, Some(975), 1_025); + let tc = TestCase { + test_records: records, + output_batch_size, + params, + }; + let batches = tc.run().await.unwrap(); + let expected = [ + "+----+--------------------------+----+", + "| g0 | time | a0 |", + "+----+--------------------------+----+", + "| a | 1970-01-01T00:00:00.975Z | |", + "| a | 1970-01-01T00:00:01Z | 10 |", + "| a | 1970-01-01T00:00:01.025Z | |", + "| b | | 90 |", + "| b | | 91 |", + "| b | | 92 |", + "| b | | 93 |", + "| b | 1970-01-01T00:00:00.975Z | |", + "| b | 1970-01-01T00:00:01Z | |", + "| b | 1970-01-01T00:00:01.025Z | |", + "+----+--------------------------+----+", + ]; + assert_batches_eq!(expected, &batches); + assert_batch_count(&batches, output_batch_size); + } + } + } + #[tokio::test] async fn test_gapfill_multi_aggr_cols_with_nulls() -> Result<()> { test_helpers::maybe_start_logging(); @@ -966,7 +1068,7 @@ mod test { ], input_batch_size, }; - let params = get_params_ms(&records, 25, 975, 1_125); + let params = get_params_ms(&records, 25, Some(975), 1_125); let tc = TestCase { test_records: records, output_batch_size, @@ -1003,6 +1105,131 @@ mod test { Ok(()) } + #[tokio::test] + async fn test_gapfill_simple_no_lower_bound() { + test_helpers::maybe_start_logging(); + for output_batch_size in [1, 2, 4, 8] { + for input_batch_size in [1, 2, 4] { + let batch = 
TestRecords { + group_cols: vec![vec![Some("a"), Some("a"), Some("b"), Some("b")]], + time_col: vec![Some(1_025), Some(1_100), Some(1_050), Some(1_100)], + agg_cols: vec![vec![Some(10), Some(11), Some(20), Some(21)]], + input_batch_size, + }; + let params = get_params_ms(&batch, 25, None, 1_125); + let tc = TestCase { + test_records: batch, + output_batch_size, + params, + }; + let batches = tc.run().await.unwrap(); + let expected = [ + "+----+--------------------------+----+", + "| g0 | time | a0 |", + "+----+--------------------------+----+", + "| a | 1970-01-01T00:00:01.025Z | 10 |", + "| a | 1970-01-01T00:00:01.050Z | |", + "| a | 1970-01-01T00:00:01.075Z | |", + "| a | 1970-01-01T00:00:01.100Z | 11 |", + "| a | 1970-01-01T00:00:01.125Z | |", + "| b | 1970-01-01T00:00:01.050Z | 20 |", + "| b | 1970-01-01T00:00:01.075Z | |", + "| b | 1970-01-01T00:00:01.100Z | 21 |", + "| b | 1970-01-01T00:00:01.125Z | |", + "+----+--------------------------+----+", + ]; + assert_batches_eq!(expected, &batches); + assert_batch_count(&batches, output_batch_size); + } + } + } + + #[tokio::test] + async fn test_gapfill_simple_no_lower_bound_with_nulls() { + test_helpers::maybe_start_logging(); + for output_batch_size in [1, 2, 4, 8] { + for input_batch_size in [1, 2, 4] { + let batch = TestRecords { + group_cols: vec![vec![ + Some("a"), + Some("a"), + Some("a"), + Some("b"), + Some("b"), + Some("b"), + Some("b"), + Some("c"), + Some("c"), + Some("c"), + Some("c"), + Some("c"), + ]], + time_col: vec![ + None, // group a + Some(1_025), + Some(1_100), + None, // group b + None, + None, + None, // group c + None, + None, + None, + Some(1_050), + Some(1_100), + ], + agg_cols: vec![vec![ + Some(1), // group a + Some(10), + Some(11), + Some(90), // group b + Some(91), + Some(92), + Some(93), + None, // group c + None, + Some(2), + Some(20), + Some(21), + ]], + input_batch_size, + }; + let params = get_params_ms(&batch, 25, None, 1_125); + let tc = TestCase { + test_records: batch, + output_batch_size, + params, + }; + let batches = tc.run().await.unwrap(); + let expected = [ + "+----+--------------------------+----+", + "| g0 | time | a0 |", + "+----+--------------------------+----+", + "| a | | 1 |", + "| a | 1970-01-01T00:00:01.025Z | 10 |", + "| a | 1970-01-01T00:00:01.050Z | |", + "| a | 1970-01-01T00:00:01.075Z | |", + "| a | 1970-01-01T00:00:01.100Z | 11 |", + "| a | 1970-01-01T00:00:01.125Z | |", + "| b | | 90 |", + "| b | | 91 |", + "| b | | 92 |", + "| b | | 93 |", + "| c | | |", + "| c | | |", + "| c | | 2 |", + "| c | 1970-01-01T00:00:01.050Z | 20 |", + "| c | 1970-01-01T00:00:01.075Z | |", + "| c | 1970-01-01T00:00:01.100Z | 21 |", + "| c | 1970-01-01T00:00:01.125Z | |", + "+----+--------------------------+----+", + ]; + assert_batches_eq!(expected, &batches); + assert_batch_count(&batches, output_batch_size); + } + } + } + fn assert_batch_count(actual_batches: &[RecordBatch], batch_size: usize) { let num_rows = actual_batches.iter().map(|b| b.num_rows()).sum::<usize>(); let expected_batch_count = f64::ceil(num_rows as f64 / batch_size as f64) as usize; @@ -1148,7 +1375,20 @@ mod test { } } - fn get_params_ms(batch: &TestRecords, stride: i64, start: i64, end: i64) -> GapFillExecParams { + fn bound_included_from_option<T>(o: Option<T>) -> Bound<T> { + if let Some(v) = o { + Bound::Included(v) + } else { + Bound::Unbounded + } + } + + fn get_params_ms( + batch: &TestRecords, + stride: i64, + start: Option<i64>, + end: i64, + ) -> GapFillExecParams { GapFillExecParams { // interval day time is 
milliseconds in the low 32-bit word stride: phys_lit(ScalarValue::IntervalDayTime(Some(stride))), // milliseconds @@ -1156,10 +1396,12 @@ mod test { origin: phys_lit(ScalarValue::TimestampNanosecond(Some(0), None)), // timestamps are nanos, so scale them accordingly time_range: Range { - start: Bound::Included(phys_lit(ScalarValue::TimestampNanosecond( - Some(start * 1_000_000), - None, - ))), + start: bound_included_from_option(start.map(|start| { + phys_lit(ScalarValue::TimestampNanosecond( + Some(start * 1_000_000), + None, + )) + })), end: Bound::Included(phys_lit(ScalarValue::TimestampNanosecond( Some(end * 1_000_000), None, diff --git a/iox_query/src/exec/gapfill/algo.rs b/iox_query/src/exec/gapfill/algo.rs index a0b70a1b74..a836f98161 100644 --- a/iox_query/src/exec/gapfill/algo.rs +++ b/iox_query/src/exec/gapfill/algo.rs @@ -79,10 +79,9 @@ pub(super) struct GapFiller { impl GapFiller { /// Initialize a [GapFiller] at the beginning of an input record batch. pub fn new(params: GapFillParams) -> Self { - let next_ts = params.first_ts; let cursor = Cursor { next_input_offset: 0, - next_ts, + next_ts: params.first_ts, remaining_output_batch_size: 0, }; Self { @@ -135,7 +134,11 @@ impl GapFiller { self.trailing_gaps = false; } else { assert!(self.cursor.next_input_offset == *last_series_end); - if self.cursor.next_ts <= self.params.last_ts { + if self + .cursor + .next_ts + .map_or(false, |next_ts| next_ts <= self.params.last_ts) + { // Possible state 2: // Set this bit so that the output batch begins // with trailing gaps for the last series. @@ -297,7 +300,10 @@ struct Cursor { /// Where to read the next row from the input. next_input_offset: usize, /// The next timestamp to be produced for the current series. - next_ts: i64, + /// Since the lower bound for gap filling could just be "whatever + /// the first timestamp in the series is," this may be `None` before + /// any rows with non-null timestamps are produced for a series. + next_ts: Option<i64>, /// How many rows may be output before we need to start a new record batch. remaining_output_batch_size: usize, } @@ -311,7 +317,7 @@ impl Cursor { input_time_array: &TimestampNanosecondArray, series_end: usize, ) -> usize { - let null_ts_count = if input_time_array.null_count() > 0 { + let mut count = if input_time_array.null_count() > 0 { let len = series_end - self.next_input_offset; let slice = input_time_array.slice(self.next_input_offset, len); slice.null_count() @@ -319,7 +325,10 @@ impl Cursor { 0 }; - let count = null_ts_count + params.valid_row_count(self.next_ts); + self.next_input_offset += count; + if self.maybe_init_next_ts(input_time_array, series_end) { + count += params.valid_row_count(self.next_ts.unwrap()); + } self.next_input_offset = series_end; self.next_ts = params.first_ts; @@ -327,6 +336,37 @@ impl Cursor { count } + /// Attempts to assign a value to `self.next_ts` if it does not have one. + /// + /// This bit of abstraction is needed because the lower bound for gap filling may be + /// determined in one of two ways: + /// * If the [`GapFillParams`] provided by client code has `first_ts` set to `Some`, this + /// will be the first timestamp for each series. In this case `self.next_ts` + /// will never `None`, and this function does nothing. + /// * Otherwise it is determined to be whatever the first timestamp in the input series is. + /// In this case `params.first_ts == None`, and we need to extract the timestamp from + /// the input time array. 
+ /// + /// Returns true if `self.next_ts` ends up containing a value. + fn maybe_init_next_ts( + &mut self, + input_time_array: &TimestampNanosecondArray, + series_end: usize, + ) -> bool { + self.next_ts = match self.next_ts { + Some(_) => self.next_ts, + None if self.next_input_offset < series_end + && input_time_array.is_valid(self.next_input_offset) => + { + Some(input_time_array.value(self.next_input_offset)) + } + // This may happen if current input offset points at a row + // with a null timestamp, or is past the end of the current series. + _ => None, + }; + self.next_ts.is_some() + } + /// Builds a vector that can be used to produce a timestamp array. fn build_time_vec( &mut self, @@ -458,17 +498,21 @@ impl Cursor { self.next_input_offset += 1; } + if !self.maybe_init_next_ts(input_times, series_end) { + return Ok(()); + } + let mut next_ts = self.next_ts.unwrap(); + let output_row_count = std::cmp::min( - params.valid_row_count(self.next_ts), + params.valid_row_count(next_ts), self.remaining_output_batch_size, ); if output_row_count == 0 { return Ok(()); } - let mut next_ts = self.next_ts; // last_ts is the last timestamp that will fit in the output batch - let last_ts = self.next_ts + (output_row_count - 1) as i64 * params.stride; + let last_ts = next_ts + (output_row_count - 1) as i64 * params.stride; loop { if self.next_input_offset >= series_end { @@ -503,7 +547,7 @@ impl Cursor { next_ts += params.stride; } - self.next_ts = last_ts + params.stride; + self.next_ts = Some(last_ts + params.stride); self.remaining_output_batch_size -= output_row_count; Ok(()) } @@ -551,7 +595,7 @@ mod tests { let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1250, }; @@ -579,7 +623,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: output_batch_size - 7 }, cursor @@ -588,6 +632,43 @@ mod tests { Ok(()) } + #[test] + fn test_cursor_append_time_values_no_first_ts() { + test_helpers::maybe_start_logging(); + let input_times = TimestampNanosecondArray::from(vec![1100, 1200]); + let series = input_times.len(); + + let params = GapFillParams { + stride: 50, + first_ts: None, + last_ts: 1250, + }; + + let output_batch_size = 10000; + let mut cursor = Cursor { + next_input_offset: 0, + next_ts: params.first_ts, + remaining_output_batch_size: output_batch_size, + }; + + let out_times = cursor + .build_time_vec(&params, &[series], &input_times) + .unwrap(); + assert_eq!( + vec![Some(1100), Some(1150), Some(1200), Some(1250)], + out_times + ); + + assert_eq!( + Cursor { + next_input_offset: input_times.len(), + next_ts: Some(params.last_ts + params.stride), + remaining_output_batch_size: output_batch_size - 4 + }, + cursor + ); + } + #[test] fn test_cursor_append_time_value_nulls() -> Result<()> { test_helpers::maybe_start_logging(); @@ -597,7 +678,7 @@ mod tests { let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1250, }; @@ -626,7 +707,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: output_batch_size - 9 }, cursor @@ -642,7 +723,7 @@ mod tests { let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1250, }; @@ -658,7 +739,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - 
next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: output_batch_size - 7 }, cursor @@ -674,7 +755,7 @@ mod tests { let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1250, }; @@ -694,7 +775,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: output_batch_size - 7 }, cursor @@ -712,7 +793,7 @@ mod tests { let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1250, }; @@ -742,7 +823,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: output_batch_size - 9 }, cursor @@ -756,7 +837,7 @@ mod tests { let output_batch_size = 5; let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1350, }; let input_times = TimestampNanosecondArray::from(vec![ @@ -795,7 +876,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 2, - next_ts: 1200, + next_ts: Some(1200), remaining_output_batch_size: 0 }, cursor @@ -818,7 +899,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: 1 }, cursor @@ -833,7 +914,7 @@ mod tests { let output_batch_size = 6; let params = GapFillParams { stride: 50, - first_ts: 950, + first_ts: Some(950), last_ts: 1350, }; let input_times = TimestampNanosecondArray::from(vec![ @@ -878,7 +959,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 4, - next_ts: 1150, + next_ts: Some(1150), remaining_output_batch_size: 0 }, cursor @@ -900,7 +981,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: 1 }, cursor @@ -916,7 +997,7 @@ mod tests { let output_batch_size = 4; let params = GapFillParams { stride: 50, - first_ts: 1000, + first_ts: Some(1000), last_ts: 1100, }; let input_times = TimestampNanosecondArray::from(vec![ @@ -957,7 +1038,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 4, - next_ts: 1000, + next_ts: Some(1000), remaining_output_batch_size: 0 }, cursor @@ -980,7 +1061,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: 1 }, cursor, @@ -998,7 +1079,7 @@ mod tests { let output_batch_size = 4; let params = GapFillParams { stride: 50, - first_ts: 1000, + first_ts: Some(1000), last_ts: 1100, }; let input_times = TimestampNanosecondArray::from(vec![ @@ -1040,7 +1121,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 4, - next_ts: 1000, + next_ts: Some(1000), remaining_output_batch_size: 0 }, cursor @@ -1063,7 +1144,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: 0 }, cursor, @@ -1079,7 +1160,7 @@ mod tests { let output_batch_size = 3; let params = GapFillParams { stride: 100, - first_ts: 200, + first_ts: Some(200), last_ts: 1000, }; let input_times = TimestampNanosecondArray::from(vec![300, 500, 700, 800]); @@ -1111,7 +1192,7 @@ mod tests 
{ assert_eq!( Cursor { next_input_offset: 1, - next_ts: 500, + next_ts: Some(500), remaining_output_batch_size: 0 }, cursor @@ -1134,7 +1215,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 3, - next_ts: 800, + next_ts: Some(800), remaining_output_batch_size: 0 }, cursor @@ -1157,7 +1238,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: input_times.len(), - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: 0 }, cursor @@ -1172,7 +1253,7 @@ mod tests { let output_batch_size = 4; let params = GapFillParams { stride: 50, - first_ts: 1000, + first_ts: Some(1000), last_ts: 1200, }; let input_times = TimestampNanosecondArray::from(vec![ @@ -1212,7 +1293,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 2, - next_ts: 1200, + next_ts: Some(1200), remaining_output_batch_size: 0 }, cursor @@ -1235,7 +1316,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 3, - next_ts: params.last_ts + params.stride, + next_ts: Some(params.last_ts + params.stride), remaining_output_batch_size: 3 }, cursor @@ -1259,7 +1340,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 5, - next_ts: 1150, + next_ts: Some(1150), remaining_output_batch_size: 0 }, cursor @@ -1282,7 +1363,7 @@ mod tests { assert_eq!( Cursor { next_input_offset: 7, - next_ts: 1250, + next_ts: Some(1250), remaining_output_batch_size: 2 }, cursor diff --git a/iox_query/src/exec/gapfill/params.rs b/iox_query/src/exec/gapfill/params.rs index 4965b4e68c..80a0feb815 100644 --- a/iox_query/src/exec/gapfill/params.rs +++ b/iox_query/src/exec/gapfill/params.rs @@ -24,8 +24,9 @@ pub(super) struct GapFillParams { /// The stride in nanoseconds of the timestamps to be output. pub stride: i64, /// The first timestamp (inclusive) to be output for each series, - /// in nanoseconds since the epoch. - pub first_ts: i64, + /// in nanoseconds since the epoch. `None` means gap filling should + /// start from the first timestamp in each series. + pub first_ts: Option<i64>, /// The last timestamp (inclusive!) to be output for each series, /// in nanoseconds since the epoch. pub last_ts: i64, @@ -47,15 +48,11 @@ impl GapFillParams { })?; // Find the smallest timestamp that might appear in the - // range + // range. There might not be one, which is okay. 
let first_ts = match range.start { - Bound::Included(v) => v, - Bound::Excluded(v) => v + 1, - Bound::Unbounded => { - return Err(DataFusionError::Execution( - "missing lower time bound for gap filling".to_string(), - )) - } + Bound::Included(v) => Some(v), + Bound::Excluded(v) => Some(v + 1), + Bound::Unbounded => None, }; // Find the largest timestamp that might appear in the @@ -73,8 +70,10 @@ impl GapFillParams { // Call date_bin on the timestamps to find the first and last time bins // for each series let mut args = vec![stride, i64_to_columnar_ts(first_ts), origin]; - let first_ts = extract_timestamp_nanos(&date_bin(&args)?)?; - args[1] = i64_to_columnar_ts(last_ts); + let first_ts = first_ts + .map(|_| extract_timestamp_nanos(&date_bin(&args)?)) + .transpose()?; + args[1] = i64_to_columnar_ts(Some(last_ts)); let last_ts = extract_timestamp_nanos(&date_bin(&args)?)?; Ok(Self { @@ -94,8 +93,11 @@ impl GapFillParams { } } -fn i64_to_columnar_ts(i: i64) -> ColumnarValue { - ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(Some(i), None)) +fn i64_to_columnar_ts(i: Option<i64>) -> ColumnarValue { + match i { + Some(i) => ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(Some(i), None)), + None => ColumnarValue::Scalar(ScalarValue::Null), + } } fn extract_timestamp_nanos(cv: &ColumnarValue) -> Result<i64> { @@ -127,12 +129,26 @@ fn extract_interval_nanos(cv: &ColumnarValue) -> Result<i64> { #[cfg(test)] mod tests { - use std::sync::Arc; + use std::{ + ops::{Bound, Range}, + sync::Arc, + }; use arrow::datatypes::{DataType, Field, Schema, TimeUnit}; - use datafusion::{datasource::empty::EmptyTable, error::Result}; + use datafusion::{ + datasource::empty::EmptyTable, + error::Result, + physical_plan::{ + expressions::{Column, Literal}, + PhysicalExpr, + }, + scalar::ScalarValue, + }; - use crate::exec::{gapfill::GapFillExec, Executor, ExecutorType}; + use crate::exec::{ + gapfill::{GapFillExec, GapFillExecParams}, + Executor, ExecutorType, + }; use super::GapFillParams; @@ -148,9 +164,9 @@ mod tests { \ngroup by minute", ).await?; let expected = GapFillParams { - stride: 60_000_000_000, // 1 minute - first_ts: 441_820_500_000_000_000, // Sunday, January 1, 1984 3:55:00 PM - last_ts: 441_820_800_000_000_000, // Sunday, January 1, 1984 3:59:00 PM + stride: 60_000_000_000, // 1 minute + first_ts: Some(441_820_500_000_000_000), // Sunday, January 1, 1984 3:55:00 PM + last_ts: 441_820_800_000_000_000, // Sunday, January 1, 1984 3:59:00 PM }; assert_eq!(expected, actual); Ok(()) @@ -168,8 +184,8 @@ mod tests { \ngroup by minute", ).await?; let expected = GapFillParams { - stride: 60_000_000_000, // 1 minute - first_ts: 441_820_500_000_000_000, // Sunday, January 1, 1984 3:55:00 PM + stride: 60_000_000_000, // 1 minute + first_ts: Some(441_820_500_000_000_000), // Sunday, January 1, 1984 3:55:00 PM // Last bin at 16:00 is excluded last_ts: 441_820_740_000_000_000, // Sunday, January 1, 1984 3:59:00 PM }; @@ -191,8 +207,8 @@ mod tests { let expected = GapFillParams { stride: 60_000_000_000, // 1 minute // First bin not exluded since it truncates to 15:55:00 - first_ts: 441_820_500_000_000_000, // Sunday, January 1, 1984 3:55:00 PM - last_ts: 441_820_800_000_000_000, // Sunday, January 1, 1984 3:59:00 PM + first_ts: Some(441_820_500_000_000_000), // Sunday, January 1, 1984 3:55:00 PM + last_ts: 441_820_800_000_000_000, // Sunday, January 1, 1984 3:59:00 PM }; assert_eq!(expected, actual); Ok(()) @@ -211,21 +227,57 @@ mod tests { \ngroup by minute", ).await?; let expected = GapFillParams { - 
stride: 60_000_000_000, // 1 minute - first_ts: 441_820_449_000_000_000, // Sunday, January 1, 1984 3:54:09 PM - last_ts: 441_820_749_000_000_000, // Sunday, January 1, 1984 3:59:09 PM + stride: 60_000_000_000, // 1 minute + first_ts: Some(441_820_449_000_000_000), // Sunday, January 1, 1984 3:54:09 PM + last_ts: 441_820_749_000_000_000, // Sunday, January 1, 1984 3:59:09 PM }; assert_eq!(expected, actual); Ok(()) } + fn interval(ns: i64) -> Arc<dyn PhysicalExpr> { + Arc::new(Literal::new(ScalarValue::IntervalDayTime(Some( + ns / 1_000_000, + )))) + } + + fn timestamp(ns: i64) -> Arc<dyn PhysicalExpr> { + Arc::new(Literal::new(ScalarValue::TimestampNanosecond( + Some(ns), + None, + ))) + } + + #[test] + fn test_params_no_start() { + let exec_params = GapFillExecParams { + stride: interval(1_000_000_000), + time_column: Column::new("time", 0), + origin: timestamp(0), + time_range: Range { + start: Bound::Unbounded, + end: Bound::Excluded(timestamp(20_000_000_000)), + }, + }; + + let actual = GapFillParams::try_new(schema().into(), &exec_params).unwrap(); + assert_eq!( + GapFillParams { + stride: 1_000_000_000, + first_ts: None, + last_ts: 19_000_000_000, + }, + actual + ); + } + #[test] #[allow(clippy::reversed_empty_ranges)] fn test_params_row_count() -> Result<()> { test_helpers::maybe_start_logging(); let params = GapFillParams { stride: 10, - first_ts: 1000, + first_ts: Some(1000), last_ts: 1050, };
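The behavioural core of this commit is that `GapFillParams::first_ts` becomes an `Option<i64>`: when the query has no lower time bound (`Bound::Unbounded`), each series is gap-filled starting from its own first observed timestamp instead of a query-wide start. The sketch below mirrors that semantics for a single series of already-binned timestamps; `gap_fill_times` is an illustrative helper under those assumptions, not the streaming `Cursor` implementation in `algo.rs`.

```rust
/// Produce the gap-filled output timestamps for one series.
///
/// * `stride`   - spacing between output timestamps
/// * `first_ts` - inclusive lower bound; `None` means "start at the series' first point"
/// * `last_ts`  - inclusive upper bound
/// * `series`   - observed (non-null) timestamps, ascending and already aligned to the
///                stride (the real planner guarantees this via `date_bin`)
fn gap_fill_times(stride: i64, first_ts: Option<i64>, last_ts: i64, series: &[i64]) -> Vec<i64> {
    // With no lower bound and no observed rows there is nothing to fill.
    let Some(start) = first_ts.or(series.first().copied()) else {
        return vec![];
    };
    let mut out = Vec::new();
    let mut ts = start;
    while ts <= last_ts {
        out.push(ts);
        ts += stride;
    }
    out
}

fn main() {
    // Mirrors the `test_gapfill_simple_no_lower_bound` case above (values in ms):
    // series "a" is first observed at 1_025, so the fill starts there.
    assert_eq!(
        gap_fill_times(25, None, 1_125, &[1_025, 1_100]),
        vec![1_025, 1_050, 1_075, 1_100, 1_125]
    );
    // With an explicit lower bound of 975 the fill starts at 975 instead.
    assert_eq!(
        gap_fill_times(25, Some(975), 1_125, &[1_025, 1_100]),
        vec![975, 1_000, 1_025, 1_050, 1_075, 1_100, 1_125]
    );
    println!("gap-fill sketch ok");
}
```

Rows whose timestamp is null (the "more nulls" test cases above) are passed through ahead of the filled range and do not affect where the fill starts.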
c1a448e9303e7fb66d8b60495b5138fe123f4ab1
Andrew Lamb
2023-05-26 06:05:14
Add decoded payload type and size to querier <--> ingester tracing (#7870)
* feat: Add decoded payload type and size to querier <--> ingester tracing * feat: add aggregate sizes ---------
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
feat: Add decoded payload type and size to querier <--> ingester tracing (#7870) * feat: Add decoded payload type and size to querier <--> ingester tracing * feat: add aggregate sizes --------- Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/querier/src/ingester/flight_client.rs b/querier/src/ingester/flight_client.rs index fea503b39d..b39b63625b 100644 --- a/querier/src/ingester/flight_client.rs +++ b/querier/src/ingester/flight_client.rs @@ -311,7 +311,10 @@ where let res = self.inner.next_message().await; match &res { - Ok(_) => span_recorder.ok("ok"), + Ok(res) => { + span_recorder.ok("ok"); + self.record_metadata(&mut span_recorder, res.as_ref()) + } Err(e) => span_recorder.error(e.to_string()), } @@ -319,6 +322,35 @@ where } } +impl<T> QueryDataTracer<T> +where + T: QueryData, +{ + /// Record additional metadata on the + fn record_metadata( + &self, + span_recorder: &mut SpanRecorder, + res: Option<&(DecodedPayload, proto::IngesterQueryResponseMetadata)>, + ) { + let Some((payload, _metadata)) = res else { + return; + }; + match payload { + DecodedPayload::None => { + span_recorder.set_metadata("payload_type", "none"); + } + DecodedPayload::Schema(_) => { + span_recorder.set_metadata("payload_type", "schema"); + } + DecodedPayload::RecordBatch(batch) => { + span_recorder.set_metadata("payload_type", "batch"); + span_recorder.set_metadata("num_rows", batch.num_rows() as i64); + span_recorder.set_metadata("mem_bytes", batch.get_array_memory_size() as i64); + } + } + } +} + #[derive(Debug, Clone)] struct CachedConnection { ingester_address: Arc<str>, diff --git a/querier/src/ingester/mod.rs b/querier/src/ingester/mod.rs index cc3e550504..98be3a067d 100644 --- a/querier/src/ingester/mod.rs +++ b/querier/src/ingester/mod.rs @@ -220,6 +220,8 @@ struct IngesterResponseOk { n_partitions: usize, n_chunks: usize, n_rows: usize, + /// Estimated number of bytes this batch requires in memory + memory_bytes: usize, } /// Helper to observe a single ingester request. @@ -257,6 +259,15 @@ impl<'a> ObserveIngesterRequest<'a> { } fn set_ok(mut self, ok_status: IngesterResponseOk) { + self.span_recorder + .set_metadata("n_partitions", ok_status.n_partitions as i64); + self.span_recorder + .set_metadata("num_chunks", ok_status.n_chunks as i64); + self.span_recorder + .set_metadata("num_rows", ok_status.n_rows as i64); + self.span_recorder + .set_metadata("mem_bytes", ok_status.memory_bytes as i64); + self.res = Some(Ok(ok_status)); self.span_recorder.ok("done"); } @@ -695,6 +706,7 @@ impl IngesterConnection for IngesterConnectionImpl { for c in p.chunks() { status.n_chunks += 1; status.n_rows += c.rows(); + status.memory_bytes += c.estimate_size() } }
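The per-batch values this tracing change attaches to the span come straight from arrow. Below is a minimal sketch of just that measurement step, using arrow-rs directly and leaving out the `SpanRecorder` plumbing shown in the diff above; the column name and data are made up for illustration, and it assumes the `arrow` crate is available.

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::record_batch::RecordBatch;

fn main() -> Result<(), arrow::error::ArrowError> {
    // Stand-in for one decoded Flight payload received from an ingester.
    let usage: ArrayRef = Arc::new(Int64Array::from(vec![90, 89, 85]));
    let batch = RecordBatch::try_from_iter(vec![("usage", usage)])?;

    // The two per-batch values recorded alongside `payload_type = "batch"`.
    let num_rows = batch.num_rows() as i64;
    let mem_bytes = batch.get_array_memory_size() as i64;

    println!("payload_type=batch num_rows={num_rows} mem_bytes={mem_bytes}");
    Ok(())
}
```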
3612b1c48234c74b33865a62a864b07c014b9ba6
Marco Neumann
2023-08-17 10:18:45
use DF `Expr` instead of `Predicate` for chunk pruning (#8500)
`Predicate` is InfluxRPC-specific and contains way more than just a filter expression. Ref #8097.
Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
refactor: use DF `Expr` instead of `Predicate` for chunk pruning (#8500) `Predicate` is InfluxRPC-specific and contains way more than just a filter expression. Ref #8097. Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/ingester/src/buffer_tree/table.rs b/ingester/src/buffer_tree/table.rs index cfd5173521..bfd9f27e4e 100644 --- a/ingester/src/buffer_tree/table.rs +++ b/ingester/src/buffer_tree/table.rs @@ -9,7 +9,7 @@ use data_types::{ partition_template::{build_column_values, ColumnValue, TablePartitionTemplateOverride}, NamespaceId, PartitionKey, SequenceNumber, Table, TableId, }; -use datafusion::scalar::ScalarValue; +use datafusion::{prelude::Expr, scalar::ScalarValue}; use iox_query::{ chunk_statistics::{create_chunk_statistics, ColumnRange}, pruning::prune_summaries, @@ -267,6 +267,9 @@ where ); let table_partition_template = self.catalog_table.get().await.partition_template; + let filters = predicate + .map(|p| p.filter_expr().into_iter().collect::<Vec<_>>()) + .unwrap_or_default(); // Gather the partition data from all of the partitions in this table. let span = SpanRecorder::new(span); @@ -289,19 +292,13 @@ where // Potentially prune out this partition if the partition // template & derived partition key can be used to match - // against the optional predicate. - if predicate - .as_ref() - .map(|p| { - !keep_after_pruning_partition_key( - &table_partition_template, - &partition_key, - p, - &data, - ) - }) - .unwrap_or_default() - { + // against the filters. + if !keep_after_pruning_partition_key( + &table_partition_template, + &partition_key, + &filters, + &data, + ) { // This partition will never contain any data that would // form part of the query response. // @@ -345,7 +342,7 @@ where fn keep_after_pruning_partition_key( table_partition_template: &TablePartitionTemplateOverride, partition_key: &PartitionKey, - predicate: &Predicate, + filters: &[Expr], data: &QueryAdaptor, ) -> bool { // Construct a set of per-column min/max statistics based on the partition @@ -422,7 +419,7 @@ fn keep_after_pruning_partition_key( prune_summaries( data.schema(), &[(chunk_statistics, data.schema().as_arrow())], - predicate, + filters, ) // Errors are logged by `iox_query` and sometimes fine, e.g. for not // implemented DataFusion features or upstream bugs. 
The querier uses the diff --git a/iox_query/src/pruning.rs b/iox_query/src/pruning.rs index 38ef4a9ac0..4885b2867e 100644 --- a/iox_query/src/pruning.rs +++ b/iox_query/src/pruning.rs @@ -14,7 +14,6 @@ use datafusion::{ }; use datafusion_util::create_pruning_predicate; use observability_deps::tracing::{debug, trace, warn}; -use predicate::Predicate; use query_functions::group_by::Aggregate; use schema::{Schema, TIME_COLUMN_NAME}; use std::sync::Arc; @@ -75,15 +74,15 @@ pub trait PruningObserver { pub fn prune_chunks( table_schema: &Schema, chunks: &[Arc<dyn QueryChunk>], - predicate: &Predicate, + filters: &[Expr], ) -> Result<Vec<bool>, NotPrunedReason> { let num_chunks = chunks.len(); - debug!(num_chunks, %predicate, "Pruning chunks"); + debug!(num_chunks, ?filters, "Pruning chunks"); let summaries: Vec<_> = chunks .iter() .map(|c| (c.stats(), c.schema().as_arrow())) .collect(); - prune_summaries(table_schema, &summaries, predicate) + prune_summaries(table_schema, &summaries, filters) } /// Given a `Vec` of pruning summaries, return a `Vec<bool>` where `false` indicates that the @@ -91,9 +90,9 @@ pub fn prune_chunks( pub fn prune_summaries( table_schema: &Schema, summaries: &[(Arc<Statistics>, SchemaRef)], - predicate: &Predicate, + filters: &[Expr], ) -> Result<Vec<bool>, NotPrunedReason> { - let filter_expr = match predicate.filter_expr() { + let filter_expr = match filters.iter().cloned().reduce(|a, b| a.and(b)) { Some(expr) => expr, None => { debug!("No expression on predicate"); @@ -219,7 +218,6 @@ mod test { use datafusion::prelude::{col, lit}; use datafusion_util::lit_dict; - use predicate::Predicate; use schema::merge::SchemaMerger; use crate::{test::TestChunk, QueryChunk}; @@ -231,8 +229,7 @@ mod test { test_helpers::maybe_start_logging(); let c1 = Arc::new(TestChunk::new("chunk1")); - let predicate = Predicate::new(); - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &[]); assert_eq!(result, Err(NotPrunedReason::NoExpressionOnPredicate)); } @@ -248,9 +245,9 @@ mod test { Some(10.0), )); - let predicate = Predicate::new().with_expr(col("column1").gt(lit(100.0f64))); + let filters = vec![col("column1").gt(lit(100.0f64))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -266,9 +263,9 @@ mod test { Some(10), )); - let predicate = Predicate::new().with_expr(col("column1").gt(lit(100i64))); + let filters = vec![col("column1").gt(lit(100i64))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -285,9 +282,9 @@ mod test { Some(10), )); - let predicate = Predicate::new().with_expr(col("column1").gt(lit(100u64))); + let filters = vec![col("column1").gt(lit(100u64))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -302,9 +299,9 @@ mod test { Some(false), )); - let predicate = Predicate::new().with_expr(col("column1")); + let filters = vec![col("column1")]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![false; 1]); } @@ 
-322,9 +319,9 @@ mod test { ), ); - let predicate = Predicate::new().with_expr(col("column1").gt(lit("z"))); + let filters = vec![col("column1").gt(lit("z"))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![false]); } @@ -339,9 +336,9 @@ mod test { Some(10.0), )); - let predicate = Predicate::new().with_expr(col("column1").lt(lit(100.0f64))); + let filters = vec![col("column1").lt(lit(100.0f64))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -357,9 +354,9 @@ mod test { Some(10), )); - let predicate = Predicate::new().with_expr(col("column1").lt(lit(100i64))); + let filters = vec![col("column1").lt(lit(100i64))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -375,9 +372,9 @@ mod test { Some(10), )); - let predicate = Predicate::new().with_expr(col("column1").lt(lit(100u64))); + let filters = vec![col("column1").lt(lit(100u64))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -393,9 +390,9 @@ mod test { Some(true), )); - let predicate = Predicate::new().with_expr(col("column1")); + let filters = vec![col("column1")]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -413,9 +410,9 @@ mod test { ), ); - let predicate = Predicate::new().with_expr(col("column1").lt(lit("z"))); + let filters = vec![col("column1").lt(lit("z"))]; - let result = prune_chunks(&c1.schema().clone(), &[c1], &predicate); + let result = prune_chunks(&c1.schema().clone(), &[c1], &filters); assert_eq!(result.expect("pruning succeeds"), vec![true]); } @@ -455,12 +452,12 @@ mod test { let c4 = Arc::new(TestChunk::new("chunk4").with_i64_field_column("column1")) as Arc<dyn QueryChunk>; - let predicate = Predicate::new().with_expr(col("column1").gt(lit(100i64))); + let filters = vec![col("column1").gt(lit(100i64))]; let chunks = vec![c1, c2, c3, c4]; let schema = merge_schema(&chunks); - let result = prune_chunks(&schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &filters); assert_eq!( result.expect("pruning succeeds"), @@ -513,12 +510,12 @@ mod test { Some(20), )) as Arc<dyn QueryChunk>; - let predicate = Predicate::new().with_expr(col("column1").gt(lit(100i64))); + let filters = vec![col("column1").gt(lit(100i64))]; let chunks = vec![c1, c2, c3, c4, c5, c6]; let schema = merge_schema(&chunks); - let result = prune_chunks(&schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &filters); assert_eq!( result.expect("pruning succeeds"), @@ -551,12 +548,12 @@ mod test { Some(4), )) as Arc<dyn QueryChunk>; - let predicate = Predicate::new().with_expr(col("column1").gt(lit(100i64))); + let filters = vec![col("column1").gt(lit(100i64))]; let chunks = vec![c1, c2, c3]; let schema = merge_schema(&chunks); - let result = prune_chunks(&schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &filters); 
assert_eq!(result.expect("pruning succeeds"), vec![false, true, true]); } @@ -604,17 +601,15 @@ mod test { ), ) as Arc<dyn QueryChunk>; - let predicate = Predicate::new().with_expr( - col("column1") - .is_null() - .not() - .and(col("column1").eq(lit_dict("bar"))), - ); + let filters = vec![col("column1") + .is_null() + .not() + .and(col("column1").eq(lit_dict("bar")))]; let chunks = vec![c1, c2, c3]; let schema = merge_schema(&chunks); - let result = prune_chunks(&schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &filters); assert_eq!(result.expect("pruning succeeds"), vec![true, false, false]); } @@ -666,16 +661,14 @@ mod test { .with_i64_field_column_with_stats("column2", Some(0), Some(4)), ) as Arc<dyn QueryChunk>; - let predicate = Predicate::new().with_expr( - col("column1") - .gt(lit(100i64)) - .and(col("column2").lt(lit(5i64))), - ); + let filters = vec![col("column1") + .gt(lit(100i64)) + .and(col("column2").lt(lit(5i64)))]; let chunks = vec![c1, c2, c3, c4, c5, c6]; let schema = merge_schema(&chunks); - let result = prune_chunks(&schema, &chunks, &predicate); + let result = prune_chunks(&schema, &chunks, &filters); assert_eq!( result.expect("Pruning succeeds"), diff --git a/iox_query/src/test.rs b/iox_query/src/test.rs index a1b644dfd9..d6e07b523b 100644 --- a/iox_query/src/test.rs +++ b/iox_query/src/test.rs @@ -37,7 +37,6 @@ use itertools::Itertools; use object_store::{path::Path, ObjectMeta}; use parking_lot::Mutex; use parquet_file::storage::ParquetExecInput; -use predicate::Predicate; use schema::{ builder::SchemaBuilder, merge::SchemaMerger, sort::SortKey, Schema, TIME_COLUMN_NAME, }; @@ -136,7 +135,6 @@ impl QueryNamespace for TestDatabase { // save last predicate *self.chunks_predicate.lock() = filters.to_vec(); - let predicate = Predicate::default().with_exprs(filters.iter().cloned()); let partitions = self.partitions.lock().clone(); Ok(partitions .values() @@ -148,7 +146,7 @@ impl QueryNamespace for TestDatabase { prune_chunks( c.schema(), &[Arc::clone(*c) as Arc<dyn QueryChunk>], - &predicate, + filters, ) .ok() .map(|res| res[0]) diff --git a/iox_query_influxrpc/src/lib.rs b/iox_query_influxrpc/src/lib.rs index 03e4db103f..721d489eca 100644 --- a/iox_query_influxrpc/src/lib.rs +++ b/iox_query_influxrpc/src/lib.rs @@ -1800,7 +1800,8 @@ fn prune_chunks( ) -> Vec<Arc<dyn QueryChunk>> { use iox_query::pruning::prune_chunks; - let Ok(mask) = prune_chunks(table_schema, &chunks, predicate) else { + let filters = predicate.filter_expr().into_iter().collect::<Vec<_>>(); + let Ok(mask) = prune_chunks(table_schema, &chunks, &filters) else { return chunks; }; diff --git a/querier/src/table/query_access/mod.rs b/querier/src/table/query_access/mod.rs index 3998a6154a..f8291fffd0 100644 --- a/querier/src/table/query_access/mod.rs +++ b/querier/src/table/query_access/mod.rs @@ -16,7 +16,6 @@ use iox_query::{ pruning::{prune_chunks, retention_expr, NotPrunedReason, PruningObserver}, QueryChunk, }; -use predicate::Predicate; use schema::Schema; use crate::{ingester::IngesterChunk, parquet::QuerierParquetChunk}; @@ -118,8 +117,7 @@ impl ChunkPruner for QuerierTableChunkPruner { ) -> Result<Vec<Arc<dyn QueryChunk>>, ProviderError> { let observer = &MetricPruningObserver::new(Arc::clone(&self.metrics)); - let predicate = Predicate::default().with_exprs(filters.iter().cloned()); - let chunks = match prune_chunks(table_schema, &chunks, &predicate) { + let chunks = match prune_chunks(table_schema, &chunks, filters) { Ok(keeps) => { assert_eq!(chunks.len(), 
keeps.len()); chunks
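The heart of this refactor is that callers now hand pruning a plain slice of DataFusion `Expr` filters, which `prune_summaries` folds into a single conjunctive predicate. A small sketch of that folding step, assuming the `datafusion` crate is available (the `conjoin` helper name is an invention here, not an IOx function):

```rust
use datafusion::prelude::{col, lit, Expr};

/// Fold a slice of filter expressions into one `AND`-combined predicate,
/// the same reduction performed in the diff above.
fn conjoin(filters: &[Expr]) -> Option<Expr> {
    filters.iter().cloned().reduce(|a, b| a.and(b))
}

fn main() {
    let filters = vec![
        col("column1").gt(lit(100i64)),
        col("column2").lt(lit(5i64)),
    ];

    // Prints Some(column1 > 100 AND column2 < 5); an empty slice yields None,
    // in which case pruning is skipped entirely.
    println!("{:?}", conjoin(&filters));
}
```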
d713ba935a3b644cc4c687d9a620da2dc737d014
Carol (Nichols || Goulding)
2023-05-19 11:25:34
Reduce duplication of encode/decode implementations
This is much less gobbledygook.
null
refactor: Reduce duplication of encode/decode implementations This is much less gobbledygook.
diff --git a/data_types/src/partition_template.rs b/data_types/src/partition_template.rs index b72ea38c32..795b3caf46 100644 --- a/data_types/src/partition_template.rs +++ b/data_types/src/partition_template.rs @@ -23,18 +23,19 @@ pub static PARTITION_BY_DAY_PROTO: Lazy<Arc<proto::PartitionTemplate>> = Lazy::n }); /// A partition template specified by a namespace record. -#[derive(Debug, PartialEq, Clone)] -pub struct NamespacePartitionTemplateOverride(Arc<proto::PartitionTemplate>); +#[derive(Debug, PartialEq, Clone, sqlx::Type)] +#[sqlx(transparent)] +pub struct NamespacePartitionTemplateOverride(SerializationWrapper); impl Default for NamespacePartitionTemplateOverride { fn default() -> Self { - Self(Arc::clone(&PARTITION_BY_DAY_PROTO)) + Self(SerializationWrapper(Arc::clone(&PARTITION_BY_DAY_PROTO))) } } impl From<proto::PartitionTemplate> for NamespacePartitionTemplateOverride { fn from(partition_template: proto::PartitionTemplate) -> Self { - Self(Arc::new(partition_template)) + Self(SerializationWrapper(Arc::new(partition_template))) } } @@ -42,17 +43,18 @@ impl From<proto::PartitionTemplate> for NamespacePartitionTemplateOverride { /// partition template, so the table will get the namespace's partition template. impl From<&NamespacePartitionTemplateOverride> for TablePartitionTemplateOverride { fn from(namespace_template: &NamespacePartitionTemplateOverride) -> Self { - Self(Arc::clone(&namespace_template.0)) + Self(SerializationWrapper(Arc::clone(&namespace_template.0 .0))) } } /// A partition template specified by a table record. -#[derive(Debug, PartialEq, Clone)] -pub struct TablePartitionTemplateOverride(Arc<proto::PartitionTemplate>); +#[derive(Debug, PartialEq, Clone, sqlx::Type)] +#[sqlx(transparent)] +pub struct TablePartitionTemplateOverride(SerializationWrapper); impl Default for TablePartitionTemplateOverride { fn default() -> Self { - Self(Arc::clone(&PARTITION_BY_DAY_PROTO)) + Self(SerializationWrapper(Arc::clone(&PARTITION_BY_DAY_PROTO))) } } @@ -67,6 +69,7 @@ impl TablePartitionTemplateOverride { ) -> Self { custom_table_template .map(Arc::new) + .map(SerializationWrapper) .map(Self) .unwrap_or_else(|| namespace_template.into()) } @@ -75,6 +78,7 @@ impl TablePartitionTemplateOverride { /// build `PartitionKey`s. pub fn parts(&self) -> impl Iterator<Item = TemplatePart<'_>> { self.0 + .0 .parts .iter() .flat_map(|part| part.part.as_ref()) @@ -85,7 +89,14 @@ impl TablePartitionTemplateOverride { } } -impl<DB> sqlx::Type<DB> for NamespacePartitionTemplateOverride +/// This manages the serialization/deserialization of the `proto::PartitionTemplate` type to and +/// from the database through `sqlx` for the `NamespacePartitionTemplateOverride` and +/// `TablePartitionTemplateOverride` types. It's an internal implementation detail to minimize code +/// duplication. 
+#[derive(Debug, Clone, PartialEq)] +struct SerializationWrapper(Arc<proto::PartitionTemplate>); + +impl<DB> sqlx::Type<DB> for SerializationWrapper where sqlx::types::Json<Self>: sqlx::Type<DB>, DB: sqlx::Database, @@ -95,7 +106,7 @@ where } } -impl<'q, DB> sqlx::Encode<'q, DB> for NamespacePartitionTemplateOverride +impl<'q, DB> sqlx::Encode<'q, DB> for SerializationWrapper where DB: sqlx::Database, for<'b> sqlx::types::Json<&'b proto::PartitionTemplate>: sqlx::Encode<'q, DB>, @@ -104,8 +115,6 @@ where &self, buf: &mut <DB as sqlx::database::HasArguments<'q>>::ArgumentBuffer, ) -> sqlx::encode::IsNull { - // Unambiguous delegation to the Encode impl on the Json type, which - // exists due to the constraint in the where clause above. <sqlx::types::Json<&proto::PartitionTemplate> as sqlx::Encode<'_, DB>>::encode_by_ref( &sqlx::types::Json(&self.0), buf, @@ -113,7 +122,7 @@ where } } -impl<'q, DB> sqlx::Decode<'q, DB> for NamespacePartitionTemplateOverride +impl<'q, DB> sqlx::Decode<'q, DB> for SerializationWrapper where DB: sqlx::Database, sqlx::types::Json<proto::PartitionTemplate>: sqlx::Decode<'q, DB>, @@ -148,49 +157,7 @@ pub fn test_table_partition_override( .collect(); let proto = Arc::new(proto::PartitionTemplate { parts }); - TablePartitionTemplateOverride(proto) -} - -impl<DB> sqlx::Type<DB> for TablePartitionTemplateOverride -where - sqlx::types::Json<Self>: sqlx::Type<DB>, - DB: sqlx::Database, -{ - fn type_info() -> DB::TypeInfo { - <sqlx::types::Json<Self> as sqlx::Type<DB>>::type_info() - } -} - -impl<'q, DB> sqlx::Encode<'q, DB> for TablePartitionTemplateOverride -where - DB: sqlx::Database, - for<'b> sqlx::types::Json<&'b proto::PartitionTemplate>: sqlx::Encode<'q, DB>, -{ - fn encode_by_ref( - &self, - buf: &mut <DB as sqlx::database::HasArguments<'q>>::ArgumentBuffer, - ) -> sqlx::encode::IsNull { - <sqlx::types::Json<&proto::PartitionTemplate> as sqlx::Encode<'_, DB>>::encode_by_ref( - &sqlx::types::Json(&self.0), - buf, - ) - } -} - -impl<'q, DB> sqlx::Decode<'q, DB> for TablePartitionTemplateOverride -where - DB: sqlx::Database, - sqlx::types::Json<proto::PartitionTemplate>: sqlx::Decode<'q, DB>, -{ - fn decode( - value: <DB as sqlx::database::HasValueRef<'q>>::ValueRef, - ) -> Result<Self, Box<dyn std::error::Error + 'static + Send + Sync>> { - Ok(Self( - <sqlx::types::Json<proto::PartitionTemplate> as sqlx::Decode<'_, DB>>::decode(value)? - .0 - .into(), - )) - } + TablePartitionTemplateOverride(SerializationWrapper(proto)) } #[cfg(test)] @@ -229,7 +196,7 @@ mod tests { &namespace_template, ); - assert_eq!(table_template.0.as_ref(), &custom_table_template); + assert_eq!(table_template.0 .0.as_ref(), &custom_table_template); } // The JSON representation of the partition template protobuf is stored in the database, so
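The deduplication above works by introducing one private newtype, `SerializationWrapper`, that carries the database encode/decode behaviour once, while both public override types simply delegate to it transparently. A hedged illustration of that same newtype-delegation idea, written with serde instead of sqlx to keep it self-contained (all type names below are stand-ins, not the real IOx types; assumes `serde` with the derive feature and `serde_json`):

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the shared protobuf payload.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
struct PartitionTemplate {
    parts: Vec<String>,
}

// One private wrapper owns the (de)serialization behaviour...
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
struct SerializationWrapper(PartitionTemplate);

// ...and each public type is a transparent newtype around it, so the
// serialization impls are written once instead of once per public type.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
struct NamespaceOverride(SerializationWrapper);

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
struct TableOverride(SerializationWrapper);

fn main() {
    let table = TableOverride(SerializationWrapper(PartitionTemplate {
        parts: vec!["%Y-%m-%d".to_string()],
    }));
    // Serializes as the bare template, mirroring how `#[sqlx(transparent)]`
    // delegates to the inner wrapper in the diff above.
    println!("{}", serde_json::to_string(&table).unwrap());
}
```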
5f7a6e696fcabd3395024cdd92b0a76a85653876
Marco Neumann
2022-11-10 08:13:22
chunk InfluxRPC `ReadResponses` (#6094)
Currently we see some prod panics: ``` 'assertion failed: len <= std::u32::MAX as usize', tonic/src/codec/encode.rs:127:5 ``` This is due to an upstream bug in tonic: https://github.com/hyperium/tonic/issues/1141 However the fix will only turn this into an error instead of panicking. We should instead NOT return such overlarge results, esp. because InfluxRPC supports streaming. While we currently don't perform streaming conversion (like streaming the data out of the query stack into the gRPC layer), the 4GB size limit can easily be triggered (in prod) w/ enough RAM. So let's re-chunk our in-memory responses so that they stream nicely to the client. We may later implement proper streaming conversion, see #4445 and #503.
null
refactor: chunk InfluxRPC `ReadResponses` (#6094) Currently we see some prod panics: ``` 'assertion failed: len <= std::u32::MAX as usize', tonic/src/codec/encode.rs:127:5 ``` This is due to an upstream bug in tonic: https://github.com/hyperium/tonic/issues/1141 However the fix will only turn this into an error instead of panicking. We should instead NOT return such overlarge results, esp. because InfluxRPC supports streaming. While we currently don't perform streaming conversion (like streaming the data out of the query stack into the gRPC layer), the 4GB size limit can easily be triggered (in prod) w/ enough RAM. So let's re-chunk our in-memory responses so that they stream nicely to the client. We may later implement proper streaming conversion, see #4445 and #503.
diff --git a/service_grpc_influxrpc/src/service.rs b/service_grpc_influxrpc/src/service.rs index d48219a201..ff68000e02 100644 --- a/service_grpc_influxrpc/src/service.rs +++ b/service_grpc_influxrpc/src/service.rs @@ -16,8 +16,8 @@ use datafusion::error::DataFusionError; use futures::Stream; use generated_types::{ google::protobuf::Empty, literal_or_regex::Value as RegexOrLiteralValue, - offsets_response::PartitionOffsetResponse, storage_server::Storage, tag_key_predicate, - CapabilitiesResponse, Capability, Int64ValuesResponse, LiteralOrRegex, + offsets_response::PartitionOffsetResponse, read_response::Frame, storage_server::Storage, + tag_key_predicate, CapabilitiesResponse, Capability, Int64ValuesResponse, LiteralOrRegex, MeasurementFieldsRequest, MeasurementFieldsResponse, MeasurementNamesRequest, MeasurementTagKeysRequest, MeasurementTagValuesRequest, OffsetsResponse, Predicate, ReadFilterRequest, ReadGroupRequest, ReadResponse, ReadSeriesCardinalityRequest, @@ -32,7 +32,7 @@ use iox_query::{ }, QueryDatabase, QueryText, }; -use observability_deps::tracing::{error, info, trace}; +use observability_deps::tracing::{error, info, trace, warn}; use pin_project::pin_project; use service_common::{datafusion_error_to_tonic_code, planner::Planner, QueryDatabaseProvider}; use snafu::{OptionExt, ResultExt, Snafu}; @@ -47,6 +47,12 @@ use trace::{ctx::SpanContext, span::SpanExt}; use trace_http::ctx::{RequestLogContext, RequestLogContextExt}; use tracker::InstrumentedAsyncOwnedSemaphorePermit; +/// The size to which we limit our [`ReadResponse`] payloads. +/// +/// We will regroup the returned frames (preserving order) to only produce [`ReadResponse`] objects of approximately +/// this size (there's a bit of additional encoding overhead on top of that, but that should be OK). +const MAX_READ_RESPONSE_SIZE: usize = 4194304 - 100_000; // 4MB - <wiggle room> + #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Database not found: {}", db_name))] @@ -270,7 +276,8 @@ where let mut query_completed_token = db.record_query(&ctx, "read_filter", defer_json(&req)); let results = read_filter_impl(Arc::clone(&db), db_name, req, &ctx) - .await? + .await + .map(|responses| chunk_read_responses(responses, MAX_READ_RESPONSE_SIZE))? .into_iter() .map(Ok) .collect::<Vec<_>>(); @@ -350,6 +357,7 @@ where &ctx, ) .await + .map(|responses| chunk_read_responses(responses, MAX_READ_RESPONSE_SIZE)) .map_err(|e| e.into_status())? .into_iter() .map(Ok) @@ -429,6 +437,7 @@ where &ctx, ) .await + .map(|responses| chunk_read_responses(responses, MAX_READ_RESPONSE_SIZE)) .map_err(|e| e.into_status())? .into_iter() .map(Ok) @@ -1586,6 +1595,58 @@ where } } +/// Chunk given [`ReadResponse`]s -- while preserving the [`Frame`] order -- into responses that shall at max have the +/// given size. +/// +/// # Panic +/// Panics if `size_limit` is 0. +fn chunk_read_responses(responses: Vec<ReadResponse>, size_limit: usize) -> Vec<ReadResponse> { + assert!(size_limit > 0, "zero size limit"); + + let mut out = Vec::with_capacity(1); + let it = responses + .into_iter() + .flat_map(|response| response.frames.into_iter()); + + let mut frames = vec![]; + let mut size = 0; + for frame in it { + let fsize = frame_size(&frame); + + // flush? 
+ if size + fsize > size_limit { + size = 0; + out.push(ReadResponse { + frames: std::mem::take(&mut frames), + }); + } + + if fsize > size_limit { + warn!( + frame_size = fsize, + size_limit, "Oversized frame in read response", + ); + } + frames.push(frame); + size += fsize; + } + + // final flush + if !frames.is_empty() { + out.push(ReadResponse { frames }); + } + + out +} + +fn frame_size(frame: &Frame) -> usize { + frame + .data + .as_ref() + .map(|data| data.encoded_len()) + .unwrap_or_default() +} + #[cfg(test)] mod tests { use super::*; @@ -3474,6 +3535,117 @@ mod tests { assert_eq!(response.metadata().get("storage-type").unwrap(), "iox"); } + #[test] + #[should_panic(expected = "zero size limit")] + fn test_chunk_read_responses_panics() { + chunk_read_responses(vec![], 0); + } + + #[test] + fn test_chunk_read_responses_ok() { + use generated_types::influxdata::platform::storage::read_response::{ + frame::Data, BooleanPointsFrame, + }; + + let frame1 = Frame { + data: Some(Data::BooleanPoints(BooleanPointsFrame { + timestamps: vec![1, 2, 3], + values: vec![false, true, false], + })), + }; + let frame2 = Frame { + data: Some(Data::BooleanPoints(BooleanPointsFrame { + timestamps: vec![4], + values: vec![true], + })), + }; + let fsize1 = frame_size(&frame1); + let fsize2 = frame_size(&frame2); + + // no respones + assert_eq!(chunk_read_responses(vec![], 1), vec![],); + + // no frames + assert_eq!( + chunk_read_responses(vec![ReadResponse { frames: vec![] }], 1), + vec![], + ); + + // split + assert_eq!( + chunk_read_responses( + vec![ReadResponse { + frames: vec![ + frame1.clone(), + frame1.clone(), + frame2.clone(), + frame2.clone(), + frame1.clone(), + ], + }], + fsize1 + fsize1 + fsize2, + ), + vec![ + ReadResponse { + frames: vec![frame1.clone(), frame1.clone(), frame2.clone()], + }, + ReadResponse { + frames: vec![frame2.clone(), frame1.clone()], + }, + ], + ); + + // join + assert_eq!( + chunk_read_responses( + vec![ + ReadResponse { + frames: vec![frame1.clone(), frame2.clone(),], + }, + ReadResponse { + frames: vec![frame2.clone(),], + }, + ], + fsize1 + fsize2 + fsize2, + ), + vec![ReadResponse { + frames: vec![frame1.clone(), frame2.clone(), frame2.clone()], + },], + ); + + // re-arrange + assert_eq!( + chunk_read_responses( + vec![ + ReadResponse { + frames: vec![ + frame1.clone(), + frame1.clone(), + frame2.clone(), + frame2.clone(), + frame1.clone(), + ], + }, + ReadResponse { + frames: vec![frame1.clone(), frame2.clone(),], + }, + ], + fsize1 + fsize1 + fsize2, + ), + vec![ + ReadResponse { + frames: vec![frame1.clone(), frame1.clone(), frame2.clone()], + }, + ReadResponse { + frames: vec![frame2.clone(), frame1.clone(), frame1], + }, + ReadResponse { + frames: vec![frame2], + }, + ], + ); + } + fn make_timestamp_range(start: i64, end: i64) -> TimestampRange { TimestampRange { start, end } }
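The regrouping strategy described in this commit is a greedy, order-preserving split: keep appending frames to the current response until adding the next one would cross the size limit, then flush and start a new response. Below is a simplified, generic sketch of that idea under the assumption of an arbitrary size function; it is not the exact `chunk_read_responses` (for instance, it keeps an oversized item in its own group instead of logging a warning):

```rust
/// Greedily regroup items (in order) so each group's total size stays at or
/// under `size_limit`; a single oversized item still ends up in its own group.
fn chunk_by_size<T>(
    items: impl IntoIterator<Item = T>,
    size_limit: usize,
    size_of: impl Fn(&T) -> usize,
) -> Vec<Vec<T>> {
    assert!(size_limit > 0, "zero size limit");

    let mut out = Vec::new();
    let mut current = Vec::new();
    let mut current_size = 0;

    for item in items {
        let s = size_of(&item);
        // Flush the current group if adding this item would exceed the limit.
        if !current.is_empty() && current_size + s > size_limit {
            out.push(std::mem::take(&mut current));
            current_size = 0;
        }
        current.push(item);
        current_size += s;
    }

    // Final flush.
    if !current.is_empty() {
        out.push(current);
    }
    out
}

fn main() {
    let groups = chunk_by_size(vec![3usize, 3, 2, 2, 3], 8, |n| *n);
    assert_eq!(groups, vec![vec![3, 3, 2], vec![2, 3]]);
    println!("{groups:?}");
}
```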
aa8a8c560d54653012812b5dac248caa907d2b6f
Michael Gattozzi
2025-01-12 13:08:01
Set 72 hour query/write limit for Core (#25810)
This commit sets InfluxDB 3 Core to have a 72 hour limit for queries and writes. What this means is that writes that contain historical data older than 72 hours will be rejected and queries will filter out data older than 72 hours. Core is intended to be a recent timeseries database and performance over data older than 72 hours will degrade without a garbage collector, a core feature of InfluxDB 3 Enterprise. InfluxDB 3 Enterprise does not have this write or query limit in place. Note that this does *not* mean older data is deleted. Older data is still accessible in object storage as Parquet files that can still be used in other services and analyzed with dataframe libraries like pandas and polars. This commit does a few things: - Uses timestamps in the year 2065 for tests as these should not break for longer than many of us will be working in our lifetimes. This is only needed for the integration tests as other tests use the MockProvider for time. - Filters the buffer and persisted files to only show data newer than 3 days ago - Fixes the integration tests to work with the fact that writes older than 3 days are rejected
null
feat: Set 72 hour query/write limit for Core (#25810) This commit sets InfluxDB 3 Core to have a 72 hour limit for queries and writes. What this means is that writes that contain historical data older than 72 hours will be rejected and queries will filter out data older than 72 hours. Core is intended to be a recent timeseries database and performance over data older than 72 hours will degrade without a garbage collector, a core feature of InfluxDB 3 Enterprise. InfluxDB 3 Enterprise does not have this write or query limit in place. Note that this does *not* mean older data is deleted. Older data is still accessible in object storage as Parquet files that can still be used in other services and analyzed with dataframe libraries like pandas and polars. This commit does a few things: - Uses timestamps in the year 2065 for tests as these should not break for longer than many of us will be working in our lifetimes. This is only needed for the integration tests as other tests use the MockProvider for time. - Filters the buffer and persisted files to only show data newer than 3 days ago - Fixes the integration tests to work with the fact that writes older than 3 days are rejected
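The 72 hour window described above boils down to computing a cutoff timestamp and rejecting writes, and filtering query results, for anything older; the diff that follows mostly updates integration tests to year-2065 timestamps so their data stays inside that window. A minimal, hypothetical sketch of the cutoff check (the helper name and the nanosecond representation are assumptions for illustration, not the actual influxdb3 code):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Hypothetical helper: the oldest timestamp (nanoseconds since the epoch)
/// still accepted under a 72 hour window, relative to `now`.
fn cutoff_ns(now: SystemTime) -> i128 {
    let cutoff = now - Duration::from_secs(72 * 60 * 60);
    cutoff
        .duration_since(UNIX_EPOCH)
        .expect("cutoff is before the unix epoch")
        .as_nanos() as i128
}

fn main() {
    let cutoff = cutoff_ns(SystemTime::now());

    // A row stamped at the unix epoch is far older than 72 hours, so a write
    // carrying it would be rejected and queries would filter it out.
    let row_time_ns: i128 = 0;
    assert!(row_time_ns < cutoff);

    println!("accepting only rows with time >= {cutoff} ns since epoch");
}
```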
diff --git a/influxdb3/tests/server/auth.rs b/influxdb3/tests/server/auth.rs index 2db0cc756d..5fb3fb538e 100644 --- a/influxdb3/tests/server/auth.rs +++ b/influxdb3/tests/server/auth.rs @@ -26,7 +26,7 @@ async fn auth() { client .post(&write_lp_url) .query(&write_lp_params) - .body("cpu,host=a val=1i 123") + .body("cpu,host=a val=1i 2998574937") .send() .await .unwrap() @@ -47,7 +47,7 @@ async fn auth() { client .post(&write_lp_url) .query(&write_lp_params) - .body("cpu,host=a val=1i 123") + .body("cpu,host=a val=1i 2998574937") .bearer_auth(TOKEN) .send() .await @@ -59,7 +59,7 @@ async fn auth() { client .post(&write_lp_url) .query(&write_lp_params) - .body("cpu,host=a val=1i 123") + .body("cpu,host=a val=1i 2998574937") // support both Bearer and Token auth schemes .header("Authorization", format!("Token {TOKEN}")) .send() @@ -141,10 +141,10 @@ async fn auth_grpc() { server .write_lp_to_db( "foo", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574937\n\ + cpu,host=s1,region=us-east usage=0.89 2998574938\n\ + cpu,host=s1,region=us-east usage=0.85 2998574939", + Precision::Second, ) .await .unwrap(); @@ -167,13 +167,13 @@ async fn auth_grpc() { let batches = collect_stream(response).await; assert_batches_sorted_eq!( [ - "+------+---------+--------------------------------+-------+", - "| host | region | time | usage |", - "+------+---------+--------------------------------+-------+", - "| s1 | us-east | 1970-01-01T00:00:00.000000001Z | 0.9 |", - "| s1 | us-east | 1970-01-01T00:00:00.000000002Z | 0.89 |", - "| s1 | us-east | 1970-01-01T00:00:00.000000003Z | 0.85 |", - "+------+---------+--------------------------------+-------+", + "+------+---------+----------------------+-------+", + "| host | region | time | usage |", + "+------+---------+----------------------+-------+", + "| s1 | us-east | 2065-01-07T17:28:57Z | 0.9 |", + "| s1 | us-east | 2065-01-07T17:28:58Z | 0.89 |", + "| s1 | us-east | 2065-01-07T17:28:59Z | 0.85 |", + "+------+---------+----------------------+-------+", ], &batches ); diff --git a/influxdb3/tests/server/cli.rs b/influxdb3/tests/server/cli.rs index 1881074be4..ddbecbc04a 100644 --- a/influxdb3/tests/server/cli.rs +++ b/influxdb3/tests/server/cli.rs @@ -276,7 +276,7 @@ async fn test_create_table() { server .write_lp_to_db( db_name, - format!("{table_name},one=1,two=2,three=3 four=\"4\",five=5u,six=6,seven=7i,eight=true 1000"), + format!("{table_name},one=1,two=2,three=3 four=\"4\",five=5u,six=6,seven=7i,eight=true 2998574937"), influxdb3_client::Precision::Second, ) .await @@ -303,7 +303,7 @@ async fn test_create_table() { "six": 6.0, "seven": 7, "eight": true, - "time": "1970-01-01T00:16:40" + "time": "2065-01-07T17:28:57" }]) ); } diff --git a/influxdb3/tests/server/configure.rs b/influxdb3/tests/server/configure.rs index 52f5d002a6..02b5f2348c 100644 --- a/influxdb3/tests/server/configure.rs +++ b/influxdb3/tests/server/configure.rs @@ -1055,7 +1055,7 @@ async fn api_v3_configure_table_create_then_write() { server .write_lp_to_db( "foo", - "bar,tag1=1,tag2=2 field1=1u,field2=2i,field3=3,field4=\"4\",field5=true 1000", + "bar,tag1=1,tag2=2 field1=1u,field2=2i,field3=3,field4=\"4\",field5=true 2998574938", influxdb3_client::Precision::Second, ) .await @@ -1080,7 +1080,7 @@ async fn api_v3_configure_table_create_then_write() { "field3": 3.0, "field4": "4", "field5": true, - "time": "1970-01-01T00:16:40" + 
"time": "2065-01-07T17:28:58" }]) ); } @@ -1129,7 +1129,7 @@ async fn api_v3_configure_table_create_no_fields() { server .write_lp_to_db( "foo", - "bar,one=1,two=2 new_field=0 1000", + "bar,one=1,two=2 new_field=0 2998574938", influxdb3_client::Precision::Second, ) .await @@ -1150,7 +1150,7 @@ async fn api_v3_configure_table_create_no_fields() { "one": "1", "two": "2", "new_field": 0.0, - "time": "1970-01-01T00:16:40" + "time": "2065-01-07T17:28:58" }]) ); } diff --git a/influxdb3/tests/server/flight.rs b/influxdb3/tests/server/flight.rs index 4e508b3740..5199d5a0e6 100644 --- a/influxdb3/tests/server/flight.rs +++ b/influxdb3/tests/server/flight.rs @@ -14,10 +14,10 @@ async fn flight() -> Result<(), influxdb3_client::Error> { server .write_lp_to_db( "foo", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574936\n\ + cpu,host=s1,region=us-east usage=0.89 2998574937\n\ + cpu,host=s1,region=us-east usage=0.85 2998574938", + Precision::Second, ) .await?; @@ -33,13 +33,13 @@ async fn flight() -> Result<(), influxdb3_client::Error> { let batches = collect_stream(response).await; assert_batches_sorted_eq!( [ - "+------+---------+--------------------------------+-------+", - "| host | region | time | usage |", - "+------+---------+--------------------------------+-------+", - "| s1 | us-east | 1970-01-01T00:00:00.000000001Z | 0.9 |", - "| s1 | us-east | 1970-01-01T00:00:00.000000002Z | 0.89 |", - "| s1 | us-east | 1970-01-01T00:00:00.000000003Z | 0.85 |", - "+------+---------+--------------------------------+-------+", + "+------+---------+----------------------+-------+", + "| host | region | time | usage |", + "+------+---------+----------------------+-------+", + "| s1 | us-east | 2065-01-07T17:28:56Z | 0.9 |", + "| s1 | us-east | 2065-01-07T17:28:57Z | 0.89 |", + "| s1 | us-east | 2065-01-07T17:28:58Z | 0.85 |", + "+------+---------+----------------------+-------+", ], &batches ); @@ -68,13 +68,13 @@ async fn flight() -> Result<(), influxdb3_client::Error> { let batches = collect_stream(stream).await; assert_batches_sorted_eq!( [ - "+------+---------+--------------------------------+-------+", - "| host | region | time | usage |", - "+------+---------+--------------------------------+-------+", - "| s1 | us-east | 1970-01-01T00:00:00.000000001Z | 0.9 |", - "| s1 | us-east | 1970-01-01T00:00:00.000000002Z | 0.89 |", - "| s1 | us-east | 1970-01-01T00:00:00.000000003Z | 0.85 |", - "+------+---------+--------------------------------+-------+", + "+------+---------+----------------------+-------+", + "| host | region | time | usage |", + "+------+---------+----------------------+-------+", + "| s1 | us-east | 2065-01-07T17:28:56Z | 0.9 |", + "| s1 | us-east | 2065-01-07T17:28:57Z | 0.89 |", + "| s1 | us-east | 2065-01-07T17:28:58Z | 0.85 |", + "+------+---------+----------------------+-------+", ], &batches ); @@ -155,10 +155,10 @@ async fn flight_influxql() { server .write_lp_to_db( "foo", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574936\n\ + cpu,host=s1,region=us-east usage=0.89 2998574937\n\ + cpu,host=s1,region=us-east usage=0.85 2998574938", + Precision::Second, ) .await .unwrap(); diff --git a/influxdb3/tests/server/limits.rs b/influxdb3/tests/server/limits.rs index 
2aca62f077..8872707a69 100644 --- a/influxdb3/tests/server/limits.rs +++ b/influxdb3/tests/server/limits.rs @@ -12,7 +12,7 @@ async fn limits() -> Result<(), Error> { server .write_lp_to_db( db, - "cpu,host=s1,region=us-east usage=0.9 1\n", + "cpu,host=s1,region=us-east usage=0.9 2998574938\n", Precision::Nanosecond, ) .await?; @@ -21,8 +21,8 @@ async fn limits() -> Result<(), Error> { let Err(Error::ApiError { code, .. }) = server .write_lp_to_db( "six", - "cpu,host=s1,region=us-east usage=0.9 1\n", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574938\n", + Precision::Second, ) .await else { @@ -35,19 +35,19 @@ async fn limits() -> Result<(), Error> { let table_lp = (0..1995).fold(String::new(), |mut acc, i| { acc.push_str("cpu"); acc.push_str(&i.to_string()); - acc.push_str(",host=s1,region=us-east usage=0.9 1\n"); + acc.push_str(",host=s1,region=us-east usage=0.9 2998574938\n"); acc }); server - .write_lp_to_db("one", &table_lp, Precision::Nanosecond) + .write_lp_to_db("one", &table_lp, Precision::Second) .await?; let Err(Error::ApiError { code, .. }) = server .write_lp_to_db( "six", - "cpu2000,host=s1,region=us-east usage=0.9 1\n", - Precision::Nanosecond, + "cpu2000,host=s1,region=us-east usage=0.9 2998574938\n", + Precision::Second, ) .await else { @@ -63,15 +63,15 @@ async fn limits() -> Result<(), Error> { lp_500.push_str(&column); lp_501.push_str(&column); } - lp_500.push_str(" 0\n"); - lp_501.push_str(",column501=1 0\n"); + lp_500.push_str(" 2998574938\n"); + lp_501.push_str(",column501=1 2998574938\n"); server - .write_lp_to_db("one", &lp_500, Precision::Nanosecond) + .write_lp_to_db("one", &lp_500, Precision::Second) .await?; let Err(Error::ApiError { code, .. }) = server - .write_lp_to_db("one", &lp_501, Precision::Nanosecond) + .write_lp_to_db("one", &lp_501, Precision::Second) .await else { panic!("Did not error when adding 501st column"); diff --git a/influxdb3/tests/server/query.rs b/influxdb3/tests/server/query.rs index 56cc34b90c..8d376f13f5 100644 --- a/influxdb3/tests/server/query.rs +++ b/influxdb3/tests/server/query.rs @@ -14,10 +14,10 @@ async fn api_v3_query_sql() { server .write_lp_to_db( "foo", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574936\n\ + cpu,host=s1,region=us-east usage=0.89 2998574937\n\ + cpu,host=s1,region=us-east usage=0.85 2998574938", + Precision::Second, ) .await .unwrap(); @@ -26,13 +26,13 @@ async fn api_v3_query_sql() { TestCase { database: Some("foo"), query: "SELECT host, region, time, usage FROM cpu", - expected: "+------+---------+-------------------------------+-------+\n\ - | host | region | time | usage |\n\ - +------+---------+-------------------------------+-------+\n\ - | s1 | us-east | 1970-01-01T00:00:00.000000001 | 0.9 |\n\ - | s1 | us-east | 1970-01-01T00:00:00.000000002 | 0.89 |\n\ - | s1 | us-east | 1970-01-01T00:00:00.000000003 | 0.85 |\n\ - +------+---------+-------------------------------+-------+", + expected: "+------+---------+---------------------+-------+\n\ + | host | region | time | usage |\n\ + +------+---------+---------------------+-------+\n\ + | s1 | us-east | 2065-01-07T17:28:56 | 0.9 |\n\ + | s1 | us-east | 2065-01-07T17:28:57 | 0.89 |\n\ + | s1 | us-east | 2065-01-07T17:28:58 | 0.85 |\n\ + +------+---------+---------------------+-------+", }, TestCase { database: Some("foo"), @@ -76,14 +76,14 @@ async fn api_v3_query_sql_params() 
{ server .write_lp_to_db( "foo", - "cpu,host=a,region=us-east usage=0.9 1 - cpu,host=b,region=us-east usage=0.50 1 - cpu,host=a,region=us-east usage=0.80 2 - cpu,host=b,region=us-east usage=0.60 2 - cpu,host=a,region=us-east usage=0.70 3 - cpu,host=b,region=us-east usage=0.70 3 - cpu,host=a,region=us-east usage=0.50 4 - cpu,host=b,region=us-east usage=0.80 4", + "cpu,host=a,region=us-east usage=0.9 2998574936 + cpu,host=b,region=us-east usage=0.50 2998574936 + cpu,host=a,region=us-east usage=0.80 2998574937 + cpu,host=b,region=us-east usage=0.60 2998574937 + cpu,host=a,region=us-east usage=0.70 2998574938 + cpu,host=b,region=us-east usage=0.70 2998574938 + cpu,host=a,region=us-east usage=0.50 2998574939 + cpu,host=b,region=us-east usage=0.80 2998574939", Precision::Second, ) .await @@ -116,8 +116,8 @@ async fn api_v3_query_sql_params() { "+------+---------+---------------------+-------+\n\ | host | region | time | usage |\n\ +------+---------+---------------------+-------+\n\ - | b | us-east | 1970-01-01T00:00:03 | 0.7 |\n\ - | b | us-east | 1970-01-01T00:00:04 | 0.8 |\n\ + | b | us-east | 2065-01-07T17:28:58 | 0.7 |\n\ + | b | us-east | 2065-01-07T17:28:59 | 0.8 |\n\ +------+---------+---------------------+-------+", resp ); @@ -152,8 +152,8 @@ async fn api_v3_query_sql_params() { "+------+---------+---------------------+-------+\n\ | host | region | time | usage |\n\ +------+---------+---------------------+-------+\n\ - | b | us-east | 1970-01-01T00:00:03 | 0.7 |\n\ - | b | us-east | 1970-01-01T00:00:04 | 0.8 |\n\ + | b | us-east | 2065-01-07T17:28:58 | 0.7 |\n\ + | b | us-east | 2065-01-07T17:28:59 | 0.8 |\n\ +------+---------+---------------------+-------+", resp ); @@ -195,13 +195,13 @@ async fn api_v3_query_influxql() { server .write_lp_to_db( "foo", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3\n\ - mem,host=s1,region=us-east usage=0.5 4\n\ - mem,host=s1,region=us-east usage=0.6 5\n\ - mem,host=s1,region=us-east usage=0.7 6", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574930\n\ + cpu,host=s1,region=us-east usage=0.89 2998574931\n\ + cpu,host=s1,region=us-east usage=0.85 2998574932\n + mem,host=s1,region=us-east usage=0.5 2998574933\n\ + mem,host=s1,region=us-east usage=0.6 2998574934\n\ + mem,host=s1,region=us-east usage=0.7 2998574935", + Precision::Second, ) .await .unwrap(); @@ -210,13 +210,13 @@ async fn api_v3_query_influxql() { server .write_lp_to_db( "bar", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3\n\ - mem,host=s1,region=us-east usage=0.5 4\n\ - mem,host=s1,region=us-east usage=0.6 5\n\ - mem,host=s1,region=us-east usage=0.7 6", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574930\n\ + cpu,host=s1,region=us-east usage=0.89 2998574931\n\ + cpu,host=s1,region=us-east usage=0.85 2998574932\n\ + mem,host=s1,region=us-east usage=0.5 2998574933\n\ + mem,host=s1,region=us-east usage=0.6 2998574934\n\ + mem,host=s1,region=us-east usage=0.7 2998574935", + Precision::Second, ) .await .unwrap(); @@ -225,41 +225,38 @@ async fn api_v3_query_influxql() { TestCase { database: Some("foo"), query: "SELECT time, host, region, usage FROM cpu", - expected: - "+------------------+-------------------------------+------+---------+-------+\n\ - | iox::measurement | time | host | region | usage |\n\ - 
+------------------+-------------------------------+------+---------+-------+\n\ - | cpu | 1970-01-01T00:00:00.000000001 | s1 | us-east | 0.9 |\n\ - | cpu | 1970-01-01T00:00:00.000000002 | s1 | us-east | 0.89 |\n\ - | cpu | 1970-01-01T00:00:00.000000003 | s1 | us-east | 0.85 |\n\ - +------------------+-------------------------------+------+---------+-------+", + expected: "+------------------+---------------------+------+---------+-------+\n\ + | iox::measurement | time | host | region | usage |\n\ + +------------------+---------------------+------+---------+-------+\n\ + | cpu | 2065-01-07T17:28:50 | s1 | us-east | 0.9 |\n\ + | cpu | 2065-01-07T17:28:51 | s1 | us-east | 0.89 |\n\ + | cpu | 2065-01-07T17:28:52 | s1 | us-east | 0.85 |\n\ + +------------------+---------------------+------+---------+-------+", }, TestCase { database: None, query: "SELECT time, host, region, usage FROM foo.autogen.cpu", - expected: - "+------------------+-------------------------------+------+---------+-------+\n\ - | iox::measurement | time | host | region | usage |\n\ - +------------------+-------------------------------+------+---------+-------+\n\ - | cpu | 1970-01-01T00:00:00.000000001 | s1 | us-east | 0.9 |\n\ - | cpu | 1970-01-01T00:00:00.000000002 | s1 | us-east | 0.89 |\n\ - | cpu | 1970-01-01T00:00:00.000000003 | s1 | us-east | 0.85 |\n\ - +------------------+-------------------------------+------+---------+-------+", + expected: "+------------------+---------------------+------+---------+-------+\n\ + | iox::measurement | time | host | region | usage |\n\ + +------------------+---------------------+------+---------+-------+\n\ + | cpu | 2065-01-07T17:28:50 | s1 | us-east | 0.9 |\n\ + | cpu | 2065-01-07T17:28:51 | s1 | us-east | 0.89 |\n\ + | cpu | 2065-01-07T17:28:52 | s1 | us-east | 0.85 |\n\ + +------------------+---------------------+------+---------+-------+", }, TestCase { database: Some("foo"), query: "SELECT host, region, usage FROM cpu, mem", - expected: - "+------------------+-------------------------------+------+---------+-------+\n\ - | iox::measurement | time | host | region | usage |\n\ - +------------------+-------------------------------+------+---------+-------+\n\ - | cpu | 1970-01-01T00:00:00.000000001 | s1 | us-east | 0.9 |\n\ - | cpu | 1970-01-01T00:00:00.000000002 | s1 | us-east | 0.89 |\n\ - | cpu | 1970-01-01T00:00:00.000000003 | s1 | us-east | 0.85 |\n\ - | mem | 1970-01-01T00:00:00.000000004 | s1 | us-east | 0.5 |\n\ - | mem | 1970-01-01T00:00:00.000000005 | s1 | us-east | 0.6 |\n\ - | mem | 1970-01-01T00:00:00.000000006 | s1 | us-east | 0.7 |\n\ - +------------------+-------------------------------+------+---------+-------+", + expected: "+------------------+---------------------+------+---------+-------+\n\ + | iox::measurement | time | host | region | usage |\n\ + +------------------+---------------------+------+---------+-------+\n\ + | cpu | 2065-01-07T17:28:50 | s1 | us-east | 0.9 |\n\ + | cpu | 2065-01-07T17:28:51 | s1 | us-east | 0.89 |\n\ + | cpu | 2065-01-07T17:28:52 | s1 | us-east | 0.85 |\n\ + | mem | 2065-01-07T17:28:53 | s1 | us-east | 0.5 |\n\ + | mem | 2065-01-07T17:28:54 | s1 | us-east | 0.6 |\n\ + | mem | 2065-01-07T17:28:55 | s1 | us-east | 0.7 |\n\ + +------------------+---------------------+------+---------+-------+", }, TestCase { database: Some("foo"), @@ -327,7 +324,8 @@ async fn api_v3_query_influxql() { }, TestCase { database: Some("foo"), - query: "SHOW TAG VALUES WITH KEY = \"host\" WHERE time < 1970-01-02", + // TODO: WHERE time < 2065-01-08 
does not work for some reason + query: "SHOW TAG VALUES WITH KEY = \"host\" WHERE time > 2065-01-07", expected: "+------------------+------+-------+\n\ | iox::measurement | key | value |\n\ +------------------+------+-------+\n\ @@ -337,7 +335,8 @@ async fn api_v3_query_influxql() { }, TestCase { database: None, - query: "SHOW TAG VALUES ON foo WITH KEY = \"host\" WHERE time < 1970-01-02", + // TODO: WHERE time < 2065-01-08 does not work for some reason + query: "SHOW TAG VALUES ON foo WITH KEY = \"host\" WHERE time > 2065-01-07", expected: "+------------------+------+-------+\n\ | iox::measurement | key | value |\n\ +------------------+------+-------+\n\ @@ -417,14 +416,14 @@ async fn api_v3_query_influxql_params() { server .write_lp_to_db( "foo", - "cpu,host=a,region=us-east usage=0.9 1 - cpu,host=b,region=us-east usage=0.50 1 - cpu,host=a,region=us-east usage=0.80 2 - cpu,host=b,region=us-east usage=0.60 2 - cpu,host=a,region=us-east usage=0.70 3 - cpu,host=b,region=us-east usage=0.70 3 - cpu,host=a,region=us-east usage=0.50 4 - cpu,host=b,region=us-east usage=0.80 4", + "cpu,host=a,region=us-east usage=0.9 2998574931 + cpu,host=b,region=us-east usage=0.50 2998574931 + cpu,host=a,region=us-east usage=0.80 2998574932 + cpu,host=b,region=us-east usage=0.60 2998574932 + cpu,host=a,region=us-east usage=0.70 2998574933 + cpu,host=b,region=us-east usage=0.70 2998574933 + cpu,host=a,region=us-east usage=0.50 2998574934 + cpu,host=b,region=us-east usage=0.80 2998574934", Precision::Second, ) .await @@ -457,8 +456,8 @@ async fn api_v3_query_influxql_params() { "+------------------+---------------------+------+---------+-------+\n\ | iox::measurement | time | host | region | usage |\n\ +------------------+---------------------+------+---------+-------+\n\ - | cpu | 1970-01-01T00:00:03 | b | us-east | 0.7 |\n\ - | cpu | 1970-01-01T00:00:04 | b | us-east | 0.8 |\n\ + | cpu | 2065-01-07T17:28:53 | b | us-east | 0.7 |\n\ + | cpu | 2065-01-07T17:28:54 | b | us-east | 0.8 |\n\ +------------------+---------------------+------+---------+-------+", resp ); @@ -493,8 +492,8 @@ async fn api_v3_query_influxql_params() { "+------------------+---------------------+------+---------+-------+\n\ | iox::measurement | time | host | region | usage |\n\ +------------------+---------------------+------+---------+-------+\n\ - | cpu | 1970-01-01T00:00:03 | b | us-east | 0.7 |\n\ - | cpu | 1970-01-01T00:00:04 | b | us-east | 0.8 |\n\ + | cpu | 2065-01-07T17:28:53 | b | us-east | 0.7 |\n\ + | cpu | 2065-01-07T17:28:54 | b | us-east | 0.8 |\n\ +------------------+---------------------+------+---------+-------+", resp ); @@ -540,14 +539,14 @@ async fn api_v3_query_json_format() { server .write_lp_to_db( "foo", - "cpu,host=a,region=us-east usage=0.9 1 - cpu,host=b,region=us-east usage=0.50 1 - cpu,host=a,region=us-east usage=0.80 2 - cpu,host=b,region=us-east usage=0.60 2 - cpu,host=a,region=us-east usage=0.70 3 - cpu,host=b,region=us-east usage=0.70 3 - cpu,host=a,region=us-east usage=0.50 4 - cpu,host=b,region=us-east usage=0.80 4", + "cpu,host=a,region=us-east usage=0.9 2998574931 + cpu,host=b,region=us-east usage=0.50 2998574931 + cpu,host=a,region=us-east usage=0.80 2998574932 + cpu,host=b,region=us-east usage=0.60 2998574932 + cpu,host=a,region=us-east usage=0.70 2998574933 + cpu,host=b,region=us-east usage=0.70 2998574933 + cpu,host=a,region=us-east usage=0.50 2998574934 + cpu,host=b,region=us-east usage=0.80 2998574934", Precision::Second, ) .await @@ -568,56 +567,56 @@ async fn api_v3_query_json_format() { "host": 
"a", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:01", + "time": "2065-01-07T17:28:51", "usage": 0.9 }, { "host": "b", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:01", + "time": "2065-01-07T17:28:51", "usage": 0.5 }, { "host": "a", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:02", + "time": "2065-01-07T17:28:52", "usage": 0.8 }, { "host": "b", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:02", + "time": "2065-01-07T17:28:52", "usage": 0.6 }, { "host": "a", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:03", + "time": "2065-01-07T17:28:53", "usage": 0.7 }, { "host": "b", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:03", + "time": "2065-01-07T17:28:53", "usage": 0.7 }, { "host": "a", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:04", + "time": "2065-01-07T17:28:54", "usage": 0.5 }, { "host": "b", "iox::measurement": "cpu", "region": "us-east", - "time": "1970-01-01T00:00:04", + "time": "2065-01-07T17:28:54", "usage": 0.8 } ]), @@ -714,14 +713,14 @@ async fn api_v3_query_jsonl_format() { server .write_lp_to_db( "foo", - "cpu,host=a,region=us-east usage=0.9 1 - cpu,host=b,region=us-east usage=0.50 1 - cpu,host=a,region=us-east usage=0.80 2 - cpu,host=b,region=us-east usage=0.60 2 - cpu,host=a,region=us-east usage=0.70 3 - cpu,host=b,region=us-east usage=0.70 3 - cpu,host=a,region=us-east usage=0.50 4 - cpu,host=b,region=us-east usage=0.80 4", + "cpu,host=a,region=us-east usage=0.9 2998574931 + cpu,host=b,region=us-east usage=0.50 2998574931 + cpu,host=a,region=us-east usage=0.80 2998574932 + cpu,host=b,region=us-east usage=0.60 2998574932 + cpu,host=a,region=us-east usage=0.70 2998574933 + cpu,host=b,region=us-east usage=0.70 2998574933 + cpu,host=a,region=us-east usage=0.50 2998574934 + cpu,host=b,region=us-east usage=0.80 2998574934", Precision::Second, ) .await @@ -737,14 +736,14 @@ async fn api_v3_query_jsonl_format() { TestCase { database: Some("foo"), query: "SELECT time, host, region, usage FROM cpu", - expected: "{\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:01\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.9}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:01\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.5}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:02\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.8}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:02\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.6}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:03\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.7}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:03\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.7}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:04\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.5}\n\ - {\"iox::measurement\":\"cpu\",\"time\":\"1970-01-01T00:00:04\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.8}\n" + expected: "{\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:51\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.9}\n\ + {\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:51\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.5}\n\ + {\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:52\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.8}\n\ + 
{\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:52\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.6}\n\ + {\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:53\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.7}\n\ + {\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:53\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.7}\n\ + {\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:54\",\"host\":\"a\",\"region\":\"us-east\",\"usage\":0.5}\n\ + {\"iox::measurement\":\"cpu\",\"time\":\"2065-01-07T17:28:54\",\"host\":\"b\",\"region\":\"us-east\",\"usage\":0.8}\n" .into(), }, TestCase { @@ -799,12 +798,12 @@ async fn api_v1_query_json_format() { server .write_lp_to_db( "foo", - "cpu,host=a usage=0.9 1\n\ - cpu,host=a usage=0.89 2\n\ - cpu,host=a usage=0.85 3\n\ - mem,host=a usage=0.5 4\n\ - mem,host=a usage=0.6 5\n\ - mem,host=a usage=0.7 6", + "cpu,host=a usage=0.9 2998574931\n\ + cpu,host=a usage=0.89 2998574932\n\ + cpu,host=a usage=0.85 2998574933\n\ + mem,host=a usage=0.5 2998574934\n\ + mem,host=a usage=0.6 2998574935\n\ + mem,host=a usage=0.7 2998574936", Precision::Second, ) .await @@ -842,9 +841,9 @@ async fn api_v1_query_json_format() { ], "name": "cpu", "values": [ - ["1970-01-01T00:00:01Z", "a", 0.9], - ["1970-01-01T00:00:02Z", "a", 0.89], - ["1970-01-01T00:00:03Z", "a", 0.85] + ["2065-01-07T17:28:51Z", "a", 0.9], + ["2065-01-07T17:28:52Z", "a", 0.89], + ["2065-01-07T17:28:53Z", "a", 0.85] ] } ], @@ -870,9 +869,9 @@ async fn api_v1_query_json_format() { ], "name": "mem", "values": [ - ["1970-01-01T00:00:04Z", "a", 0.5], - ["1970-01-01T00:00:05Z", "a", 0.6], - ["1970-01-01T00:00:06Z", "a", 0.7] + ["2065-01-07T17:28:54Z", "a", 0.5], + ["2065-01-07T17:28:55Z", "a", 0.6], + ["2065-01-07T17:28:56Z", "a", 0.7] ] }, { @@ -883,9 +882,9 @@ async fn api_v1_query_json_format() { ], "name": "cpu", "values": [ - ["1970-01-01T00:00:01Z", "a", 0.9], - ["1970-01-01T00:00:02Z", "a", 0.89], - ["1970-01-01T00:00:03Z", "a", 0.85] + ["2065-01-07T17:28:51Z", "a", 0.9], + ["2065-01-07T17:28:52Z", "a", 0.89], + ["2065-01-07T17:28:53Z", "a", 0.85] ] } ], @@ -911,9 +910,9 @@ async fn api_v1_query_json_format() { ], "name": "cpu", "values": [ - ["1970-01-01T00:00:01Z", "a", 0.9], - ["1970-01-01T00:00:02Z", "a", 0.89], - ["1970-01-01T00:00:03Z", "a", 0.85] + ["2065-01-07T17:28:51Z", "a", 0.9], + ["2065-01-07T17:28:52Z", "a", 0.89], + ["2065-01-07T17:28:53Z", "a", 0.85] ] } ], @@ -939,9 +938,9 @@ async fn api_v1_query_json_format() { ], "name": "cpu", "values": [ - [1, "a", 0.9], - [2, "a", 0.89], - [3, "a", 0.85] + [2998574931u32, "a", 0.9], + [2998574932u32, "a", 0.89], + [2998574933u32, "a", 0.85] ] } ], @@ -979,12 +978,12 @@ async fn api_v1_query_csv_format() { server .write_lp_to_db( "foo", - "cpu,host=a usage=0.9 1\n\ - cpu,host=a usage=0.89 2\n\ - cpu,host=a usage=0.85 3\n\ - mem,host=a usage=0.5 4\n\ - mem,host=a usage=0.6 5\n\ - mem,host=a usage=0.7 6", + "cpu,host=a usage=0.9 2998574931\n\ + cpu,host=a usage=0.89 2998574932\n\ + cpu,host=a usage=0.85 2998574933\n\ + mem,host=a usage=0.5 2998574934\n\ + mem,host=a usage=0.6 2998574935\n\ + mem,host=a usage=0.7 2998574936", Precision::Second, ) .await @@ -1004,9 +1003,9 @@ async fn api_v1_query_csv_format() { epoch: None, query: "SELECT time, host, usage FROM cpu", expected: "name,tags,time,host,usage\n\ - cpu,,1970-01-01T00:00:01Z,a,0.9\n\ - cpu,,1970-01-01T00:00:02Z,a,0.89\n\ - cpu,,1970-01-01T00:00:03Z,a,0.85\n\r\n", + cpu,,2065-01-07T17:28:51Z,a,0.9\n\ + cpu,,2065-01-07T17:28:52Z,a,0.89\n\ + 
cpu,,2065-01-07T17:28:53Z,a,0.85\n\r\n", }, // Basic Query with multiple measurements: TestCase { @@ -1014,12 +1013,12 @@ async fn api_v1_query_csv_format() { epoch: None, query: "SELECT time, host, usage FROM cpu, mem", expected: "name,tags,time,host,usage\n\ - mem,,1970-01-01T00:00:04Z,a,0.5\n\ - mem,,1970-01-01T00:00:05Z,a,0.6\n\ - mem,,1970-01-01T00:00:06Z,a,0.7\n\ - cpu,,1970-01-01T00:00:01Z,a,0.9\n\ - cpu,,1970-01-01T00:00:02Z,a,0.89\n\ - cpu,,1970-01-01T00:00:03Z,a,0.85\n\r\n", + mem,,2065-01-07T17:28:54Z,a,0.5\n\ + mem,,2065-01-07T17:28:55Z,a,0.6\n\ + mem,,2065-01-07T17:28:56Z,a,0.7\n\ + cpu,,2065-01-07T17:28:51Z,a,0.9\n\ + cpu,,2065-01-07T17:28:52Z,a,0.89\n\ + cpu,,2065-01-07T17:28:53Z,a,0.85\n\r\n", }, // Basic Query with db in query string: TestCase { @@ -1027,9 +1026,9 @@ async fn api_v1_query_csv_format() { epoch: None, query: "SELECT time, host, usage FROM foo.autogen.cpu", expected: "name,tags,time,host,usage\n\ - cpu,,1970-01-01T00:00:01Z,a,0.9\n\ - cpu,,1970-01-01T00:00:02Z,a,0.89\n\ - cpu,,1970-01-01T00:00:03Z,a,0.85\n\r\n", + cpu,,2065-01-07T17:28:51Z,a,0.9\n\ + cpu,,2065-01-07T17:28:52Z,a,0.89\n\ + cpu,,2065-01-07T17:28:53Z,a,0.85\n\r\n", }, // Basic Query epoch parameter set: TestCase { @@ -1037,9 +1036,9 @@ async fn api_v1_query_csv_format() { epoch: Some("s"), query: "SELECT time, host, usage FROM cpu", expected: "name,tags,time,host,usage\n\ - cpu,,1,a,0.9\n\ - cpu,,2,a,0.89\n\ - cpu,,3,a,0.85\n\r\n", + cpu,,2998574931,a,0.9\n\ + cpu,,2998574932,a,0.89\n\ + cpu,,2998574933,a,0.85\n\r\n", }, ]; @@ -1072,12 +1071,12 @@ async fn api_v1_query_chunked() { server .write_lp_to_db( "foo", - "cpu,host=a usage=0.9 1\n\ - cpu,host=a usage=0.89 2\n\ - cpu,host=a usage=0.85 3\n\ - mem,host=a usage=0.5 4\n\ - mem,host=a usage=0.6 5\n\ - mem,host=a usage=0.7 6", + "cpu,host=a usage=0.9 2998574931\n\ + cpu,host=a usage=0.89 2998574932\n\ + cpu,host=a usage=0.85 2998574933\n\ + mem,host=a usage=0.5 2998574934\n\ + mem,host=a usage=0.6 2998574935\n\ + mem,host=a usage=0.7 2998574936", Precision::Second, ) .await @@ -1102,9 +1101,9 @@ async fn api_v1_query_chunked() { "name": "cpu", "columns": ["time","host","usage"], "values": [ - [1, "a", 0.9], - [2, "a", 0.89], - [3, "a", 0.85] + [2998574931u32, "a", 0.9], + [2998574932u32, "a", 0.89], + [2998574933u32, "a", 0.85] ] } ], @@ -1126,8 +1125,8 @@ async fn api_v1_query_chunked() { "name": "cpu", "columns": ["time","host","usage"], "values": [ - [1, "a", 0.9], - [2, "a", 0.89], + [2998574931u32, "a", 0.9], + [2998574932u32, "a", 0.89], ] } ], @@ -1143,7 +1142,7 @@ async fn api_v1_query_chunked() { "name": "cpu", "columns": ["time","host","usage"], "values": [ - [3, "a", 0.85] + [2998574933u32, "a", 0.85] ] } ], @@ -1166,9 +1165,9 @@ async fn api_v1_query_chunked() { "name": "cpu", "columns": ["time","host","usage"], "values": [ - [1, "a", 0.9], - [2, "a", 0.89], - [3, "a", 0.85] + [2998574931u32, "a", 0.9], + [2998574932u32, "a", 0.89], + [2998574933u32, "a", 0.85] ] } ], @@ -1184,9 +1183,9 @@ async fn api_v1_query_chunked() { "name": "mem", "columns": ["time","host","usage"], "values": [ - [4, "a", 0.5], - [5, "a", 0.6], - [6, "a", 0.7] + [2998574934u32, "a", 0.5], + [2998574935u32, "a", 0.6], + [2998574936u32, "a", 0.7] ] } ], @@ -1209,8 +1208,8 @@ async fn api_v1_query_chunked() { "name": "cpu", "columns": ["time","host","usage"], "values": [ - [1, "a", 0.9], - [2, "a", 0.89], + [2998574931u32, "a", 0.9], + [2998574932u32, "a", 0.89], ] } ], @@ -1226,7 +1225,7 @@ async fn api_v1_query_chunked() { "name": "cpu", "columns": 
["time","host","usage"], "values": [ - [3, "a", 0.85] + [2998574933u32, "a", 0.85] ] } ], @@ -1242,8 +1241,8 @@ async fn api_v1_query_chunked() { "name": "mem", "columns": ["time","host","usage"], "values": [ - [4, "a", 0.5], - [5, "a", 0.6], + [2998574934u32, "a", 0.5], + [2998574935u32, "a", 0.6], ] } ], @@ -1259,7 +1258,7 @@ async fn api_v1_query_chunked() { "name": "mem", "columns": ["time","host","usage"], "values": [ - [6, "a", 0.7] + [2998574936u32, "a", 0.7] ] } ], @@ -1301,11 +1300,11 @@ async fn api_v1_query_data_conversion() { server .write_lp_to_db( "foo", - "weather,location=us-midwest temperature_integer=82i 1465839830100400200\n\ - weather,location=us-midwest temperature_float=82 1465839830100400200\n\ - weather,location=us-midwest temperature_str=\"too warm\" 1465839830100400200\n\ - weather,location=us-midwest too_hot=true 1465839830100400200", - Precision::Nanosecond, + "weather,location=us-midwest temperature_integer=82i 2998574930\n\ + weather,location=us-midwest temperature_float=82 2998574930\n\ + weather,location=us-midwest temperature_str=\"too warm\" 2998574930\n\ + weather,location=us-midwest too_hot=true 2998574930", + Precision::Second, ) .await .unwrap(); @@ -1338,7 +1337,7 @@ async fn api_v1_query_data_conversion() { ], "name": "weather", "values": [ - ["2016-06-13T17:43:50.100400200Z", "us-midwest", 82, 82.0, "too warm", true], + ["2065-01-07T17:28:50Z", "us-midwest", 82, 82.0, "too warm", true], ] } ], @@ -1378,12 +1377,12 @@ async fn api_v1_query_uri_and_body() { .write_lp_to_db( "foo", "\ - cpu,host=a usage=0.9 1\n\ - cpu,host=b usage=0.89 1\n\ - cpu,host=c usage=0.85 1\n\ - mem,host=a usage=0.5 2\n\ - mem,host=b usage=0.6 2\n\ - mem,host=c usage=0.7 2\ + cpu,host=a usage=0.9 2998674931\n\ + cpu,host=b usage=0.89 2998674931\n\ + cpu,host=c usage=0.85 2998674931\n\ + mem,host=a usage=0.5 2998674932\n\ + mem,host=b usage=0.6 2998674932\n\ + mem,host=c usage=0.7 2998674932\ ", Precision::Second, ) @@ -1427,17 +1426,17 @@ async fn api_v1_query_uri_and_body() { "name": "cpu", "values": [ [ - "1970-01-01T00:00:01Z", + "2065-01-08T21:15:31Z", "a", 0.9 ], [ - "1970-01-01T00:00:01Z", + "2065-01-08T21:15:31Z", "b", 0.89 ], [ - "1970-01-01T00:00:01Z", + "2065-01-08T21:15:31Z", "c", 0.85 ] @@ -1514,17 +1513,17 @@ async fn api_v1_query_uri_and_body() { "name": "mem", "values": [ [ - "1970-01-01T00:00:02Z", + "2065-01-08T21:15:32Z", "a", 0.5 ], [ - "1970-01-01T00:00:02Z", + "2065-01-08T21:15:32Z", "b", 0.6 ], [ - "1970-01-01T00:00:02Z", + "2065-01-08T21:15:32Z", "c", 0.7 ] @@ -1673,8 +1672,8 @@ async fn api_v3_query_null_tag_values_null_fields() { server .write_lp_to_db( "foo", - "cpu,host=a,region=us-east usage=0.9,system=0.1 1 - cpu,host=b usage=0.80,system=0.1 4", + "cpu,host=a,region=us-east usage=0.9,system=0.1 2998674931 + cpu,host=b usage=0.80,system=0.1 2998674934", Precision::Second, ) .await @@ -1704,8 +1703,8 @@ async fn api_v3_query_null_tag_values_null_fields() { "+------+---------+---------------------+-------+\n\ | host | region | time | usage |\n\ +------+---------+---------------------+-------+\n\ - | a | us-east | 1970-01-01T00:00:01 | 0.9 |\n\ - | b | | 1970-01-01T00:00:04 | 0.8 |\n\ + | a | us-east | 2065-01-08T21:15:31 | 0.9 |\n\ + | b | | 2065-01-08T21:15:34 | 0.8 |\n\ +------+---------+---------------------+-------+", resp ); @@ -1713,7 +1712,7 @@ async fn api_v3_query_null_tag_values_null_fields() { server .write_lp_to_db( "foo", - "cpu,host=a,region=us-east usage=0.9 10000000", + "cpu,host=a,region=us-east usage=0.9 2998674935", 
Precision::Second, ) .await @@ -1737,9 +1736,9 @@ async fn api_v3_query_null_tag_values_null_fields() { "+------+---------+--------+---------------------+-------+\n\ | host | region | system | time | usage |\n\ +------+---------+--------+---------------------+-------+\n\ - | a | us-east | 0.1 | 1970-01-01T00:00:01 | 0.9 |\n\ - | b | | 0.1 | 1970-01-01T00:00:04 | 0.8 |\n\ - | a | us-east | | 1970-04-26T17:46:40 | 0.9 |\n\ + | a | us-east | 0.1 | 2065-01-08T21:15:31 | 0.9 |\n\ + | b | | 0.1 | 2065-01-08T21:15:34 | 0.8 |\n\ + | a | us-east | | 2065-01-08T21:15:35 | 0.9 |\n\ +------+---------+--------+---------------------+-------+", resp ); diff --git a/influxdb3/tests/server/system_tables.rs b/influxdb3/tests/server/system_tables.rs index 1f7bf0313c..7fd4d41bca 100644 --- a/influxdb3/tests/server/system_tables.rs +++ b/influxdb3/tests/server/system_tables.rs @@ -11,10 +11,10 @@ async fn queries_table() { server .write_lp_to_db( "foo", - "cpu,host=s1,region=us-east usage=0.9 1\n\ - cpu,host=s1,region=us-east usage=0.89 2\n\ - cpu,host=s1,region=us-east usage=0.85 3", - Precision::Nanosecond, + "cpu,host=s1,region=us-east usage=0.9 2998574931\n\ + cpu,host=s1,region=us-east usage=0.89 2998574932\n\ + cpu,host=s1,region=us-east usage=0.85 2998574933", + Precision::Second, ) .await .expect("write some lp"); diff --git a/influxdb3/tests/server/write.rs b/influxdb3/tests/server/write.rs index dab587eaca..9d5810bbb8 100644 --- a/influxdb3/tests/server/write.rs +++ b/influxdb3/tests/server/write.rs @@ -128,11 +128,11 @@ async fn api_v1_write_round_trip() { client .post(write_url) - .query(&[("db", "foo")]) + .query(&[("db", "foo"), ("precision", "s")]) .body( - "cpu,host=a usage=0.5 1 - cpu,host=a usage=0.6 2 - cpu,host=a usage=0.7 3", + "cpu,host=a usage=0.5 2998574931 + cpu,host=a usage=0.6 2998574932 + cpu,host=a usage=0.7 2998574933", ) .send() .await @@ -150,13 +150,13 @@ async fn api_v1_write_round_trip() { assert_eq!( resp, - "+------------------+-------------------------------+------+-------+\n\ - | iox::measurement | time | host | usage |\n\ - +------------------+-------------------------------+------+-------+\n\ - | cpu | 1970-01-01T00:00:00.000000001 | a | 0.5 |\n\ - | cpu | 1970-01-01T00:00:00.000000002 | a | 0.6 |\n\ - | cpu | 1970-01-01T00:00:00.000000003 | a | 0.7 |\n\ - +------------------+-------------------------------+------+-------+" + "+------------------+---------------------+------+-------+\n\ + | iox::measurement | time | host | usage |\n\ + +------------------+---------------------+------+-------+\n\ + | cpu | 2065-01-07T17:28:51 | a | 0.5 |\n\ + | cpu | 2065-01-07T17:28:52 | a | 0.6 |\n\ + | cpu | 2065-01-07T17:28:53 | a | 0.7 |\n\ + +------------------+---------------------+------+-------+" ); } @@ -266,11 +266,11 @@ async fn api_v2_write_round_trip() { client .post(write_url) - .query(&[("bucket", "foo")]) + .query(&[("bucket", "foo"), ("precision", "s")]) .body( - "cpu,host=a usage=0.5 1 - cpu,host=a usage=0.6 2 - cpu,host=a usage=0.7 3", + "cpu,host=a usage=0.5 2998574931 + cpu,host=a usage=0.6 2998574932 + cpu,host=a usage=0.7 2998574933", ) .send() .await @@ -288,13 +288,13 @@ async fn api_v2_write_round_trip() { assert_eq!( resp, - "+------------------+-------------------------------+------+-------+\n\ - | iox::measurement | time | host | usage |\n\ - +------------------+-------------------------------+------+-------+\n\ - | cpu | 1970-01-01T00:00:00.000000001 | a | 0.5 |\n\ - | cpu | 1970-01-01T00:00:00.000000002 | a | 0.6 |\n\ - | cpu | 
1970-01-01T00:00:00.000000003 | a | 0.7 |\n\ - +------------------+-------------------------------+------+-------+" + "+------------------+---------------------+------+-------+\n\ + | iox::measurement | time | host | usage |\n\ + +------------------+---------------------+------+-------+\n\ + | cpu | 2065-01-07T17:28:51 | a | 0.5 |\n\ + | cpu | 2065-01-07T17:28:52 | a | 0.6 |\n\ + | cpu | 2065-01-07T17:28:53 | a | 0.7 |\n\ + +------------------+---------------------+------+-------+" ); } @@ -309,8 +309,8 @@ async fn writes_with_different_schema_should_fail() { .write_lp_to_db( "foo", "\ - t0,t0_tag0=initTag t0_f0=0i 1715694000\n\ - t0,t0_tag0=initTag t0_f0=1i 1715694001", + t0,t0_tag0=initTag t0_f0=0i\n\ + t0,t0_tag0=initTag t0_f0=1i 2998574931", Precision::Second, ) .await @@ -321,8 +321,8 @@ async fn writes_with_different_schema_should_fail() { .write_lp_to_db( "foo", "\ - t0,t0_tag0=initTag t0_f0=0u 1715694000\n\ - t0,t0_tag0=initTag t0_f0=1u 1715694001", + t0,t0_tag0=initTag t0_f0=0u 2998574930\n\ + t0,t0_tag0=initTag t0_f0=1u 2998574931", Precision::Second, ) .await diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs index 6bf411e348..a4f7cc2b33 100644 --- a/influxdb3_write/src/lib.rs +++ b/influxdb3_write/src/lib.rs @@ -28,6 +28,9 @@ use serde::{Deserialize, Serialize}; use std::{fmt::Debug, sync::Arc, time::Duration}; use thiserror::Error; +/// Used to determine if writes are older than what we can accept or query +pub const THREE_DAYS: Duration = Duration::from_secs(60 * 60 * 24 * 3); + #[derive(Debug, Error)] pub enum Error { #[error("object store path error: {0}")] diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 5a897ed161..4cc522153e 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -199,6 +199,7 @@ impl WriteBufferImpl { } let persisted_files = Arc::new(PersistedFiles::new_from_persisted_snapshots( + Arc::clone(&time_provider), persisted_snapshots, )); let queryable_buffer = Arc::new(QueryableBuffer::new(QueryableBufferArgs { @@ -208,6 +209,7 @@ impl WriteBufferImpl { last_cache_provider: Arc::clone(&last_cache), distinct_cache_provider: Arc::clone(&distinct_cache), persisted_files: Arc::clone(&persisted_files), + time_provider: Arc::clone(&time_provider), parquet_cache: parquet_cache.clone(), })); diff --git a/influxdb3_write/src/write_buffer/persisted_files.rs b/influxdb3_write/src/write_buffer/persisted_files.rs index 5c73f0b071..e43ca76dd4 100644 --- a/influxdb3_write/src/write_buffer/persisted_files.rs +++ b/influxdb3_write/src/write_buffer/persisted_files.rs @@ -7,21 +7,35 @@ use hashbrown::HashMap; use influxdb3_id::DbId; use influxdb3_id::TableId; use influxdb3_telemetry::ParquetMetrics; +use iox_time::TimeProvider; use parking_lot::RwLock; +use std::sync::Arc; type DatabaseToTables = HashMap<DbId, TableToFiles>; type TableToFiles = HashMap<TableId, Vec<ParquetFile>>; -#[derive(Debug, Default)] +#[derive(Debug)] pub struct PersistedFiles { + /// The time provider to check if something is older than 3 days + time_provider: Arc<dyn TimeProvider>, inner: RwLock<Inner>, } impl PersistedFiles { + pub fn new(time_provider: Arc<dyn TimeProvider>) -> Self { + Self { + time_provider, + inner: Default::default(), + } + } /// Create a new `PersistedFiles` from a list of persisted snapshots - pub fn new_from_persisted_snapshots(persisted_snapshots: Vec<PersistedSnapshot>) -> Self { + pub fn new_from_persisted_snapshots( + time_provider: Arc<dyn TimeProvider>, + 
persisted_snapshots: Vec<PersistedSnapshot>, + ) -> Self { let inner = Inner::new_from_persisted_snapshots(persisted_snapshots); Self { + time_provider, inner: RwLock::new(inner), } } @@ -34,6 +48,7 @@ impl PersistedFiles { /// Get the list of files for a given database and table, always return in descending order of min_time pub fn get_files(&self, db_id: DbId, table_id: TableId) -> Vec<ParquetFile> { + let three_days_ago = (self.time_provider.now() - crate::THREE_DAYS).timestamp_nanos(); let mut files = { let inner = self.inner.read(); inner @@ -42,6 +57,9 @@ impl PersistedFiles { .and_then(|tables| tables.get(&table_id)) .cloned() .unwrap_or_default() + .into_iter() + .filter(|file| dbg!(file.min_time) > dbg!(three_days_ago)) + .collect::<Vec<_>>() }; files.sort_by(|a, b| b.min_time.cmp(&a.min_time)); @@ -153,6 +171,8 @@ mod tests { use influxdb3_catalog::catalog::CatalogSequenceNumber; use influxdb3_wal::{SnapshotSequenceNumber, WalFileSequenceNumber}; + use iox_time::MockProvider; + use iox_time::Time; use observability_deps::tracing::info; use pretty_assertions::assert_eq; @@ -163,8 +183,12 @@ mod tests { #[test_log::test(test)] fn test_get_metrics_after_initial_load() { let all_persisted_snapshot_files = build_persisted_snapshots(); - let persisted_file = - PersistedFiles::new_from_persisted_snapshots(all_persisted_snapshot_files); + let time_provider: Arc<dyn TimeProvider> = + Arc::new(MockProvider::new(Time::from_timestamp(0, 0).unwrap())); + let persisted_file = PersistedFiles::new_from_persisted_snapshots( + time_provider, + all_persisted_snapshot_files, + ); let (file_count, size_in_mb, row_count) = persisted_file.get_metrics(); @@ -177,8 +201,12 @@ mod tests { #[test_log::test(test)] fn test_get_metrics_after_update() { let all_persisted_snapshot_files = build_persisted_snapshots(); - let persisted_file = - PersistedFiles::new_from_persisted_snapshots(all_persisted_snapshot_files); + let time_provider: Arc<dyn TimeProvider> = + Arc::new(MockProvider::new(Time::from_timestamp(0, 0).unwrap())); + let persisted_file = PersistedFiles::new_from_persisted_snapshots( + time_provider, + all_persisted_snapshot_files, + ); let parquet_files = build_parquet_files(5); let new_snapshot = build_snapshot(parquet_files, 1, 1, 1); persisted_file.add_persisted_snapshot_files(new_snapshot); @@ -207,8 +235,12 @@ mod tests { .cloned() .unwrap(); - let persisted_file = - PersistedFiles::new_from_persisted_snapshots(all_persisted_snapshot_files); + let time_provider: Arc<dyn TimeProvider> = + Arc::new(MockProvider::new(Time::from_timestamp(0, 0).unwrap())); + let persisted_file = PersistedFiles::new_from_persisted_snapshots( + time_provider, + all_persisted_snapshot_files, + ); let mut parquet_files = build_parquet_files(4); info!(all_persisted_files = ?persisted_file, "Full persisted file"); info!(already_existing_file = ?already_existing_file, "Existing file"); diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs index dcb1c1c50b..08ea2e5a9b 100644 --- a/influxdb3_write/src/write_buffer/queryable_buffer.rs +++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs @@ -26,6 +26,7 @@ use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges}; use iox_query::exec::Executor; use iox_query::frontend::reorg::ReorgPlanner; use iox_query::QueryChunk; +use iox_time::TimeProvider; use object_store::path::Path; use observability_deps::tracing::{debug, error, info}; use parking_lot::RwLock; @@ -47,6 +48,7 @@ pub struct 
QueryableBuffer { persisted_files: Arc<PersistedFiles>, buffer: Arc<RwLock<BufferState>>, parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, + time_provider: Arc<dyn TimeProvider>, /// Sends a notification to this watch channel whenever a snapshot info is persisted persisted_snapshot_notify_rx: tokio::sync::watch::Receiver<Option<PersistedSnapshot>>, persisted_snapshot_notify_tx: tokio::sync::watch::Sender<Option<PersistedSnapshot>>, @@ -60,6 +62,7 @@ pub struct QueryableBufferArgs { pub distinct_cache_provider: Arc<DistinctCacheProvider>, pub persisted_files: Arc<PersistedFiles>, pub parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, + pub time_provider: Arc<dyn TimeProvider>, } impl QueryableBuffer { @@ -72,6 +75,7 @@ impl QueryableBuffer { distinct_cache_provider, persisted_files, parquet_cache, + time_provider, }: QueryableBufferArgs, ) -> Self { let buffer = Arc::new(RwLock::new(BufferState::new(Arc::clone(&catalog)))); @@ -86,6 +90,7 @@ impl QueryableBuffer { persisted_files, buffer, parquet_cache, + time_provider, persisted_snapshot_notify_rx, persisted_snapshot_notify_tx, } @@ -118,6 +123,9 @@ impl QueryableBuffer { .partitioned_record_batches(Arc::clone(&table_def), filters) .map_err(|e| DataFusionError::Execution(format!("error getting batches {}", e)))? .into_iter() + .filter(|(_, (ts_min_max, _))| { + ts_min_max.min > (self.time_provider.now() - crate::THREE_DAYS).timestamp_nanos() + }) .map(|(gen_time, (ts_min_max, batches))| { let row_count = batches.iter().map(|b| b.num_rows()).sum::<usize>(); let chunk_stats = create_chunk_statistics( @@ -755,7 +763,8 @@ mod tests { Arc::clone(&catalog), ) .unwrap(), - persisted_files: Arc::new(Default::default()), + time_provider: Arc::clone(&time_provider), + persisted_files: Arc::new(PersistedFiles::new(Arc::clone(&time_provider))), parquet_cache: None, }; let queryable_buffer = QueryableBuffer::new(queryable_buffer_args); @@ -764,10 +773,18 @@ mod tests { // create the initial write with two tags let val = WriteValidator::initialize(db.clone(), Arc::clone(&catalog), 0).unwrap(); - let lp = "foo,t1=a,t2=b f1=1i 1000000000"; + let lp = format!( + "foo,t1=a,t2=b f1=1i {}", + time_provider.now().timestamp_nanos() + ); let lines = val - .v1_parse_lines_and_update_schema(lp, false, time_provider.now(), Precision::Nanosecond) + .v1_parse_lines_and_update_schema( + &lp, + false, + time_provider.now(), + Precision::Nanosecond, + ) .unwrap() .convert_lines_to_buffer(Gen1Duration::new_1m()); let batch: WriteBatch = lines.into(); diff --git a/influxdb3_write/src/write_buffer/validator.rs b/influxdb3_write/src/write_buffer/validator.rs index e602325ef1..fabe087df4 100644 --- a/influxdb3_write/src/write_buffer/validator.rs +++ b/influxdb3_write/src/write_buffer/validator.rs @@ -241,6 +241,15 @@ fn validate_and_qualify_v1_line( .timestamp .map(|ts| apply_precision_to_timestamp(precision, ts)) .unwrap_or(ingest_time.timestamp_nanos()); + + if timestamp_ns < (ingest_time - crate::THREE_DAYS).timestamp_nanos() { + return Err(WriteLineError { + original_line: line.to_string(), + line_number: line_number + 1, + error_message: "line contained a date that was more than 3 days ago".into(), + }); + } + fields.push(Field::new(time_col_id, FieldData::Timestamp(timestamp_ns))); // if we have new columns defined, add them to the db_schema table so that subsequent lines
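The diff above enforces one rule in several places: a write's timestamp, or a persisted file's `min_time`, is rejected once it falls more than `THREE_DAYS` before the ingest time. A minimal sketch of that cutoff, assuming plain nanosecond integers in place of the crate's `TimeProvider` and error types (the function and variable names here are illustrative, not the crate's API), might look like this:

```rust
use std::time::Duration;

/// Mirrors the `THREE_DAYS` constant added in the diff.
const THREE_DAYS: Duration = Duration::from_secs(60 * 60 * 24 * 3);

/// Returns true when `timestamp_ns` is no older than three days before
/// `ingest_time_ns`. Both are nanoseconds since the Unix epoch.
fn within_window(timestamp_ns: i64, ingest_time_ns: i64) -> bool {
    let cutoff_ns = ingest_time_ns - THREE_DAYS.as_nanos() as i64;
    timestamp_ns >= cutoff_ns
}

fn main() {
    // Roughly 2065-01-07 in nanoseconds, matching the test data above.
    let ingest = 2_998_574_931_000_000_000_i64;
    assert!(within_window(ingest - 1_000_000_000, ingest)); // one second old: accepted
    assert!(!within_window(ingest - 4 * 24 * 3_600 * 1_000_000_000, ingest)); // four days old: rejected
    println!("cutoff checks passed");
}
```

The same comparison drives both the write-path rejection in the validator and the read-path filtering in `PersistedFiles::get_files` and the queryable buffer, which is why the tests above move their timestamps from the 1970 epoch into a window near "now".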
530ee945582a1ba0c4e5c0e951a097b42bb1d85f
Andrew Lamb
2023-05-02 12:50:04
use correct sort key in projection_pushdown (#7718)
* fix: use correct sort key in projection_pushdown * fix: tabs in docs * refactor: Use Serde to format test results
null
fix: use correct sort key in projection_pushdown (#7718) * fix: use correct sort key in projection_pushdown * fix: tabs in docs * refactor: Use Serde to format test results
diff --git a/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected index 000b433ed3..f2f3a6689f 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/date_bin.sql.expected @@ -114,6 +114,6 @@ | | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: time@0 >= 957528000000000000 AND time@0 <= 957531540000000000 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time@2 >= 957528000000000000 AND time@2 <= 957531540000000000, pruning_predicate=time_max@0 >= 957528000000000000 AND time_min@1 <= 957531540000000000, output_ordering=[time@0 ASC], projection=[time, user] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time@2 >= 957528000000000000 AND time@2 <= 957531540000000000, pruning_predicate=time_max@0 >= 957528000000000000 AND time_min@1 <= 957531540000000000, output_ordering=[], projection=[time, user] | | | | ---------- \ No newline at end of file diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected index 8ae8337e3f..1ca1d93cb4 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_ingester.sql.expected @@ -90,7 +90,7 @@ | | RecordBatchesExec: batches_groups=1 batches=1 total_rows=6 | | | ProjectionExec: expr=[city@0 as name] | | | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[city@0 ASC], projection=[city] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[], projection=[city] | | | ProjectionExec: expr=[city@1 as city] | | | DeduplicateExec: [state@2 ASC,city@1 ASC,time@3 ASC] | | | SortPreservingMergeExec: [state@2 ASC,city@1 ASC,time@3 ASC,__chunk_order@0 ASC] | diff --git a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected index 2a71d917d3..e21f14d74d 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/duplicates_parquet.sql.expected @@ -77,7 +77,7 @@ | | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/00000000-0000-0000-0000-000000000002.parquet], [1/1/1/00000000-0000-0000-0000-000000000003.parquet]]}, output_ordering=[state@2 ASC, city@1 ASC, time@3 ASC, __chunk_order@0 ASC], projection=[__chunk_order, city, state, time] | | | ProjectionExec: expr=[city@0 as name] | | | UnionExec | -| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet], [1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[city@0 ASC], projection=[city] | +| | ParquetExec: limit=None, partitions={2 groups: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet], [1/1/1/00000000-0000-0000-0000-000000000001.parquet]]}, output_ordering=[], projection=[city] | | | ProjectionExec: expr=[city@1 as city] | | | DeduplicateExec: [state@2 ASC,city@1 ASC,time@3 ASC] | | | SortPreservingMergeExec: [state@2 ASC,city@1 ASC,time@3 ASC,__chunk_order@0 ASC] | diff 
--git a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected index 9db522e1f4..363ae18ffe 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/gapfill.sql.expected @@ -37,7 +37,7 @@ | | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 | | | CoalesceBatchesExec: target_batch_size=8192 | | | FilterExec: time@0 >= 957528000000000000 AND time@0 <= 957531540000000000 | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time@2 >= 957528000000000000 AND time@2 <= 957531540000000000, pruning_predicate=time_max@0 >= 957528000000000000 AND time_min@1 <= 957531540000000000, output_ordering=[time@0 ASC], projection=[time, user] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, predicate=time@2 >= 957528000000000000 AND time@2 <= 957531540000000000, pruning_predicate=time_max@0 >= 957528000000000000 AND time_min@1 <= 957531540000000000, output_ordering=[], projection=[time, user] | | | | ---------- -- SQL: SELECT date_bin_gapfill(interval '10 minute', time) as minute, count(cpu.user) from cpu where time between timestamp '2000-05-05T12:00:00Z' and timestamp '2000-05-05T12:59:00Z' group by minute; diff --git a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected index 9581b93e23..3c9067e03d 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/issue_6112.influxql.expected @@ -901,7 +901,7 @@ name: physical_plan RepartitionExec: partitioning=Hash([Column { name: "tag0", index: 0 }], 4), input_partitions=4 RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 AggregateExec: mode=Partial, gby=[tag0@1 as tag0], aggr=[COUNT(m0.f64), SUM(m0.f64), STDDEV(m0.f64)] - ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[tag0@1 ASC], projection=[f64, tag0] + ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[], projection=[f64, tag0] ProjectionExec: expr=[m1 as iox::measurement, 0 as time, tag0@0 as tag0, COUNT(m1.f64)@1 as count, SUM(m1.f64)@2 as sum, STDDEV(m1.f64)@3 as stddev] AggregateExec: mode=FinalPartitioned, gby=[tag0@0 as tag0], aggr=[COUNT(m1.f64), SUM(m1.f64), STDDEV(m1.f64)] CoalesceBatchesExec: target_batch_size=8192 diff --git a/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected b/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected index d67235eb83..81efbfe35f 100644 --- a/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected +++ b/influxdb_iox/tests/query_tests2/cases/in/several_chunks.sql.expected @@ -51,7 +51,7 @@ | logical_plan | Projection: h2o.temp, h2o.other_temp, h2o.time | | | TableScan: h2o projection=[other_temp, temp, time] | | physical_plan | UnionExec | -| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[time@2 ASC], projection=[temp, other_temp, time] | +| | ParquetExec: limit=None, partitions={1 group: [[1/1/1/00000000-0000-0000-0000-000000000000.parquet]]}, output_ordering=[], projection=[temp, other_temp, time] | | | ProjectionExec: expr=[temp@3 as temp, other_temp@4 
as other_temp, time@5 as time] | | | DeduplicateExec: [city@1 ASC,state@2 ASC,time@5 ASC] | | | SortPreservingMergeExec: [city@1 ASC,state@2 ASC,time@5 ASC,__chunk_order@0 ASC] | diff --git a/iox_query/src/physical_optimizer/projection_pushdown.rs b/iox_query/src/physical_optimizer/projection_pushdown.rs index 4fcf6ab550..002c83b3a8 100644 --- a/iox_query/src/physical_optimizer/projection_pushdown.rs +++ b/iox_query/src/physical_optimizer/projection_pushdown.rs @@ -94,32 +94,14 @@ impl PhysicalOptimizerRule for ProjectionPushdown { .collect::<Result<Vec<_>>>()?, None => column_indices, }; - let output_ordering = match &child_parquet.base_config().output_ordering { - Some(sort_exprs) => { - let projected_schema = projection_exec.schema(); - - // filter out sort exprs columns that got projected away - let known_columns = projected_schema - .all_fields() - .iter() - .map(|f| f.name().as_str()) - .collect::<HashSet<_>>(); - let sort_exprs = sort_exprs - .iter() - .filter(|expr| { - if let Some(col) = expr.expr.as_any().downcast_ref::<Column>() { - known_columns.contains(col.name()) - } else { - true - } - }) - .cloned() - .collect::<Vec<_>>(); - - Some(reassign_sort_exprs_columns(&sort_exprs, &projected_schema)?) - } - None => None, - }; + let output_ordering = child_parquet + .base_config() + .output_ordering + .as_ref() + .map(|output_ordering| { + project_output_ordering(output_ordering, projection_exec.schema()) + }) + .transpose()?; let base_config = FileScanConfig { projection: Some(projection), output_ordering, @@ -272,6 +254,56 @@ impl PhysicalOptimizerRule for ProjectionPushdown { } } +/// Given the output ordering and a projected schema, returns the +/// largest prefix of the ordering that is in the projection +/// +/// For example, +/// +/// ```text +/// output_ordering: a, b, c +/// projection: a, c +/// returns --> a +/// ``` +/// +/// To see why the input has to be a prefix, consider this input: +/// +/// ```text +/// a b +/// 1 1 +/// 2 2 +/// 3 1 +/// `` +/// +/// It is sorted on `a,b` but *not* sorted on `b` +fn project_output_ordering( + output_ordering: &[PhysicalSortExpr], + projected_schema: SchemaRef, +) -> Result<Vec<PhysicalSortExpr>> { + // filter out sort exprs columns that got projected away + let known_columns = projected_schema + .all_fields() + .iter() + .map(|f| f.name().as_str()) + .collect::<HashSet<_>>(); + + // take longest prefix + let sort_exprs = output_ordering + .iter() + .take_while(|expr| { + if let Some(col) = expr.expr.as_any().downcast_ref::<Column>() { + known_columns.contains(col.name()) + } else { + // do not keep exprs like `a+1` or `-a` as they may + // not maintain ordering + false + } + }) + .cloned() + .collect::<Vec<_>>(); + + reassign_sort_exprs_columns(&sort_exprs, &projected_schema) +} + fn schema_name_projection( schema: &SchemaRef, cols: &[&str], @@ -395,7 +427,7 @@ fn reassign_sort_exprs_columns( mod tests { use arrow::{ compute::SortOptions, - datatypes::{DataType, Field, Schema, SchemaRef}, + datatypes::{DataType, Field, Fields, Schema, SchemaRef}, }; use datafusion::{ datasource::object_store::ObjectStoreUrl, @@ -406,6 +438,7 @@ mod tests { }, scalar::ScalarValue, }; + use serde::Serialize; use crate::{ physical_optimizer::test_util::{assert_unknown_partitioning, OptimizationTest}, @@ -734,7 +767,7 @@ mod tests { - " ParquetExec: limit=None, partitions={0 groups: []}, predicate=tag1@0 = foo, pruning_predicate=tag1_min@0 <= foo AND foo <= tag1_max@1, output_ordering=[tag3@1 ASC, field@0 ASC, tag2@2 ASC], projection=[field, tag3, 
tag2]" output: Ok: - - " ParquetExec: limit=None, partitions={0 groups: []}, predicate=tag1@0 = foo, pruning_predicate=tag1_min@0 <= foo AND foo <= tag1_max@1, output_ordering=[tag3@1 ASC, tag2@0 ASC], projection=[tag2, tag3]" + - " ParquetExec: limit=None, partitions={0 groups: []}, predicate=tag1@0 = foo, pruning_predicate=tag1_min@0 <= foo AND foo <= tag1_max@1, output_ordering=[tag3@1 ASC], projection=[tag2, tag3]" "### ); @@ -1349,6 +1382,232 @@ mod tests { ); } + #[test] + fn test_project_output_ordering_keep() { + let schema = schema(); + let projection = vec!["tag1", "tag2"]; + let output_ordering = vec![ + PhysicalSortExpr { + expr: expr_col("tag1", &schema), + options: Default::default(), + }, + PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: Default::default(), + }, + ]; + + insta::assert_yaml_snapshot!( + ProjectOutputOrdering::new(&schema, output_ordering, projection), + @r###" + --- + output_ordering: + - tag1@0 + - tag2@1 + projection: + - tag1 + - tag2 + projected_ordering: + - tag1@0 + - tag2@1 + "### + ); + } + + #[test] + fn test_project_output_ordering_project_prefix() { + let schema = schema(); + let projection = vec!["tag1"]; // prefix of the sort key + let output_ordering = vec![ + PhysicalSortExpr { + expr: expr_col("tag1", &schema), + options: Default::default(), + }, + PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: Default::default(), + }, + ]; + + insta::assert_yaml_snapshot!( + ProjectOutputOrdering::new(&schema, output_ordering, projection), + @r###" + --- + output_ordering: + - tag1@0 + - tag2@1 + projection: + - tag1 + projected_ordering: + - tag1@0 + "### + ); + } + + #[test] + fn test_project_output_ordering_project_non_prefix() { + let schema = schema(); + let projection = vec!["tag2"]; // in sort key, but not prefix + let output_ordering = vec![ + PhysicalSortExpr { + expr: expr_col("tag1", &schema), + options: Default::default(), + }, + PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: Default::default(), + }, + ]; + + insta::assert_yaml_snapshot!( + ProjectOutputOrdering::new(&schema, output_ordering, projection), + @r###" + --- + output_ordering: + - tag1@0 + - tag2@1 + projection: + - tag2 + projected_ordering: [] + "### + ); + } + + #[test] + fn test_project_output_ordering_projection_reorder() { + let schema = schema(); + let projection = vec!["tag2", "tag1", "field"]; // in different order than sort key + let output_ordering = vec![ + PhysicalSortExpr { + expr: expr_col("tag1", &schema), + options: Default::default(), + }, + PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: Default::default(), + }, + ]; + + insta::assert_yaml_snapshot!( + ProjectOutputOrdering::new(&schema, output_ordering, projection), + @r###" + --- + output_ordering: + - tag1@0 + - tag2@1 + projection: + - tag2 + - tag1 + - field + projected_ordering: + - tag1@1 + - tag2@0 + "### + ); + } + + #[test] + fn test_project_output_ordering_constant() { + let schema = schema(); + let projection = vec!["tag2"]; + let output_ordering = vec![ + // ordering by a constant is ignored + PhysicalSortExpr { + expr: datafusion::physical_plan::expressions::lit(1), + options: Default::default(), + }, + PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: Default::default(), + }, + ]; + + insta::assert_yaml_snapshot!( + ProjectOutputOrdering::new(&schema, output_ordering, projection), + @r###" + --- + output_ordering: + - "1" + - tag2@1 + projection: + - tag2 + projected_ordering: [] + "### + ); + } + + #[test] + fn 
test_project_output_ordering_constant_second_position() { + let schema = schema(); + let projection = vec!["tag2"]; + let output_ordering = vec![ + PhysicalSortExpr { + expr: expr_col("tag2", &schema), + options: Default::default(), + }, + // ordering by a constant is ignored + PhysicalSortExpr { + expr: datafusion::physical_plan::expressions::lit(1), + options: Default::default(), + }, + ]; + + insta::assert_yaml_snapshot!( + ProjectOutputOrdering::new(&schema, output_ordering, projection), + @r###" + --- + output_ordering: + - tag2@1 + - "1" + projection: + - tag2 + projected_ordering: + - tag2@0 + "### + ); + } + + /// project the output_ordering with the projection, + // derive serde to make a nice 'insta' snapshot + #[derive(Debug, Serialize)] + struct ProjectOutputOrdering { + output_ordering: Vec<String>, + projection: Vec<String>, + projected_ordering: Vec<String>, + } + + impl ProjectOutputOrdering { + fn new( + schema: &Schema, + output_ordering: Vec<PhysicalSortExpr>, + projection: Vec<&'static str>, + ) -> Self { + let projected_fields: Fields = projection + .iter() + .map(|field_name| { + schema + .field_with_name(field_name) + .expect("finding field") + .clone() + }) + .collect(); + let projected_schema = Arc::new(Schema::new(projected_fields)); + + let projected_ordering = project_output_ordering(&output_ordering, projected_schema); + + let projected_ordering = match projected_ordering { + Ok(projected_ordering) => format_sort_exprs(&projected_ordering), + Err(e) => vec![e.to_string()], + }; + + Self { + output_ordering: format_sort_exprs(&output_ordering), + projection: projection.iter().map(|s| s.to_string()).collect(), + projected_ordering, + } + } + } + fn schema() -> SchemaRef { Arc::new(Schema::new(vec![ Field::new("tag1", DataType::Utf8, true), @@ -1357,6 +1616,16 @@ mod tests { ])) } + fn format_sort_exprs(sort_exprs: &[PhysicalSortExpr]) -> Vec<String> { + sort_exprs + .iter() + .map(|expr| { + let PhysicalSortExpr { expr, options: _ } = expr; + expr.to_string() + }) + .collect::<Vec<_>>() + } + fn expr_col(name: &str, schema: &SchemaRef) -> Arc<dyn PhysicalExpr> { Arc::new(Column::new_with_schema(name, schema).unwrap()) }
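The core of the fix above is `project_output_ordering`: instead of filtering projected-away columns out of the middle of the sort key, it keeps only the longest prefix of the ordering whose columns survive the projection, for the reason given in the doc comment's `a, b` example. A simplified sketch of that rule over bare column names, leaving out DataFusion's `PhysicalSortExpr` and column re-indexing, could be:

```rust
/// Keep the longest prefix of `output_ordering` whose columns are all present
/// in `projection`. Dropping a column from the middle of the key would claim
/// an ordering (e.g. "sorted by b") that the projected data does not have.
fn project_ordering<'a>(output_ordering: &[&'a str], projection: &[&str]) -> Vec<&'a str> {
    output_ordering
        .iter()
        .take_while(|col| projection.contains(*col))
        .copied()
        .collect()
}

fn main() {
    // Data sorted on (tag1, tag2): projecting tag1 keeps the "tag1" prefix,
    // while projecting only tag2 yields no usable ordering at all.
    assert_eq!(project_ordering(&["tag1", "tag2"], &["tag1", "field"]), vec!["tag1"]);
    assert!(project_ordering(&["tag1", "tag2"], &["tag2"]).is_empty());
    println!("ordering projection checks passed");
}
```

This is also why the expected-plan files in the diff now show `output_ordering=[]` for scans that project a non-prefix of the file's sort key.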
17e15411041fbd7bc93ad7fec621136ccd4389f7
Dom Dwyer
2023-08-22 18:19:53
fix typos, add handle module docs
More docs more better.
null
docs: fix typos, add handle module docs More docs more better.
diff --git a/gossip_schema/src/dispatcher.rs b/gossip_schema/src/dispatcher.rs index a9c91778cd..6251fcc29c 100644 --- a/gossip_schema/src/dispatcher.rs +++ b/gossip_schema/src/dispatcher.rs @@ -1,4 +1,5 @@ -//! A deserialiser and dispatcher of [`gossip`] messages. +//! A deserialiser and dispatcher of [`gossip`] messages for the +//! [`Topic::SchemaChanges`] topic. use std::{fmt::Debug, sync::Arc}; diff --git a/gossip_schema/src/handle.rs b/gossip_schema/src/handle.rs index ec4983bca8..475b52e68e 100644 --- a/gossip_schema/src/handle.rs +++ b/gossip_schema/src/handle.rs @@ -1,3 +1,6 @@ +//! A serialiser and broadcaster of [`gossip`] messages for the +//! [`Topic::SchemaChanges`] topic. + use generated_types::{ influxdata::iox::gossip::{ v1::{schema_message::Event, SchemaMessage, TableCreated, TableUpdated}, @@ -19,7 +22,7 @@ use tokio::{ /// transport limitations) and broadcasts the result to all listening peers. /// /// Serialisation and processing of the [`Event`] given to the -/// [`SchemaTx::broadcast()`] method happen in a background actor task, +/// [`SchemaTx::broadcast()`] method happens in a background actor task, /// decoupling the caller from the latency of processing each frame. Dropping /// the [`SchemaTx`] stops this background actor task. #[derive(Debug)] @@ -36,7 +39,7 @@ impl Drop for SchemaTx { impl SchemaTx { /// Construct a new [`SchemaTx`] that publishes gossip messages over - /// `gossip`, and delegates cache operations to `inner`. + /// `gossip`. pub fn new(gossip: gossip::GossipHandle<Topic>) -> Self { let (tx, rx) = mpsc::channel(100); @@ -47,8 +50,8 @@ impl SchemaTx { /// Asynchronously broadcast `event` to all interested peers. /// - /// This method enqueues `event` into the serialisation queue, and - /// processed & transmitted asynchronously. + /// This method enqueues `event` into the serialisation queue, and processed + /// & transmitted asynchronously. pub fn broadcast(&self, event: Event) { debug!(?event, "sending schema message"); match self.tx.try_send(event) {
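The doc comments touched here describe the pattern `SchemaTx` is built on: `broadcast()` enqueues an event onto a bounded channel, a background actor task serialises and transmits it, and dropping the handle closes the channel and stops the task. A generic sketch of that shape, using a plain `String` event instead of the gossip `Event` type and logging instead of gossiping, might look like:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

/// A handle that enqueues events for a background actor task to process.
/// Dropping the handle drops the sender, which closes the channel and ends
/// the actor loop, mirroring the behaviour documented for `SchemaTx`.
struct Tx {
    tx: mpsc::Sender<String>,
}

impl Tx {
    fn new() -> Self {
        let (tx, mut rx) = mpsc::channel(100);
        // Background actor: drains the queue until every sender is gone.
        tokio::spawn(async move {
            while let Some(event) = rx.recv().await {
                println!("processing {event}");
            }
        });
        Self { tx }
    }

    /// Non-blocking enqueue; a full queue drops the event instead of
    /// stalling the caller, much like `try_send` in the real handle.
    fn broadcast(&self, event: String) {
        if let Err(e) = self.tx.try_send(event) {
            eprintln!("dropping event: {e}");
        }
    }
}

#[tokio::main]
async fn main() {
    let handle = Tx::new();
    handle.broadcast("table created".to_string());
    // Give the actor a moment to run before `handle` is dropped.
    tokio::time::sleep(Duration::from_millis(10)).await;
}
```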
63bd5096f541445a347b8533f415772f082a5ad4
Michael Gattozzi
2025-01-23 10:02:26
loosen 72 hour query/write restriction (#25890)
This commit does a few key things: - Removes the 72 hour query and write restrictions in Core - Limits the queries to a default number of parquet files. We chose 432 as this is about 72 hours using default settings for the gen1 timeblock - The file limit can be increased, but the help text and error message when exceeded note that query performance will likely be degraded as a result. - We warn users to use smaller time ranges if possible if they hit this query error With this we eliminate the hard restriction we have in place, but instead create a soft one that users can choose to take the performance hit with. If they can't take that hit then it's recommended that they upgrade to Enterprise which has the compactor built in to make performant historical queries.
null
feat: loosen 72 hour query/write restriction (#25890) This commit does a few key things: - Removes the 72 hour query and write restrictions in Core - Limits the queries to a default number of parquet files. We chose 432 as this is about 72 hours using default settings for the gen1 timeblock - The file limit can be increased, but the help text and error message when exceeded note that query performance will likely be degraded as a result. - We warn users to use smaller time ranges if possible if they hit this query error With this we eliminate the hard restriction we have in place, but instead create a soft one that users can choose to take the performance hit with. If they can't take that hit then it's recommended that they upgrade to Enterprise which has the compactor built in to make performant historical queries.
diff --git a/influxdb3/src/commands/serve.rs b/influxdb3/src/commands/serve.rs index 88c573fcf1..f528cd133c 100644 --- a/influxdb3/src/commands/serve.rs +++ b/influxdb3/src/commands/serve.rs @@ -345,6 +345,15 @@ pub struct Config { action )] pub telemetry_endpoint: String, + + /// Set the limit for number of parquet files allowed in a query. Defaults + /// to 432 which is about 3 days worth of files using default settings. + /// This number can be increased to allow more files to be queried, but + /// query performance will likely suffer, RAM usage will spike, and the + /// process might be OOM killed as a result. It would be better to specify + /// smaller time ranges if possible in a query. + #[clap(long = "query-file-limit", env = "INFLUXDB3_QUERY_FILE_LIMIT", action)] + pub query_file_limit: Option<usize>, } /// Specified size of the Parquet cache in megabytes (MB) @@ -541,6 +550,7 @@ pub async fn command(config: Config) -> Result<()> { parquet_cache, metric_registry: Arc::clone(&metrics), snapshotted_wal_files_to_keep: config.snapshotted_wal_files_to_keep, + query_file_limit: config.query_file_limit, }) .await .map_err(|e| Error::WriteBufferInit(e.into()))?; diff --git a/influxdb3_processing_engine/src/lib.rs b/influxdb3_processing_engine/src/lib.rs index a0ac0f8ee8..b11c6da5e1 100644 --- a/influxdb3_processing_engine/src/lib.rs +++ b/influxdb3_processing_engine/src/lib.rs @@ -1207,6 +1207,7 @@ mod tests { parquet_cache: None, metric_registry: Arc::clone(&metric_registry), snapshotted_wal_files_to_keep: 10, + query_file_limit: None, }) .await .unwrap(); diff --git a/influxdb3_server/src/lib.rs b/influxdb3_server/src/lib.rs index 0fc0bb03d2..301fbef40a 100644 --- a/influxdb3_server/src/lib.rs +++ b/influxdb3_server/src/lib.rs @@ -779,6 +779,7 @@ mod tests { parquet_cache: Some(parquet_cache), metric_registry: Arc::clone(&metrics), snapshotted_wal_files_to_keep: 100, + query_file_limit: None, }, ) .await diff --git a/influxdb3_server/src/query_executor/mod.rs b/influxdb3_server/src/query_executor/mod.rs index 005cf528c9..9dfae427ff 100644 --- a/influxdb3_server/src/query_executor/mod.rs +++ b/influxdb3_server/src/query_executor/mod.rs @@ -776,6 +776,7 @@ mod tests { use metric::Registry; use object_store::{local::LocalFileSystem, ObjectStore}; use parquet_file::storage::{ParquetStorage, StorageId}; + use pretty_assertions::assert_eq; use super::CreateQueryExecutorArgs; @@ -801,7 +802,9 @@ mod tests { )) } - pub(crate) async fn setup() -> ( + pub(crate) async fn setup( + query_file_limit: Option<usize>, + ) -> ( Arc<dyn WriteBuffer>, QueryExecutorImpl, Arc<MockProvider>, @@ -841,6 +844,7 @@ mod tests { parquet_cache: Some(parquet_cache), metric_registry: Default::default(), snapshotted_wal_files_to_keep: 1, + query_file_limit, }) .await .unwrap(); @@ -874,7 +878,7 @@ mod tests { #[test_log::test(tokio::test)] async fn system_parquet_files_success() { - let (write_buffer, query_executor, time_provider, _) = setup().await; + let (write_buffer, query_executor, time_provider, _) = setup(None).await; // Perform some writes to multiple tables let db_name = "test_db"; // perform writes over time to generate WAL files and some snapshots @@ -982,4 +986,232 @@ mod tests { assert_batches_sorted_eq!(t.expected, &batches); } } + + #[test_log::test(tokio::test)] + async fn query_file_limits_default() { + let (write_buffer, query_executor, time_provider, _) = setup(None).await; + // Perform some writes to multiple tables + let db_name = "test_db"; + // perform writes over time to generate WAL files and 
some snapshots + // the time provider is bumped to trick the system into persisting files: + for i in 0..1298 { + let time = i * 10; + let _ = write_buffer + .write_lp( + NamespaceName::new(db_name).unwrap(), + "\ + cpu,host=a,region=us-east usage=250\n\ + mem,host=a,region=us-east usage=150000\n\ + ", + Time::from_timestamp_nanos(time), + false, + influxdb3_write::Precision::Nanosecond, + ) + .await + .unwrap(); + + time_provider.set(Time::from_timestamp(time + 1, 0).unwrap()); + } + + // bump time again and sleep briefly to ensure time to persist things + time_provider.set(Time::from_timestamp(20, 0).unwrap()); + tokio::time::sleep(Duration::from_millis(500)).await; + + struct TestCase<'a> { + query: &'a str, + expected: &'a [&'a str], + } + + let test_cases = [ + TestCase { + query: "\ + SELECT COUNT(*) \ + FROM system.parquet_files \ + WHERE table_name = 'cpu'", + expected: &[ + "+----------+", + "| count(*) |", + "+----------+", + "| 432 |", + "+----------+", + ], + }, + TestCase { + query: "\ + SELECT Count(host) \ + FROM cpu", + expected: &[ + "+-----------------+", + "| count(cpu.host) |", + "+-----------------+", + "| 1298 |", + "+-----------------+", + ], + }, + ]; + + for t in test_cases { + let batch_stream = query_executor + .query_sql(db_name, t.query, None, None, None) + .await + .unwrap(); + let batches: Vec<RecordBatch> = batch_stream.try_collect().await.unwrap(); + assert_batches_sorted_eq!(t.expected, &batches); + } + + // put us over the parquet limit + let time = 12990; + let _ = write_buffer + .write_lp( + NamespaceName::new(db_name).unwrap(), + "\ + cpu,host=a,region=us-east usage=250\n\ + mem,host=a,region=us-east usage=150000\n\ + ", + Time::from_timestamp_nanos(time), + false, + influxdb3_write::Precision::Nanosecond, + ) + .await + .unwrap(); + + time_provider.set(Time::from_timestamp(time + 1, 0).unwrap()); + + // bump time again and sleep briefly to ensure time to persist things + time_provider.set(Time::from_timestamp(20, 0).unwrap()); + tokio::time::sleep(Duration::from_millis(500)).await; + + match query_executor + .query_sql(db_name, "SELECT COUNT(host) FROM CPU", None, None, None) + .await { + Ok(_) => panic!("expected to exceed parquet file limit, yet query succeeded"), + Err(err) => assert_eq!(err.to_string(), "error while planning query: External error: Query would exceed file limit of 432 parquet files. Please specify a smaller time range for your query. 
You can increase the file limit with the `--query-file-limit` option in the serve command, however, query performance will be slower and the server may get OOM killed or become unstable as a result".to_string()) + } + + // Make sure if we specify a smaller time range that queries will still work + query_executor + .query_sql( + db_name, + "SELECT COUNT(host) FROM CPU WHERE time < '1970-01-01T00:00:00.000000010Z'", + None, + None, + None, + ) + .await + .unwrap(); + } + + #[test_log::test(tokio::test)] + async fn query_file_limits_configured() { + let (write_buffer, query_executor, time_provider, _) = setup(Some(3)).await; + // Perform some writes to multiple tables + let db_name = "test_db"; + // perform writes over time to generate WAL files and some snapshots + // the time provider is bumped to trick the system into persisting files: + for i in 0..11 { + let time = i * 10; + let _ = write_buffer + .write_lp( + NamespaceName::new(db_name).unwrap(), + "\ + cpu,host=a,region=us-east usage=250\n\ + mem,host=a,region=us-east usage=150000\n\ + ", + Time::from_timestamp_nanos(time), + false, + influxdb3_write::Precision::Nanosecond, + ) + .await + .unwrap(); + + time_provider.set(Time::from_timestamp(time + 1, 0).unwrap()); + } + + // bump time again and sleep briefly to ensure time to persist things + time_provider.set(Time::from_timestamp(20, 0).unwrap()); + tokio::time::sleep(Duration::from_millis(500)).await; + + struct TestCase<'a> { + query: &'a str, + expected: &'a [&'a str], + } + + let test_cases = [ + TestCase { + query: "\ + SELECT COUNT(*) \ + FROM system.parquet_files \ + WHERE table_name = 'cpu'", + expected: &[ + "+----------+", + "| count(*) |", + "+----------+", + "| 3 |", + "+----------+", + ], + }, + TestCase { + query: "\ + SELECT Count(host) \ + FROM cpu", + expected: &[ + "+-----------------+", + "| count(cpu.host) |", + "+-----------------+", + "| 11 |", + "+-----------------+", + ], + }, + ]; + + for t in test_cases { + let batch_stream = query_executor + .query_sql(db_name, t.query, None, None, None) + .await + .unwrap(); + let batches: Vec<RecordBatch> = batch_stream.try_collect().await.unwrap(); + assert_batches_sorted_eq!(t.expected, &batches); + } + + // put us over the parquet limit + let time = 120; + let _ = write_buffer + .write_lp( + NamespaceName::new(db_name).unwrap(), + "\ + cpu,host=a,region=us-east usage=250\n\ + mem,host=a,region=us-east usage=150000\n\ + ", + Time::from_timestamp_nanos(time), + false, + influxdb3_write::Precision::Nanosecond, + ) + .await + .unwrap(); + + time_provider.set(Time::from_timestamp(time + 1, 0).unwrap()); + + // bump time again and sleep briefly to ensure time to persist things + time_provider.set(Time::from_timestamp(20, 0).unwrap()); + tokio::time::sleep(Duration::from_millis(500)).await; + + match query_executor + .query_sql(db_name, "SELECT COUNT(host) FROM CPU", None, None, None) + .await { + Ok(_) => panic!("expected to exceed parquet file limit, yet query succeeded"), + Err(err) => assert_eq!(err.to_string(), "error while planning query: External error: Query would exceed file limit of 3 parquet files. Please specify a smaller time range for your query. 
You can increase the file limit with the `--query-file-limit` option in the serve command, however, query performance will be slower and the server may get OOM killed or become unstable as a result".to_string()) + } + + // Make sure if we specify a smaller time range that queries will still work + query_executor + .query_sql( + db_name, + "SELECT COUNT(host) FROM CPU WHERE time < '1970-01-01T00:00:00.000000010Z'", + None, + None, + None, + ) + .await + .unwrap(); + } } diff --git a/influxdb3_write/src/lib.rs b/influxdb3_write/src/lib.rs index b53c6ebf6d..73e091db3e 100644 --- a/influxdb3_write/src/lib.rs +++ b/influxdb3_write/src/lib.rs @@ -47,9 +47,6 @@ use thiserror::Error; use twox_hash::XxHash64; use write_buffer::INDEX_HASH_SEED; -/// Used to determine if writes are older than what we can accept or query -pub const THREE_DAYS: Duration = Duration::from_secs(60 * 60 * 24 * 3); - #[derive(Debug, Error)] pub enum Error { #[error("object store path error: {0}")] diff --git a/influxdb3_write/src/write_buffer/mod.rs b/influxdb3_write/src/write_buffer/mod.rs index 7e272df8ae..7467e3fc51 100644 --- a/influxdb3_write/src/write_buffer/mod.rs +++ b/influxdb3_write/src/write_buffer/mod.rs @@ -146,6 +146,8 @@ pub struct WriteBufferImpl { metrics: WriteMetrics, distinct_cache: Arc<DistinctCacheProvider>, last_cache: Arc<LastCacheProvider>, + /// The number of files we will accept for a query + query_file_limit: usize, } /// The maximum number of snapshots to load on start @@ -163,6 +165,7 @@ pub struct WriteBufferImplArgs { pub parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, pub metric_registry: Arc<Registry>, pub snapshotted_wal_files_to_keep: u64, + pub query_file_limit: Option<usize>, } impl WriteBufferImpl { @@ -178,6 +181,7 @@ impl WriteBufferImpl { parquet_cache, metric_registry, snapshotted_wal_files_to_keep, + query_file_limit, }: WriteBufferImplArgs, ) -> Result<Arc<Self>> { // load snapshots and replay the wal into the in memory buffer @@ -199,7 +203,6 @@ impl WriteBufferImpl { } let persisted_files = Arc::new(PersistedFiles::new_from_persisted_snapshots( - Arc::clone(&time_provider), persisted_snapshots, )); let queryable_buffer = Arc::new(QueryableBuffer::new(QueryableBufferArgs { @@ -209,7 +212,6 @@ impl WriteBufferImpl { last_cache_provider: Arc::clone(&last_cache), distinct_cache_provider: Arc::clone(&distinct_cache), persisted_files: Arc::clone(&persisted_files), - time_provider: Arc::clone(&time_provider), parquet_cache: parquet_cache.clone(), })); @@ -239,6 +241,7 @@ impl WriteBufferImpl { persisted_files, buffer: queryable_buffer, metrics: WriteMetrics::new(&metric_registry), + query_file_limit: query_file_limit.unwrap_or(432), }); Ok(result) } @@ -327,6 +330,21 @@ impl WriteBufferImpl { self.persisted_files .get_files_filtered(db_schema.id, table_def.table_id, filter); + if parquet_files.len() > self.query_file_limit { + return Err(DataFusionError::External( + format!( + "Query would exceed file limit of {} parquet files. \ + Please specify a smaller time range for your \ + query. 
You can increase the file limit with the \ + `--query-file-limit` option in the serve command, however, \ + query performance will be slower and the server may get \ + OOM killed or become unstable as a result", + self.query_file_limit + ) + .into(), + )); + } + let mut chunk_order = chunks.len() as i64; for parquet_file in parquet_files { @@ -723,7 +741,7 @@ impl DatabaseManager for WriteBufferImpl { "int64" => FieldDataType::Integer, "bool" => FieldDataType::Boolean, "utf8" => FieldDataType::String, - _ => todo!(), + _ => unreachable!(), }, }); } @@ -957,6 +975,7 @@ mod tests { parquet_cache: Some(Arc::clone(&parquet_cache)), metric_registry: Default::default(), snapshotted_wal_files_to_keep: 10, + query_file_limit: None, }) .await .unwrap(); @@ -1048,6 +1067,7 @@ mod tests { parquet_cache: Some(Arc::clone(&parquet_cache)), metric_registry: Default::default(), snapshotted_wal_files_to_keep: 10, + query_file_limit: None, }) .await .unwrap(); @@ -1119,6 +1139,7 @@ mod tests { parquet_cache: wbuf.parquet_cache.clone(), metric_registry: Default::default(), snapshotted_wal_files_to_keep: 10, + query_file_limit: None, }) .await .unwrap() @@ -1359,6 +1380,7 @@ mod tests { parquet_cache: write_buffer.parquet_cache.clone(), metric_registry: Default::default(), snapshotted_wal_files_to_keep: 10, + query_file_limit: None, }) .await .unwrap(); @@ -3043,6 +3065,7 @@ mod tests { parquet_cache, metric_registry: Arc::clone(&metric_registry), snapshotted_wal_files_to_keep: 10, + query_file_limit: None, }) .await .unwrap(); diff --git a/influxdb3_write/src/write_buffer/persisted_files.rs b/influxdb3_write/src/write_buffer/persisted_files.rs index c28983b69b..ece6b6efaa 100644 --- a/influxdb3_write/src/write_buffer/persisted_files.rs +++ b/influxdb3_write/src/write_buffer/persisted_files.rs @@ -8,35 +8,24 @@ use hashbrown::HashMap; use influxdb3_id::DbId; use influxdb3_id::TableId; use influxdb3_telemetry::ParquetMetrics; -use iox_time::TimeProvider; use parking_lot::RwLock; -use std::sync::Arc; type DatabaseToTables = HashMap<DbId, TableToFiles>; type TableToFiles = HashMap<TableId, Vec<ParquetFile>>; -#[derive(Debug)] +#[derive(Debug, Default)] pub struct PersistedFiles { - /// The time provider to check if something is older than 3 days - time_provider: Arc<dyn TimeProvider>, inner: RwLock<Inner>, } impl PersistedFiles { - pub fn new(time_provider: Arc<dyn TimeProvider>) -> Self { - Self { - time_provider, - inner: Default::default(), - } + pub fn new() -> Self { + Default::default() } /// Create a new `PersistedFiles` from a list of persisted snapshots - pub fn new_from_persisted_snapshots( - time_provider: Arc<dyn TimeProvider>, - persisted_snapshots: Vec<PersistedSnapshot>, - ) -> Self { + pub fn new_from_persisted_snapshots(persisted_snapshots: Vec<PersistedSnapshot>) -> Self { let inner = Inner::new_from_persisted_snapshots(persisted_snapshots); Self { - time_provider, inner: RwLock::new(inner), } } @@ -61,20 +50,16 @@ impl PersistedFiles { table_id: TableId, filter: &ChunkFilter, ) -> Vec<ParquetFile> { - let three_days_ago = (self.time_provider.now() - crate::THREE_DAYS).timestamp_nanos(); - let mut files = { - let inner = self.inner.read(); - inner - .files - .get(&db_id) - .and_then(|tables| tables.get(&table_id)) - .cloned() - .unwrap_or_default() - .into_iter() - .filter(|file| filter.test_time_stamp_min_max(file.min_time, file.max_time)) - .filter(|file| file.min_time > three_days_ago) - .collect::<Vec<_>>() - }; + let inner = self.inner.read(); + let mut files = inner + .files + 
.get(&db_id) + .and_then(|tables| tables.get(&table_id)) + .cloned() + .unwrap_or_default() + .into_iter() + .filter(|file| filter.test_time_stamp_min_max(file.min_time, file.max_time)) + .collect::<Vec<_>>(); files.sort_by(|a, b| b.min_time.cmp(&a.min_time)); @@ -190,11 +175,10 @@ mod tests { use influxdb3_catalog::catalog::TableDefinition; use influxdb3_id::ColumnId; use influxdb3_wal::{SnapshotSequenceNumber, WalFileSequenceNumber}; - use iox_time::MockProvider; - use iox_time::Time; use observability_deps::tracing::info; use pretty_assertions::assert_eq; use schema::InfluxColumnType; + use std::sync::Arc; use crate::ParquetFileId; @@ -203,12 +187,8 @@ mod tests { #[test_log::test(test)] fn test_get_metrics_after_initial_load() { let all_persisted_snapshot_files = build_persisted_snapshots(); - let time_provider: Arc<dyn TimeProvider> = - Arc::new(MockProvider::new(Time::from_timestamp(0, 0).unwrap())); - let persisted_file = PersistedFiles::new_from_persisted_snapshots( - time_provider, - all_persisted_snapshot_files, - ); + let persisted_file = + PersistedFiles::new_from_persisted_snapshots(all_persisted_snapshot_files); let (file_count, size_in_mb, row_count) = persisted_file.get_metrics(); @@ -221,12 +201,8 @@ mod tests { #[test_log::test(test)] fn test_get_metrics_after_update() { let all_persisted_snapshot_files = build_persisted_snapshots(); - let time_provider: Arc<dyn TimeProvider> = - Arc::new(MockProvider::new(Time::from_timestamp(0, 0).unwrap())); - let persisted_file = PersistedFiles::new_from_persisted_snapshots( - time_provider, - all_persisted_snapshot_files, - ); + let persisted_file = + PersistedFiles::new_from_persisted_snapshots(all_persisted_snapshot_files); let parquet_files = build_parquet_files(5); let new_snapshot = build_snapshot(parquet_files, 1, 1, 1); persisted_file.add_persisted_snapshot_files(new_snapshot); @@ -255,12 +231,8 @@ mod tests { .cloned() .unwrap(); - let time_provider: Arc<dyn TimeProvider> = - Arc::new(MockProvider::new(Time::from_timestamp(0, 0).unwrap())); - let persisted_file = PersistedFiles::new_from_persisted_snapshots( - time_provider, - all_persisted_snapshot_files, - ); + let persisted_file = + PersistedFiles::new_from_persisted_snapshots(all_persisted_snapshot_files); let mut parquet_files = build_parquet_files(4); info!(all_persisted_files = ?persisted_file, "Full persisted file"); info!(already_existing_file = ?already_existing_file, "Existing file"); @@ -300,10 +272,7 @@ mod tests { }) .collect(); let persisted_snapshots = vec![build_snapshot(parquet_files, 0, 0, 0)]; - let persisted_files = PersistedFiles::new_from_persisted_snapshots( - Arc::new(MockProvider::new(Time::from_timestamp_nanos(0))), - persisted_snapshots, - ); + let persisted_files = PersistedFiles::new_from_persisted_snapshots(persisted_snapshots); struct TestCase<'a> { filter: &'a [Expr], diff --git a/influxdb3_write/src/write_buffer/queryable_buffer.rs b/influxdb3_write/src/write_buffer/queryable_buffer.rs index 9ad2f621ed..b1ab80932b 100644 --- a/influxdb3_write/src/write_buffer/queryable_buffer.rs +++ b/influxdb3_write/src/write_buffer/queryable_buffer.rs @@ -25,7 +25,6 @@ use iox_query::chunk_statistics::{create_chunk_statistics, NoColumnRanges}; use iox_query::exec::Executor; use iox_query::frontend::reorg::ReorgPlanner; use iox_query::QueryChunk; -use iox_time::TimeProvider; use object_store::path::Path; use observability_deps::tracing::{error, info}; use parking_lot::RwLock; @@ -47,7 +46,6 @@ pub struct QueryableBuffer { persisted_files: 
Arc<PersistedFiles>, buffer: Arc<RwLock<BufferState>>, parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, - time_provider: Arc<dyn TimeProvider>, /// Sends a notification to this watch channel whenever a snapshot info is persisted persisted_snapshot_notify_rx: tokio::sync::watch::Receiver<Option<PersistedSnapshot>>, persisted_snapshot_notify_tx: tokio::sync::watch::Sender<Option<PersistedSnapshot>>, @@ -61,7 +59,6 @@ pub struct QueryableBufferArgs { pub distinct_cache_provider: Arc<DistinctCacheProvider>, pub persisted_files: Arc<PersistedFiles>, pub parquet_cache: Option<Arc<dyn ParquetCacheOracle>>, - pub time_provider: Arc<dyn TimeProvider>, } impl QueryableBuffer { @@ -74,7 +71,6 @@ impl QueryableBuffer { distinct_cache_provider, persisted_files, parquet_cache, - time_provider, }: QueryableBufferArgs, ) -> Self { let buffer = Arc::new(RwLock::new(BufferState::new(Arc::clone(&catalog)))); @@ -89,7 +85,6 @@ impl QueryableBuffer { persisted_files, buffer, parquet_cache, - time_provider, persisted_snapshot_notify_rx, persisted_snapshot_notify_tx, } @@ -118,9 +113,6 @@ impl QueryableBuffer { .partitioned_record_batches(Arc::clone(&table_def), buffer_filter) .map_err(|e| DataFusionError::Execution(format!("error getting batches {}", e)))? .into_iter() - .filter(|(_, (ts_min_max, _))| { - ts_min_max.min > (self.time_provider.now() - crate::THREE_DAYS).timestamp_nanos() - }) .map(|(gen_time, (ts_min_max, batches))| { let row_count = batches.iter().map(|b| b.num_rows()).sum::<usize>(); let chunk_stats = create_chunk_statistics( @@ -801,8 +793,7 @@ mod tests { Arc::clone(&catalog), ) .unwrap(), - time_provider: Arc::clone(&time_provider), - persisted_files: Arc::new(PersistedFiles::new(Arc::clone(&time_provider))), + persisted_files: Arc::new(PersistedFiles::new()), parquet_cache: None, }; let queryable_buffer = QueryableBuffer::new(queryable_buffer_args); diff --git a/influxdb3_write/src/write_buffer/validator.rs b/influxdb3_write/src/write_buffer/validator.rs index ead88a3152..4ce101d5b1 100644 --- a/influxdb3_write/src/write_buffer/validator.rs +++ b/influxdb3_write/src/write_buffer/validator.rs @@ -242,14 +242,6 @@ fn validate_and_qualify_v1_line( .map(|ts| apply_precision_to_timestamp(precision, ts)) .unwrap_or(ingest_time.timestamp_nanos()); - if timestamp_ns < (ingest_time - crate::THREE_DAYS).timestamp_nanos() { - return Err(WriteLineError { - original_line: line.to_string(), - line_number: line_number + 1, - error_message: "line contained a date that was more than 3 days ago".into(), - }); - } - fields.push(Field::new(time_col_id, FieldData::Timestamp(timestamp_ns))); // if we have new columns defined, add them to the db_schema table so that subsequent lines
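The diff in the record above gates queries on how many parquet files they would touch, falling back to 432 when `--query-file-limit` is not given (432 works out to three days of files at one file every ten minutes, which may be why that default was picked alongside the removal of the `THREE_DAYS` cutoff). Below is a minimal, self-contained sketch of that guard pattern; the constant and function names are illustrative, not the crate's actual API.

```rust
// Illustrative guard (hypothetical names, not the crate's real API): reject a
// query plan that would read more parquet files than the configured limit.
const DEFAULT_QUERY_FILE_LIMIT: usize = 432;

fn enforce_query_file_limit(file_count: usize, configured: Option<usize>) -> Result<(), String> {
    let limit = configured.unwrap_or(DEFAULT_QUERY_FILE_LIMIT);
    if file_count > limit {
        return Err(format!(
            "query would touch {file_count} parquet files, exceeding the limit of {limit}; \
             narrow the time range or raise --query-file-limit"
        ));
    }
    Ok(())
}

fn main() {
    // Default limit: well under 432 files passes.
    assert!(enforce_query_file_limit(100, None).is_ok());
    // Configured limit of 3, as exercised by the test above: 4 files is rejected.
    assert!(enforce_query_file_limit(4, Some(3)).is_err());
}
```

Erroring out, rather than silently truncating the file list, keeps results correct while still protecting the server from the unbounded scans the error message warns about.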
b09691dc6b6f026f282e7b226ef6447e299c48f0
Andrew Lamb
2023-01-23 12:24:22
Upgrade datafusion (again, I know) (#6639)
* chore: Update datafusion * chore: Run cargo hakari tasks
Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
chore: Upgrade datafusion (again, I know) (#6639) * chore: Update datafusion * chore: Run cargo hakari tasks Co-authored-by: CircleCI[bot] <[email protected]> Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
diff --git a/Cargo.lock b/Cargo.lock index e7ea84c9b0..a32044b3e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1399,7 +1399,7 @@ dependencies = [ [[package]] name = "datafusion" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "ahash 0.8.2", "arrow", @@ -1445,7 +1445,7 @@ dependencies = [ [[package]] name = "datafusion-common" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "arrow", "chrono", @@ -1458,7 +1458,7 @@ dependencies = [ [[package]] name = "datafusion-expr" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "ahash 0.8.2", "arrow", @@ -1470,7 +1470,7 @@ dependencies = [ [[package]] name = "datafusion-optimizer" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "arrow", "async-trait", @@ -1486,7 +1486,7 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "ahash 0.8.2", "arrow", @@ -1516,7 +1516,7 @@ dependencies = [ [[package]] name = "datafusion-proto" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "arrow", "chrono", @@ -1533,7 +1533,7 @@ dependencies = [ [[package]] name = "datafusion-row" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" dependencies = [ "arrow", "datafusion-common", @@ -1544,7 +1544,7 @@ dependencies = [ [[package]] name = "datafusion-sql" version = "16.0.0" -source = "git+https://github.com/apache/arrow-datafusion.git?rev=64fa312ecc5f32294e70fd7389e18cb41f25e732#64fa312ecc5f32294e70fd7389e18cb41f25e732" +source = "git+https://github.com/apache/arrow-datafusion.git?rev=e6a050058bd704f73b38106b7abf21dc4539eebc#e6a050058bd704f73b38106b7abf21dc4539eebc" 
dependencies = [ "arrow-schema", "datafusion-common", diff --git a/Cargo.toml b/Cargo.toml index 3378e598fc..8d273eae60 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,8 +116,8 @@ license = "MIT OR Apache-2.0" [workspace.dependencies] arrow = { version = "31.0.0" } arrow-flight = { version = "31.0.0" } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="64fa312ecc5f32294e70fd7389e18cb41f25e732", default-features = false } -datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="64fa312ecc5f32294e70fd7389e18cb41f25e732" } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev="e6a050058bd704f73b38106b7abf21dc4539eebc", default-features = false } +datafusion-proto = { git = "https://github.com/apache/arrow-datafusion.git", rev="e6a050058bd704f73b38106b7abf21dc4539eebc" } hashbrown = { version = "0.13.2" } parquet = { version = "31.0.0" } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index fe83c61631..1bae8f8392 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -29,7 +29,7 @@ bytes = { version = "1", features = ["std"] } chrono = { version = "0.4", default-features = false, features = ["alloc", "clock", "iana-time-zone", "serde", "std", "winapi"] } crossbeam-utils = { version = "0.8", features = ["std"] } crypto-common = { version = "0.1", default-features = false, features = ["std"] } -datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "64fa312ecc5f32294e70fd7389e18cb41f25e732", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } +datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "e6a050058bd704f73b38106b7abf21dc4539eebc", features = ["async-compression", "bzip2", "compression", "crypto_expressions", "flate2", "regex_expressions", "unicode_expressions", "xz2"] } digest = { version = "0.10", features = ["alloc", "block-buffer", "core-api", "mac", "std", "subtle"] } either = { version = "1", features = ["use_std"] } fixedbitset = { version = "0.4", features = ["std"] }
be445b5057bb724233db49208e11e3a78335af4e
Carol (Nichols || Goulding)
2022-11-23 14:18:30
Remove API to get multiple SegmentEntry records at once; it's not quite right
See #6219 for batching reads.
null
fix: Remove API to get multiple SegmentEntry records at once; it's not quite right See #6219 for batching reads.
diff --git a/wal/src/blocking/reader.rs b/wal/src/blocking/reader.rs index 39f6acbb66..928ea3469b 100644 --- a/wal/src/blocking/reader.rs +++ b/wal/src/blocking/reader.rs @@ -87,10 +87,6 @@ where })) } - pub fn entries(&mut self) -> Result<Vec<SegmentEntry>> { - self.collect() - } - pub fn next_ops(&mut self) -> Result<Option<SequencedWalOp>> { if let Some(entry) = self.one_entry()? { let decoded = ProtoSequencedWalOp::decode(&*entry.data) @@ -105,17 +101,6 @@ where } } -impl<R> Iterator for ClosedSegmentFileReader<R> -where - R: io::Read, -{ - type Item = Result<SegmentEntry>; - - fn next(&mut self) -> Option<Self::Item> { - self.one_entry().transpose() - } -} - struct CrcReader<R> { inner: R, hasher: Hasher, diff --git a/wal/src/lib.rs b/wal/src/lib.rs index 5e79b91881..aab4de945a 100644 --- a/wal/src/lib.rs +++ b/wal/src/lib.rs @@ -486,8 +486,6 @@ impl OpenSegmentFile { enum ClosedSegmentFileReaderRequest { ReadHeader(oneshot::Sender<blocking::ReaderResult<(FileTypeIdentifier, uuid::Bytes)>>), - Entries(oneshot::Sender<blocking::ReaderResult<Vec<SegmentEntry>>>), - NextOps(oneshot::Sender<blocking::ReaderResult<Option<SequencedWalOp>>>), } @@ -537,10 +535,6 @@ impl ClosedSegmentFileReader { tx.send(reader.read_header()).ok(); } - Entries(tx) => { - tx.send(reader.entries()).ok(); - } - NextOps(tx) => { tx.send(reader.next_ops()).ok(); } @@ -567,13 +561,6 @@ impl ClosedSegmentFileReader { .context(UnableToReceiveResponseFromSenderTaskSnafu) } - // TODO: Should this return a stream instead of a big vector? - async fn entries(&mut self) -> Result<Vec<SegmentEntry>> { - Self::one_command(&self.tx, ClosedSegmentFileReaderRequest::Entries) - .await? - .context(UnableToReadEntriesSnafu) - } - /// Return the next [`SequencedWalOp`] from this reader, if any. pub async fn next_ops(&mut self) -> Result<Option<SequencedWalOp>> { Self::one_command(&self.tx, ClosedSegmentFileReaderRequest::NextOps) @@ -609,39 +596,6 @@ mod tests { use generated_types::influxdata::pbdata::v1::DatabaseBatch; use mutable_batch_lp::lines_to_batches; - #[tokio::test] - async fn segment_file_write_and_read_entries() { - let dir = test_helpers::tmp_dir().unwrap(); - let sf = OpenSegmentFile::new_in_directory(dir.path()).await.unwrap(); - let writer = sf.write_handle(); - - let data = b"whatevs"; - let write_summary = writer.write(data).await.unwrap(); - - let data2 = b"another"; - let summary2 = writer.write(data2).await.unwrap(); - - let closed = sf.rotate().await.unwrap(); - - let mut reader = ClosedSegmentFileReader::from_path(&closed.path) - .await - .unwrap(); - let entries = reader.entries().await.unwrap(); - assert_eq!( - &entries, - &[ - SegmentEntry { - checksum: write_summary.checksum, - data: data.to_vec(), - }, - SegmentEntry { - checksum: summary2.checksum, - data: data2.to_vec() - }, - ] - ); - } - #[tokio::test] async fn segment_file_write_and_read_ops() { let dir = test_helpers::tmp_dir().unwrap();
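This last commit removes the batch `entries()` accessor and the `Iterator` impl from the closed-segment reader, leaving `next_ops()` as the single way to drain a segment. The following self-contained sketch shows the resulting pull-one-op-at-a-time pattern; `MockReader` and `Op` are stand-ins for the real `ClosedSegmentFileReader` and `SequencedWalOp`, and the real `next_ops()` is async where this mock is not.

```rust
// Stand-in types: the real reader is async and yields SequencedWalOp values.
struct Op(u64);

struct MockReader {
    remaining: std::collections::VecDeque<Op>,
}

impl MockReader {
    /// Mirrors the shape of `next_ops()`: Ok(Some(op)) until the segment is
    /// exhausted, then Ok(None).
    fn next_ops(&mut self) -> Result<Option<Op>, String> {
        Ok(self.remaining.pop_front())
    }
}

fn main() -> Result<(), String> {
    let mut reader = MockReader {
        remaining: [Op(1), Op(2), Op(3)].into(),
    };
    // Replay loop: no up-front Vec of entries, just one op at a time.
    while let Some(op) = reader.next_ops()? {
        println!("replaying op {}", op.0);
    }
    Ok(())
}
```

Keeping only the streaming accessor avoids buffering a whole segment's entries in memory, which the TODO removed in the diff ("Should this return a stream instead of a big vector?") was already pointing toward.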