diff --git "a/val.csv" "b/val.csv"
--- "a/val.csv"
+++ "b/val.csv"
@@ -1,140433 +1,3 @@
-hash,date,author,commit_message,is_merge,git_diff,type,masked_commit_message
-e8e252376813264d2ee75b3ecef022471f6d6bf4,2022-10-05 15:07:53,Periklis Tsirakidis,operator: Use quayio v2.7.0-pre image for openshift overlay (#7329),False,"diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
-index 2315967ded340..320afd11b71ed 100644
---- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
-+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
-@@ -1199,7 +1199,7 @@ spec:
- - /manager
- env:
- - name: RELATED_IMAGE_LOKI
-- value: docker.io/grafana/loki:main-ec0bf70
-+ value: quay.io/openshift-logging/loki:v2.7.0-pre
- - name: RELATED_IMAGE_GATEWAY
- value: quay.io/observatorium/api:latest
- - name: RELATED_IMAGE_OPA
-@@ -1327,7 +1327,7 @@ spec:
- provider:
- name: Grafana.com
- relatedImages:
-- - image: docker.io/grafana/loki:main-ec0bf70
-+ - image: quay.io/openshift-logging/loki:v2.7.0-pre
- name: loki
- - image: quay.io/observatorium/api:latest
- name: gateway
-diff --git a/operator/config/overlays/openshift/manager_related_image_patch.yaml b/operator/config/overlays/openshift/manager_related_image_patch.yaml
-index 589a8610ee434..8d1b4495102f8 100644
---- a/operator/config/overlays/openshift/manager_related_image_patch.yaml
-+++ b/operator/config/overlays/openshift/manager_related_image_patch.yaml
-@@ -9,7 +9,7 @@ spec:
- - name: manager
- env:
- - name: RELATED_IMAGE_LOKI
-- value: docker.io/grafana/loki:main-ec0bf70
-+ value: quay.io/openshift-logging/loki:v2.7.0-pre
- - name: RELATED_IMAGE_GATEWAY
- value: quay.io/observatorium/api:latest
- - name: RELATED_IMAGE_OPA",operator,Use quayio v2.7.0-pre image for openshift overlay (#7329)
-706c22e9e40b0156031f214b63dc6ed4e210abc1,2022-11-10 19:39:30,Jasper,"Loki: Add querier config to loki helm (#7627)
-
-add the ability to update querier config using `values.yaml` file",False,"diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md
-index d8710129b6d32..0c64710293a98 100644
---- a/docs/sources/installation/helm/reference.md
-+++ b/docs/sources/installation/helm/reference.md
-@@ -1312,6 +1312,15 @@ null
- ""runAsUser"": 10001
- }
-
-+
-+
-+
-+ | loki.querier |
-+ object |
-+ Optional querier configuration |
-+
-+{}
-+
- |
-
-
-diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
-index ba474e79ac67d..48b1d58113a73 100644
---- a/production/helm/loki/Chart.yaml
-+++ b/production/helm/loki/Chart.yaml
-@@ -4,7 +4,7 @@ name: loki
- description: Helm chart for Grafana Loki in simple, scalable mode
- type: application
- appVersion: 2.6.1
--version: 3.3.3
-+version: 3.3.4
- home: https://grafana.github.io/helm-charts
- sources:
- - https://github.com/grafana/loki
-diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
-index 910dfa49e6a57..e0cfe25733cac 100644
---- a/production/helm/loki/README.md
-+++ b/production/helm/loki/README.md
-@@ -1,6 +1,6 @@
- # loki
-
--  
-+  
-
- Helm chart for Grafana Loki in simple, scalable mode
-
-diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
-index 18f95051a6456..2dbfb65308ac6 100644
---- a/production/helm/loki/values.yaml
-+++ b/production/helm/loki/values.yaml
-@@ -173,6 +173,11 @@ loki:
- {{- tpl (. | toYaml) $ | nindent 4 }}
- {{- end }}
-
-+ {{- with .Values.loki.querier }}
-+ querier:
-+ {{- tpl (. | toYaml) $ | nindent 4 }}
-+ {{- end }}
-+
- # Should authentication be enabled
- auth_enabled: true
-
-@@ -259,6 +264,9 @@ loki:
- # -- Optional analytics configuration
- analytics: {}
-
-+ # -- Optional querier configuration
-+ querier: {}
-+
- enterprise:
- # Enable enterprise features, license must be provided
- enabled: false",Loki,"Add querier config to loki helm (#7627)
-
-add the ability to update querier config using `values.yaml` file"
-3c47735deabab6fcdc7fad9bb15016dcefa0d692,2021-06-03 14:23:30,Michel Hollands,"Add a QueryFrontendTripperware module (#3792)
-
-Signed-off-by: Michel Hollands ",False,"diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
-index efcf230d78a1e..f3964238d254a 100644
---- a/pkg/loki/loki.go
-+++ b/pkg/loki/loki.go
-@@ -366,6 +366,7 @@ func (t *Loki) setupModuleManager() error {
- mm.RegisterModule(Ingester, t.initIngester)
- mm.RegisterModule(Querier, t.initQuerier)
- mm.RegisterModule(IngesterQuerier, t.initIngesterQuerier)
-+ mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendTripperware, modules.UserInvisibleModule)
- mm.RegisterModule(QueryFrontend, t.initQueryFrontend)
- mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule)
- mm.RegisterModule(Ruler, t.initRuler)
-@@ -375,19 +376,20 @@ func (t *Loki) setupModuleManager() error {
-
- // Add dependencies
- deps := map[string][]string{
-- Ring: {RuntimeConfig, Server, MemberlistKV},
-- Overrides: {RuntimeConfig},
-- TenantConfigs: {RuntimeConfig},
-- Distributor: {Ring, Server, Overrides, TenantConfigs},
-- Store: {Overrides},
-- Ingester: {Store, Server, MemberlistKV, TenantConfigs},
-- Querier: {Store, Ring, Server, IngesterQuerier, TenantConfigs},
-- QueryFrontend: {Server, Overrides, TenantConfigs},
-- Ruler: {Ring, Server, Store, RulerStorage, IngesterQuerier, Overrides, TenantConfigs},
-- TableManager: {Server},
-- Compactor: {Server, Overrides},
-- IngesterQuerier: {Ring},
-- All: {Querier, Ingester, Distributor, TableManager, Ruler},
-+ Ring: {RuntimeConfig, Server, MemberlistKV},
-+ Overrides: {RuntimeConfig},
-+ TenantConfigs: {RuntimeConfig},
-+ Distributor: {Ring, Server, Overrides, TenantConfigs},
-+ Store: {Overrides},
-+ Ingester: {Store, Server, MemberlistKV, TenantConfigs},
-+ Querier: {Store, Ring, Server, IngesterQuerier, TenantConfigs},
-+ QueryFrontendTripperware: {Server, Overrides, TenantConfigs},
-+ QueryFrontend: {QueryFrontendTripperware},
-+ Ruler: {Ring, Server, Store, RulerStorage, IngesterQuerier, Overrides, TenantConfigs},
-+ TableManager: {Server},
-+ Compactor: {Server, Overrides},
-+ IngesterQuerier: {Ring},
-+ All: {Querier, Ingester, Distributor, TableManager, Ruler},
- }
-
- // Add IngesterQuerier as a dependency for store when target is either ingester or querier.
-diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
-index 28beb64bcbb54..6a04381da06e7 100644
---- a/pkg/loki/modules.go
-+++ b/pkg/loki/modules.go
-@@ -58,23 +58,24 @@ const maxChunkAgeForTableManager = 12 * time.Hour
-
- // The various modules that make up Loki.
- const (
-- Ring string = ""ring""
-- RuntimeConfig string = ""runtime-config""
-- Overrides string = ""overrides""
-- TenantConfigs string = ""tenant-configs""
-- Server string = ""server""
-- Distributor string = ""distributor""
-- Ingester string = ""ingester""
-- Querier string = ""querier""
-- IngesterQuerier string = ""ingester-querier""
-- QueryFrontend string = ""query-frontend""
-- RulerStorage string = ""ruler-storage""
-- Ruler string = ""ruler""
-- Store string = ""store""
-- TableManager string = ""table-manager""
-- MemberlistKV string = ""memberlist-kv""
-- Compactor string = ""compactor""
-- All string = ""all""
-+ Ring string = ""ring""
-+ RuntimeConfig string = ""runtime-config""
-+ Overrides string = ""overrides""
-+ TenantConfigs string = ""tenant-configs""
-+ Server string = ""server""
-+ Distributor string = ""distributor""
-+ Ingester string = ""ingester""
-+ Querier string = ""querier""
-+ IngesterQuerier string = ""ingester-querier""
-+ QueryFrontend string = ""query-frontend""
-+ QueryFrontendTripperware string = ""query-frontend-tripperware""
-+ RulerStorage string = ""ruler-storage""
-+ Ruler string = ""ruler""
-+ Store string = ""store""
-+ TableManager string = ""table-manager""
-+ MemberlistKV string = ""memberlist-kv""
-+ Compactor string = ""compactor""
-+ All string = ""all""
- )
-
- func (t *Loki) initServer() (services.Service, error) {
-@@ -377,6 +378,26 @@ type disabledShuffleShardingLimits struct{}
-
- func (disabledShuffleShardingLimits) MaxQueriersPerUser(userID string) int { return 0 }
-
-+func (t *Loki) initQueryFrontendTripperware() (_ services.Service, err error) {
-+ level.Debug(util_log.Logger).Log(""msg"", ""initializing query frontend tripperware"")
-+
-+ tripperware, stopper, err := queryrange.NewTripperware(
-+ t.Cfg.QueryRange,
-+ util_log.Logger,
-+ t.overrides,
-+ t.Cfg.SchemaConfig.SchemaConfig,
-+ t.Cfg.Querier.QueryIngestersWithin,
-+ prometheus.DefaultRegisterer,
-+ )
-+ if err != nil {
-+ return
-+ }
-+ t.stopper = stopper
-+ t.QueryFrontEndTripperware = tripperware
-+
-+ return services.NewIdleService(nil, nil), nil
-+}
-+
- func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
- level.Debug(util_log.Logger).Log(""msg"", ""initializing query frontend"", ""config"", fmt.Sprintf(""%+v"", t.Cfg.Frontend))
-
-@@ -394,27 +415,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
- frontendv1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend)
- }
-
-- level.Debug(util_log.Logger).Log(""msg"", ""initializing query range tripperware"",
-- ""config"", fmt.Sprintf(""%+v"", t.Cfg.QueryRange),
-- ""limits"", fmt.Sprintf(""%+v"", t.Cfg.LimitsConfig),
-- )
-- tripperware, stopper, err := queryrange.NewTripperware(
-- t.Cfg.QueryRange,
-- util_log.Logger,
-- t.overrides,
-- t.Cfg.SchemaConfig.SchemaConfig,
-- t.Cfg.Querier.QueryIngestersWithin,
-- prometheus.DefaultRegisterer,
-- )
-- if err != nil {
-- return
-- }
-- t.stopper = stopper
--
-- roundTripper = tripperware(roundTripper)
-- if t.QueryFrontEndTripperware != nil {
-- roundTripper = t.QueryFrontEndTripperware(roundTripper)
-- }
-+ roundTripper = t.QueryFrontEndTripperware(roundTripper)
-
- frontendHandler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer)
- if t.Cfg.Frontend.CompressResponses {",unknown,"Add a QueryFrontendTripperware module (#3792)
-
-Signed-off-by: Michel Hollands "
-12c7eab8bb94fd82b184c1c222200e37f2ca050a,2020-05-20 21:08:32,Ed Welch,"Prep 1.5.0 release (#2098)
-
-* Updating the Changelog and Upgrade guide for 1.5.0 release.
-
-Signed-off-by: Ed Welch
-
-* Changing release number in all the docs
-
-Signed-off-by: Ed Welch ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md
-index 49f786e889916..85ac6c732839a 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -1,3 +1,255 @@
-+## 1.5.0 (2020-05-20)
-+
-+It's been a busy month and a half since 1.4.0 was released, and a lot of new improvements have been added to Loki since!
-+
-+Be prepared for some configuration changes that may cause some bumps when upgrading,
-+we apologize for this but are always striving to reach the right compromise of code simplicity and user/operating experience.
-+
-+In this case we opted to keep a simplified configuration inline with Cortex rather than a more complicated and error prone internal config mapping or difficult to implement support for multiple config names for the same feature.
-+
-+This does result in breaking config changes for some configurations, however, these will fail fast and with the [list of diffs](https://cortexmetrics.io/docs/changelog/#config-file-breaking-changes) from the Cortex project should be quick to fix.
-+
-+### Important Notes
-+
-+**Be prepared for breaking config changes.** Loki 1.5.0 vendors cortex [v1.0.1-0.20200430170006-3462eb63f324](https://github.com/cortexproject/cortex/commit/3462eb63f324c649bbaa122933bc591b710f4e48),
-+there were substantial breaking config changes in Cortex 1.0 which standardized config options, and fixed typos.
-+
-+**The Loki docker image user has changed to no longer be root**
-+
-+Check the [upgrade guide](https://github.com/grafana/loki/blob/master/docs/operations/upgrade.md#150) for more detailed information on these changes.
-+
-+### Notable Features and Fixes
-+
-+There are quite a few we want to mention listed in order they were merged (mostly)
-+
-+* [1837](https://github.com/grafana/loki/pull/1837) **sandeepsukhani**: flush boltdb to object store
-+
-+This is perhaps the most exciting feature of 1.5.0, the first steps in removing a dependency on a separate index store! This feature is still very new and experimental, however, we want this to be the future for Loki. Only requiring just an object store.
-+
-+If you want to test this new feature, and help us find any bugs, check out the [docs](docs/operations/storage/boltdb-shipper.md) to learn more and get started.
-+
-+* [2073](https://github.com/grafana/loki/pull/2073) **slim-bean**: Loki: Allow configuring query_store_max_look_back_period when running a filesystem store and boltdb-shipper
-+
-+This is even more experimental than the previous feature mentioned however also pretty exciting for Loki users who use the filesystem storage. We can leverage changes made in [1837](https://github.com/grafana/loki/pull/1837) to now allow Loki to run in a clustered mode with individual filesystem stores!
-+
-+Please check out the last section in the [filesystem docs](docs/operations/storage/filesystem.md) for more details on how this works and how to use it!
-+
-+* [2095](https://github.com/grafana/loki/pull/2095) **cyriltovena**: Adds backtick for the quoted string token lexer.
-+
-+This will come as a big win to anyone who is writing complicated reqular expressions in either their Label matchers or Filter Expressions. Starting now you can use the backtick to encapsulate your regex **and not have to do any escaping of special characters!!**
-+
-+Examples:
-+
-+```
-+{name=""cassandra""} |~ `error=\w+`
-+{name!~`mysql-\d+`}
-+```
-+
-+* [2055](https://github.com/grafana/loki/pull/2055) **aknuds1**: Chore: Fix spelling of per second in code
-+
-+This is technically a breaking change for anyone who wrote code to processes the new statistics output in the query result added in 1.4.0, we apologize to anyone in this situation but if we don't fix this kind of error now it will be there forever.
-+And at the same time we didn't feel it was appropriate to make any major api revision changes for such a new feature and simple change. We are always trying to use our best judgement in cases like this.
-+
-+* [2031](https://github.com/grafana/loki/pull/2031) **cyriltovena**: Improve protobuf serialization
-+
-+Thanks @cyriltovena for another big performance improvement in Loki, this time around protbuf's!
-+
-+* [2021](https://github.com/grafana/loki/pull/2021) **slim-bean**: Loki: refactor validation and improve error messages
-+* [2012](https://github.com/grafana/loki/pull/2012) **slim-bean**: Loki: Improve logging and add metrics to streams dropped by stream limit
-+
-+These two changes standardize the metrics used to report when a tenant hits a limit, now all discarded samples should be reported under `loki_discarded_samples_total` and you no longer need to also reference `cortex_discarded_samples_total`.
-+Additionally error messages were improved to help clients take better action when hitting limits.
-+
-+* [1970](https://github.com/grafana/loki/pull/1970) **cyriltovena**: Allow to aggregate binary operations.
-+
-+Another nice improvement to the query language which allows queries like this to work now:
-+
-+```
-+sum by (job) (count_over_time({namespace=""tns""}[5m] |= ""level=error"") / count_over_time({namespace=""tns""}[5m]))
-+```
-+
-+* [1713](https://github.com/grafana/loki/pull/1713) **adityacs**: Log error message for invalid checksum
-+
-+In the event something went wrong with a stored chunk, rather than fail the query we ignore the chunk and return the rest.
-+
-+* [2066](https://github.com/grafana/loki/pull/2066) **slim-bean**: Promtail: metrics stage can also count line bytes
-+
-+This is a nice extension to a previous feature which let you add a metric to count log lines per stream, you can now count log bytes per stream.
-+
-+Check out [this example](docs/clients/promtail/configuration.md#counter) to configure this in your promtail pipelines.
-+
-+* [1935](https://github.com/grafana/loki/pull/1935) **cyriltovena**: Support stdin target via flag instead of automatic detection.
-+
-+Third times a charm! With 1.4.0 we allowed sending logs directly to promtail via stdin, with 1.4.1 we released a patch for this feature which wasn't detecting stdin correctly on some operating systems.
-+Unfortunately after a few more bug reports it seems this change caused some more undesired side effects so we decided to not try to autodetect stdin at all, instead now you must pass the `--stdin` flag if you want Promtail to listen for logs on stdin.
-+
-+* [2076](https://github.com/grafana/loki/pull/2076) **cyriltovena**: Allows to pass inlined pipeline stages to the docker driver.
-+* [1906](https://github.com/grafana/loki/pull/1906) **cyriltovena**: Add no-file and keep-file log option for docker driver.
-+
-+The docker logging driver received a couple very nice updates, it's always been challenging to configure pipeline stages for the docker driver, with the first PR there are now a few easier ways to do this!
-+In the second PR we added config options to control keeping any log files on the host when using the docker logging driver, allowing you to run with no disk access if you would like, as well as allowing you to control keeping log files available after container restarts.
-+
-+** [1864](https://github.com/grafana/loki/pull/1864) **cyriltovena**: Sign helm package with GPG.
-+
-+We now GPG sign helm packages!
-+
-+### All Changes
-+
-+#### Loki
-+
-+* [2097](https://github.com/grafana/loki/pull/2097) **owen-d**: simplifies/updates some of our configuration examples
-+* [2095](https://github.com/grafana/loki/pull/2095) **cyriltovena**: Adds backtick for the quoted string token lexer.
-+* [2093](https://github.com/grafana/loki/pull/2093) **cyriltovena**: Fixes unit in stats request log.
-+* [2088](https://github.com/grafana/loki/pull/2088) **slim-bean**: Loki: allow no encoding/compression on chunks
-+* [2078](https://github.com/grafana/loki/pull/2078) **owen-d**: removes yolostring
-+* [2073](https://github.com/grafana/loki/pull/2073) **slim-bean**: Loki: Allow configuring query_store_max_look_back_period when running a filesystem store and boltdb-shipper
-+* [2064](https://github.com/grafana/loki/pull/2064) **cyriltovena**: Reverse entry iterator pool
-+* [2059](https://github.com/grafana/loki/pull/2059) **cyriltovena**: Recover from panic in http and grpc handlers.
-+* [2058](https://github.com/grafana/loki/pull/2058) **cyriltovena**: Fix a bug in range vector skipping data.
-+* [2055](https://github.com/grafana/loki/pull/2055) **aknuds1**: Chore: Fix spelling of per second in code
-+* [2046](https://github.com/grafana/loki/pull/2046) **gouthamve**: Fix bug in logql parsing that leads to crash.
-+* [2050](https://github.com/grafana/loki/pull/2050) **aknuds1**: Chore: Correct typo ""per seconds""
-+* [2034](https://github.com/grafana/loki/pull/2034) **sandeepsukhani**: some metrics for measuring performance and failures in boltdb shipper
-+* [2031](https://github.com/grafana/loki/pull/2031) **cyriltovena**: Improve protobuf serialization
-+* [2030](https://github.com/grafana/loki/pull/2030) **adityacs**: Update loki to cortex master
-+* [2023](https://github.com/grafana/loki/pull/2023) **cyriltovena**: Support post requests in the frontend queryrange handler.
-+* [2021](https://github.com/grafana/loki/pull/2021) **slim-bean**: Loki: refactor validation and improve error messages
-+* [2019](https://github.com/grafana/loki/pull/2019) **slim-bean**: make `loki_ingester_memory_streams` Gauge per tenant.
-+* [2012](https://github.com/grafana/loki/pull/2012) **slim-bean**: Loki: Improve logging and add metrics to streams dropped by stream limit
-+* [2010](https://github.com/grafana/loki/pull/2010) **cyriltovena**: Update lz4 library to latest to ensure deterministic output.
-+* [2001](https://github.com/grafana/loki/pull/2001) **sandeepsukhani**: table client for boltdb shipper to enforce retention
-+* [1995](https://github.com/grafana/loki/pull/1995) **sandeepsukhani**: make boltdb shipper singleton and some other minor refactoring
-+* [1987](https://github.com/grafana/loki/pull/1987) **slim-bean**: Loki: Add a missing method to facade which is called by the metrics storage client in cortex
-+* [1982](https://github.com/grafana/loki/pull/1982) **cyriltovena**: Update cortex to latest.
-+* [1977](https://github.com/grafana/loki/pull/1977) **cyriltovena**: Ensure trace propagation in our logs.
-+* [1976](https://github.com/grafana/loki/pull/1976) **slim-bean**: incorporate some better defaults into table-manager configs
-+* [1975](https://github.com/grafana/loki/pull/1975) **slim-bean**: Update cortex vendoring to latest master
-+* [1970](https://github.com/grafana/loki/pull/1970) **cyriltovena**: Allow to aggregate binary operations.
-+* [1965](https://github.com/grafana/loki/pull/1965) **slim-bean**: Loki: Adds an `interval` paramater to query_range queries allowing a sampling of events to be returned based on the provided interval
-+* [1964](https://github.com/grafana/loki/pull/1964) **owen-d**: chunk bounds metric now records 8h range in 1h increments
-+* [1963](https://github.com/grafana/loki/pull/1963) **cyriltovena**: Improve the local config to work locally and inside docker.
-+* [1961](https://github.com/grafana/loki/pull/1961) **jpmcb**: [Bug] Workaround for broken etcd gomod import
-+* [1958](https://github.com/grafana/loki/pull/1958) **owen-d**: chunk lifespan histogram
-+* [1956](https://github.com/grafana/loki/pull/1956) **sandeepsukhani**: update cortex to latest master
-+* [1953](https://github.com/grafana/loki/pull/1953) **jpmcb**: Go mod: explicit golang.org/x/net replace
-+* [1950](https://github.com/grafana/loki/pull/1950) **cyriltovena**: Fixes case handling in regex simplification.
-+* [1949](https://github.com/grafana/loki/pull/1949) **SerialVelocity**: [Loki]: Cleanup dockerfile
-+* [1946](https://github.com/grafana/loki/pull/1946) **slim-bean**: Loki Update the cut block size counter when creating a memchunk from byte slice
-+* [1939](https://github.com/grafana/loki/pull/1939) **owen-d**: adds config validation, similar to cortex
-+* [1916](https://github.com/grafana/loki/pull/1916) **cyriltovena**: Add cap_net_bind_service linux capabilities to Loki.
-+* [1914](https://github.com/grafana/loki/pull/1914) **owen-d**: only fetches one chunk per series in /series
-+* [1875](https://github.com/grafana/loki/pull/1875) **owen-d**: support `match[]` encoding
-+* [1869](https://github.com/grafana/loki/pull/1869) **pstibrany**: Update Cortex to latest master
-+* [1846](https://github.com/grafana/loki/pull/1846) **owen-d**: Sharding optimizations I: AST mapping
-+* [1838](https://github.com/grafana/loki/pull/1838) **cyriltovena**: Move default port for Loki to 3100 everywhere.
-+* [1837](https://github.com/grafana/loki/pull/1837) **sandeepsukhani**: flush boltdb to object store
-+* [1834](https://github.com/grafana/loki/pull/1834) **Mario-Hofstaetter**: Loki/Change local storage directory to /loki/ and fix permissions (#1833)
-+* [1819](https://github.com/grafana/loki/pull/1819) **cyriltovena**: Adds a counter for total flushed chunks per reason.
-+* [1816](https://github.com/grafana/loki/pull/1816) **sdojjy**: loki can not be started with loki-local-config.yaml
-+* [1810](https://github.com/grafana/loki/pull/1810) **cyriltovena**: Optimize empty filter queries.
-+* [1809](https://github.com/grafana/loki/pull/1809) **cyriltovena**: Test stats memchunk
-+* [1804](https://github.com/grafana/loki/pull/1804) **pstibrany**: Convert Loki modules to services
-+* [1799](https://github.com/grafana/loki/pull/1799) **pstibrany**: loki: update Cortex to master
-+* [1798](https://github.com/grafana/loki/pull/1798) **adityacs**: Support configurable maximum of the limits parameter
-+* [1713](https://github.com/grafana/loki/pull/1713) **adityacs**: Log error message for invalid checksum
-+* [1706](https://github.com/grafana/loki/pull/1706) **cyriltovena**: Non-root user docker image for Loki.
-+
-+#### Logcli
-+* [2027](https://github.com/grafana/loki/pull/2027) **pstibrany**: logcli: Query needs to be stored into url.RawQuery, and not url.Path
-+* [2000](https://github.com/grafana/loki/pull/2000) **cyriltovena**: Improve URL building in the logcli to strip trailing /.
-+* [1922](https://github.com/grafana/loki/pull/1922) **bavarianbidi**: logcli: org-id/tls-skip-verify set via env var
-+* [1861](https://github.com/grafana/loki/pull/1861) **yeya24**: Support series API in logcli
-+* [1850](https://github.com/grafana/loki/pull/1850) **chrischdi**: BugFix: Fix logcli client to use OrgID in LiveTail
-+* [1814](https://github.com/grafana/loki/pull/1814) **cyriltovena**: Logcli remote storage.
-+* [1712](https://github.com/grafana/loki/pull/1712) **rfratto**: clarify logcli commands and output
-+
-+#### Promtail
-+* [2069](https://github.com/grafana/loki/pull/2069) **slim-bean**: Promtail: log at debug level when nothing matches the specified path for a file target
-+* [2066](https://github.com/grafana/loki/pull/2066) **slim-bean**: Promtail: metrics stage can also count line bytes
-+* [2049](https://github.com/grafana/loki/pull/2049) **adityacs**: Fix promtail client default values
-+* [2075](https://github.com/grafana/loki/pull/2075) **cyriltovena**: Fixes a panic in dry-run when using external labels.
-+* [2026](https://github.com/grafana/loki/pull/2026) **adityacs**: Targets not required in promtail config
-+* [2004](https://github.com/grafana/loki/pull/2004) **cyriltovena**: Adds config to disable HTTP and GRPC server in Promtail.
-+* [1935](https://github.com/grafana/loki/pull/1935) **cyriltovena**: Support stdin target via flag instead of automatic detection.
-+* [1920](https://github.com/grafana/loki/pull/1920) **alexanderGalushka**: feat: tms readiness check bypass implementation
-+* [1894](https://github.com/grafana/loki/pull/1894) **cyriltovena**: Fixes possible panic in json pipeline stage.
-+* [1865](https://github.com/grafana/loki/pull/1865) **adityacs**: Fix flaky promtail test
-+* [1815](https://github.com/grafana/loki/pull/1815) **adityacs**: Log error message when source does not exist in extracted values
-+* [1627](https://github.com/grafana/loki/pull/1627) **rfratto**: Proposal: Promtail Push API
-+
-+#### Docker Driver
-+* [2076](https://github.com/grafana/loki/pull/2076) **cyriltovena**: Allows to pass inlined pipeline stages to the docker driver.
-+* [2054](https://github.com/grafana/loki/pull/2054) **bkmit**: Docker driver: Allow to provision external pipeline files to plugin
-+* [1906](https://github.com/grafana/loki/pull/1906) **cyriltovena**: Add no-file and keep-file log option for docker driver.
-+* [1903](https://github.com/grafana/loki/pull/1903) **cyriltovena**: Log docker driver config map.
-+
-+#### FluentD
-+* [2074](https://github.com/grafana/loki/pull/2074) **osela**: fluentd plugin: support placeholders in tenant field
-+* [2006](https://github.com/grafana/loki/pull/2006) **Skeen**: fluent-plugin-loki: Restructuring and CI
-+* [1909](https://github.com/grafana/loki/pull/1909) **jgehrcke**: fluentd loki plugin README: add note about labels
-+* [1853](https://github.com/grafana/loki/pull/1853) **wardbekker**: bump gem version
-+* [1811](https://github.com/grafana/loki/pull/1811) **JamesJJ**: Error handling: Show data stream at ""debug"" level, not ""warn""
-+
-+#### Fluent Bit
-+* [2040](https://github.com/grafana/loki/pull/2040) **avii-ridge**: Add extraOutputs variable to support multiple outputs for fluent-bit
-+* [1915](https://github.com/grafana/loki/pull/1915) **DirtyCajunRice**: Fix fluent-bit metrics
-+* [1890](https://github.com/grafana/loki/pull/1890) **dottedmag**: fluentbit: JSON encoding: avoid base64 encoding of []byte inside other slices
-+* [1791](https://github.com/grafana/loki/pull/1791) **cyriltovena**: Improve fluentbit logfmt.
-+
-+#### Ksonnet
-+* [1980](https://github.com/grafana/loki/pull/1980) **cyriltovena**: Log slow query from the frontend by default in ksonnet.
-+
-+##### Mixins
-+* [2080](https://github.com/grafana/loki/pull/2080) **beorn7**: mixin: Accept suffixes to pod name in instance labels
-+* [2044](https://github.com/grafana/loki/pull/2044) **slim-bean**: Dashboards: fixes the cpu usage graphs
-+* [2043](https://github.com/grafana/loki/pull/2043) **joe-elliott**: Swapped to container restarts over terminated reasons
-+* [2041](https://github.com/grafana/loki/pull/2041) **slim-bean**: Dashboard: Loki Operational improvements
-+* [1934](https://github.com/grafana/loki/pull/1934) **tomwilkie**: Put loki-mixin and promtail-mixin dashboards in a folder.
-+* [1913](https://github.com/grafana/loki/pull/1913) **tomwilkie**: s/dashboards/grafanaDashboards.
-+
-+#### Helm
-+* [2038](https://github.com/grafana/loki/pull/2038) **oke-py**: Docs: update Loki Helm Chart document to support Helm 3
-+* [2015](https://github.com/grafana/loki/pull/2015) **etashsingh**: Change image tag from 1.4.1 to 1.4.0 in Helm chart
-+* [1981](https://github.com/grafana/loki/pull/1981) **sshah90**: added extraCommandlineArgs in values file
-+* [1967](https://github.com/grafana/loki/pull/1967) **rdxmb**: helm chart: add missing line feed
-+* [1898](https://github.com/grafana/loki/pull/1898) **stefanandres**: [helm loki/promtail] make UpdateStrategy configurable
-+* [1871](https://github.com/grafana/loki/pull/1871) **stefanandres**: [helm loki/promtail] Add systemd-journald example with extraMount, extraVolumeMount
-+* [1864](https://github.com/grafana/loki/pull/1864) **cyriltovena**: Sign helm package with GPG.
-+* [1825](https://github.com/grafana/loki/pull/1825) **polar3130**: Helm/loki-stack: refresh default grafana.image.tag to 6.7.0
-+* [1817](https://github.com/grafana/loki/pull/1817) **bclermont**: Helm chart: Prevent prometheus to scrape both services
-+
-+#### Loki Canary
-+* [1891](https://github.com/grafana/loki/pull/1891) **joe-elliott**: Addition of a `/suspend` endpoint to Loki Canary
-+
-+#### Docs
-+* [2056](https://github.com/grafana/loki/pull/2056) **cyriltovena**: Update api.md
-+* [2014](https://github.com/grafana/loki/pull/2014) **jsoref**: Spelling
-+* [1999](https://github.com/grafana/loki/pull/1999) **oddlittlebird**: Docs: Added labels content
-+* [1974](https://github.com/grafana/loki/pull/1974) **rfratto**: fix stores for chunk and index in documentation for period_config
-+* [1966](https://github.com/grafana/loki/pull/1966) **oddlittlebird**: Docs: Update docker.md
-+* [1951](https://github.com/grafana/loki/pull/1951) **cstyan**: Move build from source instructions to root readme.
-+* [1945](https://github.com/grafana/loki/pull/1945) **FlorianLudwig**: docs: version pin the docker image in docker-compose
-+* [1925](https://github.com/grafana/loki/pull/1925) **wardbekker**: Clarified that the api push path needs to be specified.
-+* [1905](https://github.com/grafana/loki/pull/1905) **sshah90**: updating typo for end time parameter in api docs
-+* [1888](https://github.com/grafana/loki/pull/1888) **slim-bean**: docs: cleaning up the comments for the cache_config, default_validity option
-+* [1887](https://github.com/grafana/loki/pull/1887) **slim-bean**: docs: Adding a config change in release 1.4 upgrade doc, updating readme with new doc links
-+* [1881](https://github.com/grafana/loki/pull/1881) **cyriltovena**: Add precision about the range notation for LogQL.
-+* [1879](https://github.com/grafana/loki/pull/1879) **slim-bean**: docs: update promtail docs for backoff
-+* [1873](https://github.com/grafana/loki/pull/1873) **owen-d**: documents frontend worker
-+* [1870](https://github.com/grafana/loki/pull/1870) **ushuz**: Docs: Keep plugin install command example in one line
-+* [1856](https://github.com/grafana/loki/pull/1856) **slim-bean**: docs: tweak the doc section of the readme a little
-+* [1852](https://github.com/grafana/loki/pull/1852) **slim-bean**: docs: clean up schema recommendations
-+* [1843](https://github.com/grafana/loki/pull/1843) **vishesh92**: Docs: Update configuration docs for redis
-+
-+#### Build
-+* [2042](https://github.com/grafana/loki/pull/2042) **rfratto**: Fix drone
-+* [2009](https://github.com/grafana/loki/pull/2009) **cyriltovena**: Adds :delegated flags to speed up build experience on MacOS.
-+* [1942](https://github.com/grafana/loki/pull/1942) **owen-d**: delete tag script filters by prefix instead of substring
-+* [1918](https://github.com/grafana/loki/pull/1918) **slim-bean**: build: This Dockerfile is a remnant from a long time ago, not needed.
-+* [1911](https://github.com/grafana/loki/pull/1911) **slim-bean**: build: push images for `k` branches
-+* [1849](https://github.com/grafana/loki/pull/1849) **cyriltovena**: Pin helm version in circle-ci helm testing workflow.
-+
-+
- ## 1.4.1 (2020-04-06)
-
- We realized after the release last week that piping data into promtail was not working on Linux or Windows, this should fix this issue for both platforms:
-diff --git a/README.md b/README.md
-index 2779ada7f55a0..c713ee8c2f3ec 100644
---- a/README.md
-+++ b/README.md
-@@ -29,11 +29,9 @@ Loki differs from Prometheus by focusing on logs instead of metrics, and deliver
-
- ## Getting started
-
--* [Installing Loki](https://github.com/grafana/loki/tree/v1.4.1/docs/installation/README.md)
--* [Installing
--Promtail](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/promtail/installation.md)
--* [Getting
--Started Guide](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/README.md)
-+* [Installing Loki](https://github.com/grafana/loki/tree/v1.5.0/docs/installation/README.md)
-+* [Installing Promtail](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/promtail/installation.md)
-+* [Getting Started Guide](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/README.md)
-
- ## Upgrading
-
-@@ -42,6 +40,7 @@ Started Guide](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/
- ### Documentation
-
- * [master](./docs/README.md)
-+* [v1.5.0](https://github.com/grafana/loki/tree/v1.5.0/docs/README.md)
- * [v1.4.1](https://github.com/grafana/loki/tree/v1.4.1/docs/README.md)
- * [v1.4.0](https://github.com/grafana/loki/tree/v1.4.0/docs/README.md)
- * [v1.3.0](https://github.com/grafana/loki/tree/v1.3.0/docs/README.md)
-@@ -49,18 +48,18 @@ Started Guide](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/
- * [v1.1.0](https://github.com/grafana/loki/tree/v1.1.0/docs/README.md)
- * [v1.0.0](https://github.com/grafana/loki/tree/v1.0.0/docs/README.md)
-
--Commonly used sections (from the latest release v1.4.1):
-+Commonly used sections (from the latest release v1.5.0):
-
--- [API documentation](https://github.com/grafana/loki/tree/v1.4.1/docs/api.md) for alternative ways of getting logs into Loki.
-+- [API documentation](https://github.com/grafana/loki/tree/v1.5.0/docs/api.md) for alternative ways of getting logs into Loki.
- - [Labels](https://github.com/grafana/loki/blob/master/docs/getting-started/labels.md)
--- [Operations](https://github.com/grafana/loki/tree/v1.4.1/docs/operations) for important aspects of running Loki.
--- [Promtail](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/promtail) is an agent which can tail your log files and push them to Loki.
--- [Pipelines](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/promtail/pipelines.md) for detailed log processing pipeline documentation
--- [Docker Logging Driver](https://github.com/grafana/loki/tree/v1.4.1/docs/clients/docker-driver) is a docker plugin to send logs directly to Loki from Docker containers.
--- [LogCLI](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/logcli.md) on how to query your logs without Grafana.
--- [Loki Canary](https://github.com/grafana/loki/tree/v1.4.1/docs/operations/loki-canary.md) for monitoring your Loki installation for missing logs.
--- [Troubleshooting](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/troubleshooting.md) for help around frequent error messages.
--- [Loki in Grafana](https://github.com/grafana/loki/tree/v1.4.1/docs/getting-started/grafana.md) for how to set up a Loki datasource in Grafana and query your logs.
-+- [Operations](https://github.com/grafana/loki/tree/v1.5.0/docs/operations) for important aspects of running Loki.
-+- [Promtail](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/promtail) is an agent which can tail your log files and push them to Loki.
-+- [Pipelines](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/promtail/pipelines.md) for detailed log processing pipeline documentation
-+- [Docker Logging Driver](https://github.com/grafana/loki/tree/v1.5.0/docs/clients/docker-driver) is a docker plugin to send logs directly to Loki from Docker containers.
-+- [LogCLI](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/logcli.md) on how to query your logs without Grafana.
-+- [Loki Canary](https://github.com/grafana/loki/tree/v1.5.0/docs/operations/loki-canary.md) for monitoring your Loki installation for missing logs.
-+- [Troubleshooting](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/troubleshooting.md) for help around frequent error messages.
-+- [Loki in Grafana](https://github.com/grafana/loki/tree/v1.5.0/docs/getting-started/grafana.md) for how to set up a Loki datasource in Grafana and query your logs.
-
- ## Getting Help
-
-diff --git a/docs/clients/promtail/installation.md b/docs/clients/promtail/installation.md
-index 7dae8efd31659..191e3efa870b7 100644
---- a/docs/clients/promtail/installation.md
-+++ b/docs/clients/promtail/installation.md
-@@ -12,7 +12,7 @@ Every release includes binaries for Promtail which can be found on the
-
- ```bash
- # modify tag to most recent version
--$ docker pull grafana/promtail:1.4.1
-+$ docker pull grafana/promtail:1.5.0
- ```
-
- ## Helm
-diff --git a/docs/installation/docker.md b/docs/installation/docker.md
-index 82be1cec37273..850ae4b3eedd5 100644
---- a/docs/installation/docker.md
-+++ b/docs/installation/docker.md
-@@ -15,10 +15,10 @@ For production, we recommend Tanka or Helm.
- Copy and paste the commands below into your command line.
-
- ```bash
--wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/loki/loki-local-config.yaml -O loki-config.yaml
--docker run -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:1.4.1 -config.file=/mnt/config/loki-config.yaml
--wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml
--docker run -v $(pwd):/mnt/config -v /var/log:/var/log grafana/promtail:1.4.1 -config.file=/mnt/config/promtail-config.yaml
-+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/loki/loki-local-config.yaml -O loki-config.yaml
-+docker run -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:1.5.0 -config.file=/mnt/config/loki-config.yaml
-+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml
-+docker run -v $(pwd):/mnt/config -v /var/log:/var/log grafana/promtail:1.5.0 -config.file=/mnt/config/promtail-config.yaml
- ```
-
- When finished, loki-config.yaml and promtail-config.yaml are downloaded in the directory you chose. Docker containers are running Loki and Promtail using those config files.
-@@ -31,10 +31,10 @@ Copy and paste the commands below into your terminal. Note that you will need to
-
- ```bash
- cd """"
--wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/loki/loki-local-config.yaml -O loki-config.yaml
--docker run -v :/mnt/config -p 3100:3100 grafana/loki:1.4.1 --config.file=/mnt/config/loki-config.yaml
--wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml
--docker run -v :/mnt/config -v /var/log:/var/log grafana/promtail:1.4.1 --config.file=/mnt/config/promtail-config.yaml
-+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/loki/loki-local-config.yaml -O loki-config.yaml
-+docker run -v :/mnt/config -p 3100:3100 grafana/loki:1.5.0 --config.file=/mnt/config/loki-config.yaml
-+wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml
-+docker run -v :/mnt/config -v /var/log:/var/log grafana/promtail:1.5.0 --config.file=/mnt/config/promtail-config.yaml
- ```
-
- When finished, loki-config.yaml and promtail-config.yaml are downloaded in the directory you chose. Docker containers are running Loki and Promtail using those config files.
-@@ -44,6 +44,6 @@ Navigate to http://localhost:3100/metrics to view the output.
- ## Install with Docker Compose
-
- ```bash
--$ wget https://raw.githubusercontent.com/grafana/loki/v1.4.1/production/docker-compose.yaml -O docker-compose.yaml
-+$ wget https://raw.githubusercontent.com/grafana/loki/v1.5.0/production/docker-compose.yaml -O docker-compose.yaml
- $ docker-compose -f docker-compose.yaml up
- ```
-diff --git a/docs/operations/loki-canary.md b/docs/operations/loki-canary.md
-index 7aa31373b6ea1..5709b2f756d77 100644
---- a/docs/operations/loki-canary.md
-+++ b/docs/operations/loki-canary.md
-@@ -67,7 +67,7 @@ Loki Canary is also provided as a Docker container image:
-
- ```bash
- # change tag to the most recent release
--$ docker pull grafana/loki-canary:1.4.1
-+$ docker pull grafana/loki-canary:1.5.0
- ```
-
- ### Kubernetes
-diff --git a/docs/operations/upgrade.md b/docs/operations/upgrade.md
-index 9cf4deb10a218..bbd142fd41e05 100644
---- a/docs/operations/upgrade.md
-+++ b/docs/operations/upgrade.md
-@@ -8,6 +8,10 @@ On this page we will document any upgrade issues/gotchas/considerations we are a
-
- ## 1.5.0
-
-+Note: The required upgrade path outlined for version 1.4.0 below is still true for moving to 1.5.0 from any release older than 1.4.0 (e.g. 1.3.0->1.5.0 needs to also look at the 1.4.0 upgrade requirements).
-+
-+### Breaking config changes!
-+
- Loki 1.5.0 vendors Cortex v1.0.0 (congratulations!), which has a [massive list of changes](https://cortexmetrics.io/docs/changelog/#1-0-0-2020-04-02).
-
- While changes in the command line flags affect Loki as well, we usually recommend people to use configuration file instead.
-@@ -16,6 +20,92 @@ Cortex has done lot of cleanup in the configuration files, and you are strongly
-
- Following fields were removed from YAML configuration completely: `claim_on_rollout` (always true), `normalise_tokens` (always true).
-
-+#### Test Your Config
-+
-+To see if your config needs to change, one way to quickly test is to download a 1.5.0 (or newer) binary from the [release page](https://github.com/grafana/loki/releases/tag/v1.5.0)
-+
-+Then run the binary providing your config file `./loki-linux-amd64 -config.file=myconfig.yaml`
-+
-+If there are configs which are no longer valid you will see errors immediately:
-+
-+```shell
-+./loki-linux-amd64 -config.file=loki-local-config.yaml
-+failed parsing config: loki-local-config.yaml: yaml: unmarshal errors:
-+ line 35: field dynamodbconfig not found in type aws.StorageConfig
-+```
-+
-+Referencing the [list of diffs](https://cortexmetrics.io/docs/changelog/#config-file-breaking-changes) I can see this config changed:
-+
-+```diff
-+- dynamodbconfig:
-++ dynamodb:
-+```
-+
-+Also several other AWS related configs changed and would need to udpate those as well.
-+
-+
-+### Loki Docker Image User and File Location Changes
-+
-+To improve security concerns, in 1.5.0 the Docker container no longer runs the loki process as `root` and instead the process runs as user `loki` with UID `10001` and GID `10001`
-+
-+This may affect people in a couple ways:
-+
-+#### Loki Port
-+
-+If you are running Loki with a config that opens a port number above 1000 (which is the default, 3100 for HTTP and 9095 for GRPC) everything should work fine in regards to ports.
-+
-+If you are running Loki with a config that opens a port number less than 1000 Linux normally requires root permissions to do this, HOWEVER in the Docker container we run `setcap cap_net_bind_service=+ep /usr/bin/loki`
-+
-+This capability lets the loki process bind to a port less than 1000 when run as a non root user.
-+
-+Not every environment will allow this capability however, it's possible to restrict this capability in linux. If this restriction is in place, you will be forced to run Loki with a config that has HTTP and GRPC ports above 1000.
-+
-+#### Filesystem
-+
-+**Please note the location Loki is looking for files with the provided config in the docker image has changed**
-+
-+In 1.4.0 and earlier the included config file in the docker container was using directories:
-+
-+```
-+/tmp/loki/index
-+/tmp/loki/chunks
-+```
-+
-+In 1.5.0 this has changed:
-+
-+```
-+/loki/index
-+/loki/chunks
-+```
-+
-+This will mostly affect anyone using docker-compose or docker to run Loki and are specifying a volume to persist storage.
-+
-+**There are two concerns to track here, one is the correct ownership of the files and the other is making sure your mounts updated to the new location.**
-+
-+One possible upgrade path would look like this:
-+
-+If I were running Loki with this command `docker run -d --name=loki --mount source=loki-data,target=/tmp/loki -p 3100:3100 grafana/loki:1.4.0`
-+
-+This would mount a docker volume named `loki-data` to the `/temp/loki` folder which is where Loki will persist the `index` and `chunks` folder in 1.4.0
-+
-+To move to 1.5.0 I can do the following (please note that your container names and paths and volumes etc may be different):
-+
-+```
-+docker stop loki
-+docker rm loki
-+docker run --rm --name=""loki-perm"" -it --mount source=loki-data,target=/mnt ubuntu /bin/bash
-+cd /mnt
-+chown -R 10001:10001 ./*
-+exit
-+docker run -d --name=loki --mount source=loki-data,target=/loki -p 3100:3100 grafana/loki:1.5.0
-+```
-+
-+Notice the change in the `target=/loki` for 1.5.0 to the new data directory location specified in the [included Loki config file](../../cmd/loki/loki-docker-config.yaml).
-+
-+The intermediate step of using an ubuntu image to change the ownership of the Loki files to the new user might not be necessary if you can easily access these files to run the `chown` command directly.
-+That is if you have access to `/var/lib/docker/volumes` or if you mounted to a different local filesystem directory, you can change the ownership directly without using a container.
-+
-+
- ## 1.4.0
-
- Loki 1.4.0 vendors Cortex v0.7.0-rc.0 which contains [several breaking config changes](https://github.com/cortexproject/cortex/blob/v0.7.0-rc.0/CHANGELOG.md).
-@@ -84,4 +174,3 @@ If you attempt to add a v1.4.0 ingester to a ring created by Loki v1.2.0 or olde
- This will result in distributors failing to write and a general ingestion failure for the system.
-
- If this happens to you, you will want to rollback your deployment immediately. You need to remove the v1.4.0 ingester from the ring ASAP, this should allow the existing ingesters to re-insert their tokens. You will also want to remove any v1.4.0 distributors as they will not understand the old ring either and will fail to send traffic.
--
-diff --git a/production/docker-compose.yaml b/production/docker-compose.yaml
-index 80edcd8651e52..b31014645673e 100644
---- a/production/docker-compose.yaml
-+++ b/production/docker-compose.yaml
-@@ -5,7 +5,7 @@ networks:
-
- services:
- loki:
-- image: grafana/loki:1.4.1
-+ image: grafana/loki:1.5.0
- ports:
- - ""3100:3100""
- command: -config.file=/etc/loki/local-config.yaml
-@@ -13,7 +13,7 @@ services:
- - loki
-
- promtail:
-- image: grafana/promtail:1.4.1
-+ image: grafana/promtail:1.5.0
- volumes:
- - /var/log:/var/log
- command: -config.file=/etc/promtail/docker-config.yaml",unknown,"Prep 1.5.0 release (#2098)
-
-* Updating the Changelog and Upgrade guide for 1.5.0 release.
-
-Signed-off-by: Ed Welch
-
-* Changing release number in all the docs
-
-Signed-off-by: Ed Welch "
-a2060efd6332464d3239ebce882e8d4a1fcd61e3,2019-09-26 18:33:56,polar3130,"Helm: Remove default value of storageClassName in loki/loki helm chart (#1058)
-
-* delete default name of storageClassName ""default""
-
-* bump loki stack",False,"diff --git a/production/helm/loki-stack/Chart.yaml b/production/helm/loki-stack/Chart.yaml
-index 687f2668abcde..ed11e9e5f2fca 100644
---- a/production/helm/loki-stack/Chart.yaml
-+++ b/production/helm/loki-stack/Chart.yaml
-@@ -1,5 +1,5 @@
- name: loki-stack
--version: 0.16.3
-+version: 0.16.4
- appVersion: v0.3.0
- kubeVersion: ""^1.10.0-0""
- description: ""Loki: like Prometheus, but for logs.""
-diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
-index f0de1c0f9b7cb..3936cc566ccaa 100644
---- a/production/helm/loki/Chart.yaml
-+++ b/production/helm/loki/Chart.yaml
-@@ -1,5 +1,5 @@
- name: loki
--version: 0.14.2
-+version: 0.14.3
- appVersion: v0.3.0
- kubeVersion: ""^1.10.0-0""
- description: ""Loki: like Prometheus, but for logs.""
-diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
-index 826e5ca793fbb..c82c68e794214 100644
---- a/production/helm/loki/values.yaml
-+++ b/production/helm/loki/values.yaml
-@@ -96,7 +96,6 @@ persistence:
- accessModes:
- - ReadWriteOnce
- size: 10Gi
-- storageClassName: default
- annotations: {}
- # subPath: """"
- # existingClaim:",Helm,"Remove default value of storageClassName in loki/loki helm chart (#1058)
-
-* delete default name of storageClassName ""default""
-
-* bump loki stack"
-94d1550e9cb60d35a175d1e186acdb70670d57bf,2022-01-04 14:18:39,Karen Miller,Docs: improve Promtail installation prose (#5017),False,"diff --git a/docs/sources/clients/promtail/installation.md b/docs/sources/clients/promtail/installation.md
-index 380c867d92989..fb0fde141e68c 100644
---- a/docs/sources/clients/promtail/installation.md
-+++ b/docs/sources/clients/promtail/installation.md
-@@ -3,8 +3,8 @@ title: Installation
- ---
- # Install Promtail
-
--Promtail is distributed as a [binary](#binary), [Docker container](#docker), and
--[Helm chart](#helm).
-+Promtail is distributed as a binary, in a Docker container,
-+or there is a Helm chart to install it in a Kubernetes cluster.
-
- ## Binary
-
-@@ -20,8 +20,8 @@ docker pull grafana/promtail:2.0.0
-
- ## Helm
-
--Make sure that Helm is
--[installed](https://helm.sh/docs/using_helm/#installing-helm).
-+Make sure that Helm is installed.
-+See [Installing Helm](https://helm.sh/docs/intro/install/).
- Then you can add Grafana's chart repository to Helm:
-
- ```bash
-@@ -46,7 +46,7 @@ $ helm upgrade --install promtail grafana/promtail --set ""loki.serviceName=loki""
-
- A `DaemonSet` will deploy Promtail on every node within a Kubernetes cluster.
-
--The DaemonSet deployment is great to collect the logs of all containers within a
-+The DaemonSet deployment works well at collecting the logs of all containers within a
- cluster. It's the best solution for a single-tenant model.
-
- ```yaml",Docs,improve Promtail installation prose (#5017)
-4b4655300ccbd992816ac4013dbd79aef20bcd00,2024-11-15 19:22:36,renovate[bot],"fix(deps): update module golang.org/x/time to v0.8.0 (#14930)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod
-index e190c8ade0070..dc03337ac00e1 100644
---- a/go.mod
-+++ b/go.mod
-@@ -102,7 +102,7 @@ require (
- golang.org/x/net v0.30.0
- golang.org/x/sync v0.9.0
- golang.org/x/sys v0.27.0
-- golang.org/x/time v0.7.0
-+ golang.org/x/time v0.8.0
- google.golang.org/api v0.203.0
- google.golang.org/grpc v1.68.0
- gopkg.in/alecthomas/kingpin.v2 v2.2.6
-diff --git a/go.sum b/go.sum
-index d83f44d81b152..0d498e901e62b 100644
---- a/go.sum
-+++ b/go.sum
-@@ -3288,8 +3288,8 @@ golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxb
- golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
- golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
- golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
--golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
--golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
-+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
- golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
- golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
- golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-diff --git a/vendor/modules.txt b/vendor/modules.txt
-index 244ed31f53211..0789e209ff2cf 100644
---- a/vendor/modules.txt
-+++ b/vendor/modules.txt
-@@ -1931,7 +1931,7 @@ golang.org/x/text/secure/bidirule
- golang.org/x/text/transform
- golang.org/x/text/unicode/bidi
- golang.org/x/text/unicode/norm
--# golang.org/x/time v0.7.0
-+# golang.org/x/time v0.8.0
- ## explicit; go 1.18
- golang.org/x/time/rate
- # golang.org/x/tools v0.23.0",fix,"update module golang.org/x/time to v0.8.0 (#14930)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>"
-0ade92aef0658db021f4f52232925c99ff00824d,2018-11-19 23:39:59,Goutham Veeramachaneni,"Initial Mixin (#25)
-
-Signed-off-by: Goutham Veeramachaneni ",False,"diff --git a/.gitignore b/.gitignore
-index 4ff530d89f31f..ec955370bddc9 100644
---- a/.gitignore
-+++ b/.gitignore
-@@ -10,3 +10,4 @@ cmd/querier/querier
- cmd/promtail/promtail
- *.output
- /images/
-+mixin/vendor/
-diff --git a/mixin/alerts.libsonnet b/mixin/alerts.libsonnet
-new file mode 100644
-index 0000000000000..a43ac122415b0
---- /dev/null
-+++ b/mixin/alerts.libsonnet
-@@ -0,0 +1,120 @@
-+{
-+ prometheusAlerts+:: {
-+ groups+: [
-+ {
-+ name: 'logish_alerts',
-+ rules: [
-+ {
-+ alert: 'LogishRequestErrors',
-+ expr: |||
-+ 100 * sum(rate(logish_request_duration_seconds_count{status_code=~""5..""}[1m])) by (namespace, job, route)
-+ /
-+ sum(rate(logish_request_duration_seconds_count[1m])) by (namespace, job, route)
-+ > 10
-+ |||,
-+ 'for': '15m',
-+ labels: {
-+ severity: 'critical',
-+ },
-+ annotations: {
-+ message: |||
-+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}% errors.
-+ |||,
-+ },
-+ },
-+ {
-+ alert: 'LogishRequestLatency',
-+ expr: |||
-+ namespace_job_route:logish_request_duration_seconds:99quantile > 1
-+ |||,
-+ 'for': '15m',
-+ labels: {
-+ severity: 'critical',
-+ },
-+ annotations: {
-+ message: |||
-+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}s 99th percentile latency.
-+ |||,
-+ },
-+ },
-+ ],
-+ },
-+ {
-+ name: 'logish_frontend_alerts',
-+ rules: [
-+ {
-+ alert: 'FrontendRequestErrors',
-+ expr: |||
-+ 100 * sum(rate(cortex_gw_request_duration_seconds_count{status_code=~""5..""}[1m])) by (namespace, job, route)
-+ /
-+ sum(rate(cortex_gw_request_duration_seconds_count[1m])) by (namespace, job, route)
-+ > 10
-+ |||,
-+ 'for': '15m',
-+ labels: {
-+ severity: 'critical',
-+ },
-+ annotations: {
-+ message: |||
-+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}% errors.
-+ |||,
-+ },
-+ },
-+ {
-+ alert: 'FrontendRequestLatency',
-+ expr: |||
-+ namespace_job_route:cortex_gw_request_duration_seconds:99quantile > 1
-+ |||,
-+ 'for': '15m',
-+ labels: {
-+ severity: 'critical',
-+ },
-+ annotations: {
-+ message: |||
-+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}s 99th percentile latency.
-+ |||,
-+ },
-+ },
-+ ],
-+ },
-+ {
-+ name: 'promtail_alerts',
-+ rules: [
-+ {
-+ alert: 'PromtailRequestsErrors',
-+ expr: |||
-+ 100 * sum(rate(promtail_request_duration_seconds_count{status_code=~""5..|failed""}[1m])) by (namespace, job, route, instance)
-+ /
-+ sum(rate(promtail_request_duration_seconds_count[1m])) by (namespace, job, route, instance)
-+ > 10
-+ |||,
-+ 'for': '15m',
-+ labels: {
-+ severity: 'critical',
-+ },
-+ annotations: {
-+ message: |||
-+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}% errors.
-+ |||,
-+ },
-+ },
-+ {
-+ alert: 'PromtailRequestLatency',
-+ expr: |||
-+ job_status_code:promtail_request_duration_seconds:99quantile > 1
-+ |||,
-+ 'for': '15m',
-+ labels: {
-+ severity: 'critical',
-+ },
-+ annotations: {
-+ message: |||
-+ {{ $labels.job }} {{ $labels.route }} is experiencing {{ printf ""%.2f"" $value }}s 99th percentile latency.
-+ |||,
-+ },
-+ },
-+ ],
-+ },
-+ ],
-+ },
-+}
-\ No newline at end of file
-diff --git a/mixin/dashboards.libsonnet b/mixin/dashboards.libsonnet
-new file mode 100644
-index 0000000000000..fe20a63e9fcd4
---- /dev/null
-+++ b/mixin/dashboards.libsonnet
-@@ -0,0 +1,164 @@
-+local g = import 'grafana-builder/grafana.libsonnet';
-+
-+{
-+ dashboards+: {
-+ 'logish-writes.json':
-+ g.dashboard('Logish / Writes')
-+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster')
-+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace')
-+ .addRow(
-+ g.row('Frontend (cortex_gw)')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw"", route=""cortex-write""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('cortex_gw_request_duration_seconds', [g.selector.eq('job', '$namespace/cortex-gw'), g.selector.eq('route', 'cortex-write')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ )
-+ .addRow(
-+ g.row('Distributor')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/distributor"", route=""api_prom_push""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/distributor'), g.selector.eq('route', 'api_prom_push')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ )
-+ .addRow(
-+ g.row('Ingester')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route=""/logproto.Pusher/Push""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.eq('route', '/logproto.Pusher/Push')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ ),
-+
-+ 'logish-reads.json':
-+ g.dashboard('logish / Reads')
-+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster')
-+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace')
-+ .addRow(
-+ g.row('Frontend (cortex_gw)')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw"", route=""cortex-read""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('cortex_gw_request_duration_seconds', [g.selector.eq('job', '$namespace/cortex-gw'), g.selector.eq('route', 'cortex-read')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ )
-+ .addRow(
-+ g.row('Querier')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/querier""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/querier')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ )
-+ .addRow(
-+ g.row('Ingester')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('logish_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/ingester"",route!~""/logproto.Pusher/Push|metrics|ready|traces""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('logish_request_duration_seconds', [g.selector.eq('job', '$namespace/ingester'), g.selector.nre('route', '/logproto.Pusher/Push|metrics|ready')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ ),
-+
-+
-+ 'logish-chunks.json':
-+ g.dashboard('Logish / Chunks')
-+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster')
-+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace')
-+ .addRow(
-+ g.row('Active Series / Chunks')
-+ .addPanel(
-+ g.panel('Series') +
-+ g.queryPanel('sum(logish_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""})', 'series'),
-+ )
-+ .addPanel(
-+ g.panel('Chunks per series') +
-+ g.queryPanel('sum(logish_ingester_memory_chunks{cluster=""$cluster"", job=""$namespace/ingester""}) / sum(logish_ingester_memory_series{job=""$namespace/ingester""})', 'chunks'),
-+ )
-+ )
-+ .addRow(
-+ g.row('Flush Stats')
-+ .addPanel(
-+ g.panel('Utilization') +
-+ g.latencyPanel('logish_ingester_chunk_utilization', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') +
-+ { yaxes: g.yaxes('percentunit') },
-+ )
-+ .addPanel(
-+ g.panel('Age') +
-+ g.latencyPanel('logish_ingester_chunk_age_seconds', '{cluster=""$cluster"", job=""$namespace/ingester""}'),
-+ ),
-+ )
-+ .addRow(
-+ g.row('Flush Stats')
-+ .addPanel(
-+ g.panel('Size') +
-+ g.latencyPanel('logish_ingester_chunk_length', '{cluster=""$cluster"", job=""$namespace/ingester""}', multiplier='1') +
-+ { yaxes: g.yaxes('short') },
-+ )
-+ .addPanel(
-+ g.panel('Entries') +
-+ g.queryPanel('sum(rate(logish_chunk_store_index_entries_per_chunk_sum{cluster=""$cluster"", job=""$namespace/ingester""}[5m])) / sum(rate(logish_chunk_store_index_entries_per_chunk_count{cluster=""$cluster"", job=""$namespace/ingester""}[5m]))', 'entries'),
-+ ),
-+ )
-+ .addRow(
-+ g.row('Flush Stats')
-+ .addPanel(
-+ g.panel('Queue Length') +
-+ g.queryPanel('logish_ingester_flush_queue_length{cluster=""$cluster"", job=""$namespace/ingester""}', '{{instance}}'),
-+ )
-+ .addPanel(
-+ g.panel('Flush Rate') +
-+ g.qpsPanel('logish_ingester_chunk_age_seconds_count{cluster=""$cluster"", job=""$namespace/ingester""}'),
-+ ),
-+ ),
-+
-+ 'logish-frontend.json':
-+ g.dashboard('Logish / Frontend')
-+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster')
-+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace')
-+ .addRow(
-+ g.row('logish Reqs (cortex_gw)')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('cortex_gw_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/cortex-gw""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('cortex_gw_request_duration_seconds', [g.selector.eq('job', '$namespace/cortex-gw')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ ),
-+ 'promtail.json':
-+ g.dashboard('Logish / Promtail')
-+ .addTemplate('cluster', 'kube_pod_container_info{image=~"".*logish.*""}', 'cluster')
-+ .addTemplate('namespace', 'kube_pod_container_info{image=~"".*logish.*""}', 'namespace')
-+ .addRow(
-+ g.row('promtail Reqs')
-+ .addPanel(
-+ g.panel('QPS') +
-+ g.qpsPanel('promtail_request_duration_seconds_count{cluster=""$cluster"", job=""$namespace/promtail""}')
-+ )
-+ .addPanel(
-+ g.panel('Latency') +
-+ g.latencyRecordingRulePanel('promtail_request_duration_seconds', [g.selector.eq('job', '$namespace/promtail')], extra_selectors=[g.selector.eq('cluster', '$cluster')])
-+ )
-+ )
-+ },
-+}
-\ No newline at end of file
-diff --git a/mixin/jsonnetfile.json b/mixin/jsonnetfile.json
-new file mode 100644
-index 0000000000000..3a5d8ad2223e4
---- /dev/null
-+++ b/mixin/jsonnetfile.json
-@@ -0,0 +1,14 @@
-+{
-+ ""dependencies"": [
-+ {
-+ ""name"": ""grafana-builder"",
-+ ""source"": {
-+ ""git"": {
-+ ""remote"": ""https://github.com/kausalco/public"",
-+ ""subdir"": ""grafana-builder""
-+ }
-+ },
-+ ""version"": ""master""
-+ }
-+ ]
-+}
-\ No newline at end of file
-diff --git a/mixin/jsonnetfile.lock.json b/mixin/jsonnetfile.lock.json
-new file mode 100644
-index 0000000000000..933c3204b739c
---- /dev/null
-+++ b/mixin/jsonnetfile.lock.json
-@@ -0,0 +1,14 @@
-+{
-+ ""dependencies"": [
-+ {
-+ ""name"": ""grafana-builder"",
-+ ""source"": {
-+ ""git"": {
-+ ""remote"": ""https://github.com/kausalco/public"",
-+ ""subdir"": ""grafana-builder""
-+ }
-+ },
-+ ""version"": ""cab274f882aae97ad6add33590a3b149e6f8eeac""
-+ }
-+ ]
-+}
-\ No newline at end of file
-diff --git a/mixin/mixin.libsonnet b/mixin/mixin.libsonnet
-new file mode 100644
-index 0000000000000..a684acd630f0f
---- /dev/null
-+++ b/mixin/mixin.libsonnet
-@@ -0,0 +1,3 @@
-+(import 'dashboards.libsonnet') +
-+(import 'alerts.libsonnet') +
-+(import 'recording_rules.libsonnet')
-\ No newline at end of file
-diff --git a/mixin/recording_rules.libsonnet b/mixin/recording_rules.libsonnet
-new file mode 100644
-index 0000000000000..18404e2fe9749
---- /dev/null
-+++ b/mixin/recording_rules.libsonnet
-@@ -0,0 +1,43 @@
-+local histogramRules(metric, labels) =
-+ local vars = {
-+ metric: metric,
-+ labels_underscore: std.join('_', labels),
-+ labels_comma: std.join(', ', labels),
-+ };
-+ [
-+ {
-+ record: '%(labels_underscore)s:%(metric)s:99quantile' % vars,
-+ expr: 'histogram_quantile(0.99, sum(rate(%(metric)s_bucket[5m])) by (le, %(labels_comma)s))' % vars,
-+ },
-+ {
-+ record: '%(labels_underscore)s:%(metric)s:50quantile' % vars,
-+ expr: 'histogram_quantile(0.50, sum(rate(%(metric)s_bucket[5m])) by (le, %(labels_comma)s))' % vars,
-+ },
-+ {
-+ record: '%(labels_underscore)s:%(metric)s:avg' % vars,
-+ expr: 'sum(rate(%(metric)s_sum[5m])) by (%(labels_comma)s) / sum(rate(%(metric)s_count[5m])) by (%(labels_comma)s)' % vars,
-+ },
-+ ];
-+
-+{
-+ prometheus_rules+:: {
-+ groups+: [{
-+ name: 'logish_rules',
-+ rules:
-+ histogramRules('logish_request_duration_seconds', ['job']) +
-+ histogramRules('logish_request_duration_seconds', ['job', 'route']) +
-+ histogramRules('logish_request_duration_seconds', ['namespace', 'job', 'route']),
-+ }, {
-+ name: 'logish_frontend_rules',
-+ rules:
-+ histogramRules('cortex_gw_request_duration_seconds', ['job']) +
-+ histogramRules('cortex_gw_request_duration_seconds', ['job', 'route']) +
-+ histogramRules('cortex_gw_request_duration_seconds', ['namespace', 'job', 'route']),
-+ }, {
-+ name: 'promtail_rules',
-+ rules:
-+ histogramRules('promtail_request_duration_seconds', ['job']) +
-+ histogramRules('promtail_request_duration_seconds', ['job', 'status_code']),
-+ }],
-+ },
-+}
-\ No newline at end of file",unknown,"Initial Mixin (#25)
-
-Signed-off-by: Goutham Veeramachaneni "
-93a5a71e621f742b47fdb49d4f11ae6c0ead27b9,2022-09-05 14:51:26,李国忠,"[doc] logql: logql engine support exec vector(0) grama (#7044)
-
-
-
-**What this PR does / why we need it**:
-logql engine support exec vector(0) grama.
-new PR of :https://github.com/grafana/loki/pull/7023
-
-**Which issue(s) this PR fixes**:
-Fixes #6946
-
-**Special notes for your reviewer**:
-preview
-
-
-
-**Checklist**
-- [ ] Documentation added
-- [ ] Tests updated
-- [ ] Is this an important fix or new feature? Add an entry in the `CHANGELOG.md`.
-- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md`
-
-Co-authored-by: Danny Kopping ",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md
-index 26938816d84e1..81d0d1328ebaa 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -5,6 +5,7 @@
- #### Loki
-
- ##### Enhancements
-+* [7023](https://github.com/grafana/loki/pull/7023) **liguozhong**: logql engine support exec `vector(0)` grammar.
- * [6983](https://github.com/grafana/loki/pull/6983) **slim-bean**: `__timestamp__` and `__line__` are now available in the logql `label_format` query stage.
- * [6821](https://github.com/grafana/loki/pull/6821) **kavirajk**: Introduce new cache type `embedded-cache` which is an in-process cache system that runs loki without the need for an external cache (like memcached, redis, etc). It can be run in two modes `distributed: false` (default, and same as old `fifocache`) and `distributed: true` which runs cache in distributed fashion sharding keys across peers if Loki is run in microservices or SSD mode.
- * [6691](https://github.com/grafana/loki/pull/6691) **dannykopping**: Update production-ready Loki cluster in docker-compose
-diff --git a/docs/sources/logql/metric_queries.md b/docs/sources/logql/metric_queries.md
-index a26f668b33f76..2a2d03bdcdf53 100644
---- a/docs/sources/logql/metric_queries.md
-+++ b/docs/sources/logql/metric_queries.md
-@@ -121,3 +121,20 @@ The `without` clause removes the listed labels from the resulting vector, keepin
- The `by` clause does the opposite, dropping labels that are not listed in the clause, even if their label values are identical between all elements of the vector.
-
- See [vector aggregation examples](../query_examples/#vector-aggregation-examples) for query examples that use vector aggregation expressions.
-+
-+## Functions
-+
-+LogQL supports a set of built-in functions.
-+
-+- `vector(s scalar)`: returns the scalar s as a vector with no labels. This behaves identically to the [Prometheus `vector()` function](https://prometheus.io/docs/prometheus/latest/querying/functions/#vector).
-+ `vector` is mainly used to return a value for a series that would otherwise return nothing; this can be useful when using LogQL to define an alert.
-+
-+Examples:
-+
-+- Count all the log lines within the last five minutes for the traefik namespace.
-+
-+ ```logql
-+ sum(count_over_time({namespace=""traefik""}[5m])) # will return nothing
-+ or
-+ vector(0) # will return 0
-+ ```",unknown,"[doc] logql: logql engine support exec vector(0) grama (#7044)
-
-
-
-**What this PR does / why we need it**:
-logql engine support exec vector(0) grama.
-new PR of :https://github.com/grafana/loki/pull/7023
-
-**Which issue(s) this PR fixes**:
-Fixes #6946
-
-**Special notes for your reviewer**:
-preview
-
-
-
-**Checklist**
-- [ ] Documentation added
-- [ ] Tests updated
-- [ ] Is this an important fix or new feature? Add an entry in the `CHANGELOG.md`.
-- [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/upgrading/_index.md`
-
-Co-authored-by: Danny Kopping "
-46d3dec010f09158c2ad2c95c1958455f01d0e07,2022-10-13 12:09:11,Joel Verezhak,"[loki-canary] Allow insecure TLS connections (#7398)
-
-**What this PR does / why we need it**:
-This change allows client certificates signed by a self-signed
-certificate authority to be used by the Loki canary.
-
-**Which issue(s) this PR fixes**:
-Fixes #4366
-
-**Special notes for your reviewer**:
-This has been tested on linux amd64 with self-signed certificates.
-
-**Checklist**
-- [x] Reviewed the `CONTRIBUTING.md` guide
-- [x] Documentation added
-- [x] Tests updated
-- [x] `CHANGELOG.md` updated
-- [x] Changes that require user attention or interaction to upgrade are
-documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md
-index 33d526c0f4797..e5a5224dc126e 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -66,6 +66,7 @@
- #### Fluent Bit
-
- #### Loki Canary
-+* [7398](https://github.com/grafana/loki/pull/7398) **verejoel**: Allow insecure TLS connections
-
- #### Jsonnet
- * [6189](https://github.com/grafana/loki/pull/6189) **irizzant**: Add creation of a `ServiceMonitor` object for Prometheus scraping through configuration parameter `create_service_monitor`. Simplify mixin usage by adding (https://github.com/prometheus-operator/kube-prometheus) library.
-diff --git a/cmd/loki-canary/main.go b/cmd/loki-canary/main.go
-index 881412a25b8b2..67903e89516bc 100644
---- a/cmd/loki-canary/main.go
-+++ b/cmd/loki-canary/main.go
-@@ -53,6 +53,7 @@ func main() {
- certFile := flag.String(""cert-file"", """", ""Client PEM encoded X.509 certificate for optional use with TLS connection to Loki"")
- keyFile := flag.String(""key-file"", """", ""Client PEM encoded X.509 key for optional use with TLS connection to Loki"")
- caFile := flag.String(""ca-file"", """", ""Client certificate authority for optional use with TLS connection to Loki"")
-+ insecureSkipVerify := flag.Bool(""insecure"", false, ""Allow insecure TLS connections"")
- user := flag.String(""user"", """", ""Loki username."")
- pass := flag.String(""pass"", """", ""Loki password. This credential should have both read and write permissions to Loki endpoints"")
- tenantID := flag.String(""tenant-id"", """", ""Tenant ID to be set in X-Scope-OrgID header."")
-@@ -113,7 +114,7 @@ func main() {
- tc.CAFile = *caFile
- tc.CertFile = *certFile
- tc.KeyFile = *keyFile
-- tc.InsecureSkipVerify = false
-+ tc.InsecureSkipVerify = *insecureSkipVerify
-
- var err error
- tlsConfig, err = config.NewTLSConfig(&tc)
-diff --git a/docs/sources/operations/loki-canary.md b/docs/sources/operations/loki-canary.md
-index 051800654828b..1169728f4581c 100644
---- a/docs/sources/operations/loki-canary.md
-+++ b/docs/sources/operations/loki-canary.md
-@@ -303,74 +303,75 @@ All options:
-
- ```
- -addr string
-- The Loki server URL:Port, e.g. loki:3100
-+ The Loki server URL:Port, e.g. loki:3100
- -buckets int
-- Number of buckets in the response_latency histogram (default 10)
-+ Number of buckets in the response_latency histogram (default 10)
-+ -ca-file string
-+ Client certificate authority for optional use with TLS connection to Loki
-+ -cert-file string
-+ Client PEM encoded X.509 certificate for optional use with TLS connection to Loki
-+ -insecure
-+ Allow insecure TLS connections
- -interval duration
-- Duration between log entries (default 1s)
-+ Duration between log entries (default 1s)
-+ -key-file string
-+ Client PEM encoded X.509 key for optional use with TLS connection to Loki
- -labelname string
-- The label name for this instance of Loki Canary to use in the log selector
-- (default ""name"")
-+ The label name for this instance of loki-canary to use in the log selector (default ""name"")
- -labelvalue string
-- The unique label value for this instance of Loki Canary to use in the log selector
-- (default ""loki-canary"")
-+ The unique label value for this instance of loki-canary to use in the log selector (default ""loki-canary"")
-+ -max-wait duration
-+ Duration to keep querying Loki for missing websocket entries before reporting them missing (default 5m0s)
- -metric-test-interval duration
-- The interval the metric test query should be run (default 1h0m0s)
-+ The interval the metric test query should be run (default 1h0m0s)
- -metric-test-range duration
-- The range value [24h] used in the metric test instant-query. This value is truncated
-- to the running time of the canary until this value is reached (default 24h0m0s)
-+ The range value [24h] used in the metric test instant-query. Note: this value is truncated to the running time of the canary until this value is reached (default 24h0m0s)
- -out-of-order-max duration
-- Maximum amount of time (in seconds) in the past an out of order entry may have as a
-- timestamp. (default 60s)
-+ Maximum amount of time to go back for out of order entries (in seconds). (default 1m0s)
- -out-of-order-min duration
-- Minimum amount of time (in seconds) in the past an out of order entry may have as a
-- timestamp. (default 30s)
-+ Minimum amount of time to go back for out of order entries (in seconds). (default 30s)
- -out-of-order-percentage int
-- Percentage (0-100) of log entries that should be sent out of order
-+ Percentage (0-100) of log entries that should be sent out of order.
- -pass string
-- Loki password. This credential should have both read and write permissions to Loki endpoints
-+ Loki password. This credential should have both read and write permissions to Loki endpoints
- -port int
-- Port which Loki Canary should expose metrics (default 3500)
-+ Port which loki-canary should expose metrics (default 3500)
- -pruneinterval duration
-- Frequency to check sent versus received logs, and also the frequency at which queries
-- for missing logs will be dispatched to Loki, and the frequency spot check queries are run
-- (default 1m0s)
-+ Frequency to check sent vs received logs, also the frequency which queries for missing logs will be dispatched to loki (default 1m0s)
- -push
-- Push the logs directly to given Loki address
-+ Push the logs directly to given Loki address
- -query-timeout duration
-- How long to wait for a query response from Loki (default 10s)
-+ How long to wait for a query response from Loki (default 10s)
- -size int
-- Size in bytes of each log line (default 100)
-+ Size in bytes of each log line (default 100)
-+ -spot-check-initial-wait duration
-+ How long should the spot check query wait before starting to check for entries (default 10s)
- -spot-check-interval duration
-- Interval that a single result will be kept from sent entries and spot-checked against
-- Loki. For example, with the 15 minute default, one entry every 15 minutes will be saved,
-- and then queried again every 15 minutes until the time defined by spot-check-max is
-- reached (default 15m0s)
-+ Interval that a single result will be kept from sent entries and spot-checked against Loki, e.g. 15min default one entry every 15 min will be saved and then queried again every 15min until spot-check-max is reached (default 15m0s)
- -spot-check-max duration
-- How far back to check a spot check an entry before dropping it (default 4h0m0s)
-+ How far back to check a spot check entry before dropping it (default 4h0m0s)
- -spot-check-query-rate duration
-- Interval that Loki Canary will query Loki for the current list of all spot check entries
-- (default 1m0s)
-+ Interval that the canary will query Loki for the current list of all spot check entries (default 1m0s)
- -streamname string
-- The stream name for this instance of Loki Canary to use in the log selector
-- (default ""stream"")
-+ The stream name for this instance of loki-canary to use in the log selector (default ""stream"")
- -streamvalue string
-- The unique stream value for this instance of Loki Canary to use in the log selector
-- (default ""stdout"")
-+ The unique stream value for this instance of loki-canary to use in the log selector (default ""stdout"")
- -tenant-id string
-- Tenant ID to be set in X-Scope-OrgID header.
-+ Tenant ID to be set in X-Scope-OrgID header.
- -tls
-- Does the Loki connection use TLS?
-+ Does the loki connection use TLS?
- -user string
-- Loki user name
-+ Loki username.
- -version
-- Print this build's version information
-+ Print this builds version information
- -wait duration
-- Duration to wait for log entries before reporting them as lost (default 1m0s)
-+ Duration to wait for log entries on websocket before querying loki for them (default 1m0s)
- -write-max-backoff duration
- Maximum backoff time between retries (default 5m0s)
- -write-max-retries int
- Maximum number of retries when push a log entry (default 10)
- -write-min-backoff duration
- Initial backoff time before first retry (default 500ms)
-+ -write-timeout duration
-+ How long to wait write response from Loki (default 10s)
- ```",unknown,"[loki-canary] Allow insecure TLS connections (#7398)
-
-**What this PR does / why we need it**:
-This change allows client certificates signed by a self-signed
-certificate authority to be used by the Loki canary.
-
-**Which issue(s) this PR fixes**:
-Fixes #4366
-
-**Special notes for your reviewer**:
-This has been tested on linux amd64 with self-signed certificates.
-
-**Checklist**
-- [x] Reviewed the `CONTRIBUTING.md` guide
-- [x] Documentation added
-- [x] Tests updated
-- [x] `CHANGELOG.md` updated
-- [x] Changes that require user attention or interaction to upgrade are
-documented in `docs/sources/upgrading/_index.md`"
-f22527f3d91b58c230b8ea1f831b5221060d1bbe,2025-02-10 21:05:55,Paul Rogers,fix(ci): Pass image tag details to logcli docker build (#16159),False,"diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile
-index ddccaff2a7813..c35f7fbe07dd3 100644
---- a/cmd/logcli/Dockerfile
-+++ b/cmd/logcli/Dockerfile
-@@ -1,9 +1,10 @@
- ARG GO_VERSION=1.23
-+ARG IMAGE_TAG
- FROM golang:${GO_VERSION} AS build
-
- COPY . /src/loki
- WORKDIR /src/loki
--RUN make clean && make BUILD_IN_CONTAINER=false logcli
-+RUN make clean && make BUILD_IN_CONTAINER=false IMAGE_TAG=${IMAGE_TAG} logcli
-
-
- FROM gcr.io/distroless/static:debug",fix,Pass image tag details to logcli docker build (#16159)
-d1ae91cea72ba80de744f6ba315cfea525c6924f,2020-02-05 20:22:37,Robert Fratto,ci: pin plugins/manifest image tag (#1637),False,"diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
-index a43b131c28498..1531870334c20 100644
---- a/.drone/drone.jsonnet
-+++ b/.drone/drone.jsonnet
-@@ -113,7 +113,7 @@ local manifest(apps) = pipeline('manifest') {
- steps: std.foldl(
- function(acc, app) acc + [{
- name: 'manifest-' + app,
-- image: 'plugins/manifest',
-+ image: 'plugins/manifest:1.2.3',
- settings: {
- // the target parameter is abused for the app's name,
- // as it is unused in spec mode. See docker-manifest.tmpl
-diff --git a/.drone/drone.yml b/.drone/drone.yml
-index d159080238a81..46251c351c50e 100644
---- a/.drone/drone.yml
-+++ b/.drone/drone.yml
-@@ -504,7 +504,7 @@ platform:
-
- steps:
- - name: manifest-promtail
-- image: plugins/manifest
-+ image: plugins/manifest:1.2.3
- settings:
- password:
- from_secret: docker_password
-@@ -516,7 +516,7 @@ steps:
- - clone
-
- - name: manifest-loki
-- image: plugins/manifest
-+ image: plugins/manifest:1.2.3
- settings:
- password:
- from_secret: docker_password
-@@ -529,7 +529,7 @@ steps:
- - manifest-promtail
-
- - name: manifest-loki-canary
-- image: plugins/manifest
-+ image: plugins/manifest:1.2.3
- settings:
- password:
- from_secret: docker_password",ci,pin plugins/manifest image tag (#1637)
-0b629147ce591d598ff51d3c4b9bd9acb6510bcb,2019-11-05 22:58:34,Wojtek,Fixed whitespaces in example ingress yaml (#1082),False,"diff --git a/docs/installation/helm.md b/docs/installation/helm.md
-index 5cf2e711ea83f..0f3a42cab3f22 100644
---- a/docs/installation/helm.md
-+++ b/docs/installation/helm.md
-@@ -95,21 +95,21 @@ Sample Helm template for Ingress:
- apiVersion: extensions/v1beta1
- kind: Ingress
- metadata:
--annotations:
-+ annotations:
- kubernetes.io/ingress.class: {{ .Values.ingress.class }}
- ingress.kubernetes.io/auth-type: ""basic""
- ingress.kubernetes.io/auth-secret: {{ .Values.ingress.basic.secret }}
--name: loki
-+ name: loki
- spec:
--rules:
--- host: {{ .Values.ingress.host }}
-+ rules:
-+ - host: {{ .Values.ingress.host }}
- http:
-- paths:
-- - backend:
-- serviceName: loki
-- servicePort: 3100
--tls:
--- secretName: {{ .Values.ingress.cert }}
-+ paths:
-+ - backend:
-+ serviceName: loki
-+ servicePort: 3100
-+ tls:
-+ - secretName: {{ .Values.ingress.cert }}
- hosts:
- - {{ .Values.ingress.host }}
- ```",unknown,Fixed whitespaces in example ingress yaml (#1082)
-2ac409c23e834474b89f7f0974a859c271219e7d,2021-11-04 19:24:08,Ed Welch,"Build: simplify how protos are built (#4639)
-
-* we always seem to be chasing our tails with how protos are generated and Makes use of timestamps to determine if files should be recompiled. Instead of touching files and altering timestamps always delete the compiled proto files when calling `protos` to make sure they are compiled every time.
-
-Removed targets to build protos, yaccs, and ragel files when trying to build loki or the canary. This isn't necesary, if you are changing these files you would know you need to build them and the `check-generated-files` should catch any changes to them not committed.
-
-* rm -rf
-
-* we have a race between check-generated-files and our other steps, so let that run first.
-
-also removing `check-generated-files` from the `all` target because its redundant with a separate step and could also race with the parallel lint
-
-* remove TOUCH_PROTOS
-
-* more cleanup of TOUCH_PROTOS",False,"diff --git a/.circleci/config.yml b/.circleci/config.yml
-index 90a3e4c2604bd..f54f68ca03777 100644
---- a/.circleci/config.yml
-+++ b/.circleci/config.yml
-@@ -55,9 +55,6 @@ jobs:
- steps:
- - checkout
- - setup_remote_docker
-- - run:
-- name: touch-protos
-- command: make touch-protos
- - run:
- name: build
- command: make GOOS=windows GOGC=10 promtail
-diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
-index b2efdbe2323bd..eca7e77caf4c7 100644
---- a/.drone/drone.jsonnet
-+++ b/.drone/drone.jsonnet
-@@ -210,7 +210,6 @@ local promtail(arch) = pipeline('promtail-' + arch) + arch_image(arch) {
- when: condition('exclude').tagMain,
- settings+: {
- dry_run: true,
-- build_args: ['TOUCH_PROTOS=1'],
- },
- },
- ] + [
-@@ -218,9 +217,7 @@ local promtail(arch) = pipeline('promtail-' + arch) + arch_image(arch) {
- clients_docker(arch, 'promtail') {
- depends_on: ['image-tag'],
- when: condition('include').tagMain,
-- settings+: {
-- build_args: ['TOUCH_PROTOS=1'],
-- },
-+ settings+: {},
- },
- ],
- depends_on: ['check'],
-@@ -249,9 +246,7 @@ local lambda_promtail(tags='') = pipeline('lambda-promtail'){
- lambda_promtail_ecr('lambda-promtail') {
- depends_on: ['image-tag'],
- when: condition('include').tagMain,
-- settings+: {
-- build_args: ['TOUCH_PROTOS=1'],
-- },
-+ settings+: {},
- },
- ],
- depends_on: ['check'],
-@@ -265,7 +260,6 @@ local multiarch_image(arch) = pipeline('docker-' + arch) + arch_image(arch) {
- when: condition('exclude').tagMain,
- settings+: {
- dry_run: true,
-- build_args: ['TOUCH_PROTOS=1'],
- },
- }
- for app in apps
-@@ -274,9 +268,7 @@ local multiarch_image(arch) = pipeline('docker-' + arch) + arch_image(arch) {
- docker(arch, app) {
- depends_on: ['image-tag'],
- when: condition('include').tagMain,
-- settings+: {
-- build_args: ['TOUCH_PROTOS=1'],
-- },
-+ settings+: {},
- }
- for app in apps
- ],
-@@ -323,9 +315,9 @@ local manifest(apps) = pipeline('manifest') {
- path: 'loki',
- },
- steps: [
-- make('test', container=false) { depends_on: ['clone'] },
-- make('lint', container=false) { depends_on: ['clone'] },
- make('check-generated-files', container=false) { depends_on: ['clone'] },
-+ make('test', container=false) { depends_on: ['clone','check-generated-files'] },
-+ make('lint', container=false) { depends_on: ['clone','check-generated-files'] },
- make('check-mod', container=false) { depends_on: ['clone', 'test', 'lint'] },
- {
- name: 'shellcheck',
-diff --git a/.drone/drone.yml b/.drone/drone.yml
-index add72a6c397d7..3cbb6adbdbb1d 100644
---- a/.drone/drone.yml
-+++ b/.drone/drone.yml
-@@ -3,23 +3,25 @@ kind: pipeline
- name: check
- steps:
- - commands:
-- - make BUILD_IN_CONTAINER=false test
-+ - make BUILD_IN_CONTAINER=false check-generated-files
- depends_on:
- - clone
- image: grafana/loki-build-image:0.18.0
-- name: test
-+ name: check-generated-files
- - commands:
-- - make BUILD_IN_CONTAINER=false lint
-+ - make BUILD_IN_CONTAINER=false test
- depends_on:
- - clone
-+ - check-generated-files
- image: grafana/loki-build-image:0.18.0
-- name: lint
-+ name: test
- - commands:
-- - make BUILD_IN_CONTAINER=false check-generated-files
-+ - make BUILD_IN_CONTAINER=false lint
- depends_on:
- - clone
-+ - check-generated-files
- image: grafana/loki-build-image:0.18.0
-- name: check-generated-files
-+ name: lint
- - commands:
- - make BUILD_IN_CONTAINER=false check-mod
- depends_on:
-@@ -73,8 +75,6 @@ steps:
- image: plugins/docker
- name: build-loki-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki/Dockerfile
- dry_run: true
- password:
-@@ -93,8 +93,6 @@ steps:
- image: plugins/docker
- name: build-loki-canary-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: true
- password:
-@@ -113,8 +111,6 @@ steps:
- image: plugins/docker
- name: build-logcli-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/logcli/Dockerfile
- dry_run: true
- password:
-@@ -133,8 +129,6 @@ steps:
- image: plugins/docker
- name: publish-loki-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki/Dockerfile
- dry_run: false
- password:
-@@ -153,8 +147,6 @@ steps:
- image: plugins/docker
- name: publish-loki-canary-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: false
- password:
-@@ -173,8 +165,6 @@ steps:
- image: plugins/docker
- name: publish-logcli-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/logcli/Dockerfile
- dry_run: false
- password:
-@@ -208,8 +198,6 @@ steps:
- image: plugins/docker
- name: build-loki-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki/Dockerfile
- dry_run: true
- password:
-@@ -228,8 +216,6 @@ steps:
- image: plugins/docker
- name: build-loki-canary-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: true
- password:
-@@ -248,8 +234,6 @@ steps:
- image: plugins/docker
- name: build-logcli-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/logcli/Dockerfile
- dry_run: true
- password:
-@@ -268,8 +252,6 @@ steps:
- image: plugins/docker
- name: publish-loki-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki/Dockerfile
- dry_run: false
- password:
-@@ -288,8 +270,6 @@ steps:
- image: plugins/docker
- name: publish-loki-canary-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: false
- password:
-@@ -308,8 +288,6 @@ steps:
- image: plugins/docker
- name: publish-logcli-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/logcli/Dockerfile
- dry_run: false
- password:
-@@ -343,8 +321,6 @@ steps:
- image: plugins/docker
- name: build-loki-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki/Dockerfile
- dry_run: true
- password:
-@@ -363,8 +339,6 @@ steps:
- image: plugins/docker
- name: build-loki-canary-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: true
- password:
-@@ -383,8 +357,6 @@ steps:
- image: plugins/docker
- name: build-logcli-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/logcli/Dockerfile
- dry_run: true
- password:
-@@ -403,8 +375,6 @@ steps:
- image: plugins/docker
- name: publish-loki-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki/Dockerfile
- dry_run: false
- password:
-@@ -423,8 +393,6 @@ steps:
- image: plugins/docker
- name: publish-loki-canary-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/loki-canary/Dockerfile
- dry_run: false
- password:
-@@ -443,8 +411,6 @@ steps:
- image: plugins/docker
- name: publish-logcli-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: cmd/logcli/Dockerfile
- dry_run: false
- password:
-@@ -478,8 +444,6 @@ steps:
- image: plugins/docker
- name: build-promtail-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: clients/cmd/promtail/Dockerfile
- dry_run: true
- password:
-@@ -498,8 +462,6 @@ steps:
- image: plugins/docker
- name: publish-promtail-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: clients/cmd/promtail/Dockerfile
- dry_run: false
- password:
-@@ -533,8 +495,6 @@ steps:
- image: plugins/docker
- name: build-promtail-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: clients/cmd/promtail/Dockerfile
- dry_run: true
- password:
-@@ -553,8 +513,6 @@ steps:
- image: plugins/docker
- name: publish-promtail-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: clients/cmd/promtail/Dockerfile
- dry_run: false
- password:
-@@ -588,8 +546,6 @@ steps:
- image: plugins/docker
- name: build-promtail-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: clients/cmd/promtail/Dockerfile.arm32
- dry_run: true
- password:
-@@ -608,8 +564,6 @@ steps:
- image: plugins/docker
- name: publish-promtail-image
- settings:
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: clients/cmd/promtail/Dockerfile.arm32
- dry_run: false
- password:
-@@ -925,8 +879,6 @@ steps:
- settings:
- access_key:
- from_secret: ecr_key
-- build_args:
-- - TOUCH_PROTOS=1
- dockerfile: tools/lambda-promtail/Dockerfile
- dry_run: false
- region: us-east-1
-@@ -984,6 +936,6 @@ kind: secret
- name: deploy_config
- ---
- kind: signature
--hmac: 55440faa2728a5b8abbd213c2cf198e01f00201ba7143391924da1b9aa02c350
-+hmac: b51ec8dfc84d0be83827fc851b21b81e1091886be480d675f51485b647e58001
-
- ...
-diff --git a/Makefile b/Makefile
-index 0b9c510017701..a6ea61fecff53 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,5 +1,5 @@
- .DEFAULT_GOAL := all
--.PHONY: all images check-generated-files logcli loki loki-debug promtail promtail-debug loki-canary lint test clean yacc protos touch-protobuf-sources touch-protos format
-+.PHONY: all images check-generated-files logcli loki loki-debug promtail promtail-debug loki-canary lint test clean yacc protos touch-protobuf-sources format
- .PHONY: docker-driver docker-driver-clean docker-driver-enable docker-driver-push
- .PHONY: fluent-bit-image, fluent-bit-push, fluent-bit-test
- .PHONY: fluentd-image, fluentd-push, fluentd-test
-@@ -8,6 +8,7 @@
- .PHONY: benchmark-store, drone, check-mod
- .PHONY: migrate migrate-image lint-markdown ragel
- .PHONY: validate-example-configs generate-example-config-doc check-example-config-doc
-+.PHONY: clean clean-protos
-
- SHELL = /usr/bin/env bash
-
-@@ -128,10 +129,10 @@ binfmt:
- ################
- # Main Targets #
- ################
--all: promtail logcli loki loki-canary check-generated-files
-+all: promtail logcli loki loki-canary
-
- # This is really a check for the CI to make sure generated files are built and checked in manually
--check-generated-files: touch-protobuf-sources yacc ragel protos clients/pkg/promtail/server/ui/assets_vfsdata.go
-+check-generated-files: yacc ragel protos clients/pkg/promtail/server/ui/assets_vfsdata.go
- @if ! (git diff --exit-code $(YACC_GOS) $(RAGEL_GOS) $(PROTO_GOS) $(PROMTAIL_GENERATED_FILE)); then \
- echo ""\nChanges found in generated files""; \
- echo ""Run 'make check-generated-files' and commit the changes to fix this error.""; \
-@@ -140,14 +141,6 @@ check-generated-files: touch-protobuf-sources yacc ragel protos clients/pkg/prom
- exit 1; \
- fi
-
--# Trick used to ensure that protobuf files are always compiled even if not changed, because the
--# tooling may have been upgraded and the compiled output may be different. We're not using a
--# PHONY target so that we can control where we want to touch it.
--touch-protobuf-sources:
-- for def in $(PROTO_DEFS); do \
-- touch $$def; \
-- done
--
- ##########
- # Logcli #
- ##########
-@@ -165,8 +158,8 @@ cmd/logcli/logcli: $(APP_GO_FILES) cmd/logcli/main.go
- # Loki #
- ########
-
--loki: protos yacc ragel cmd/loki/loki
--loki-debug: protos yacc ragel cmd/loki/loki-debug
-+loki: cmd/loki/loki
-+loki-debug: cmd/loki/loki-debug
-
- cmd/loki/loki: $(APP_GO_FILES) cmd/loki/main.go
- CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D)
-@@ -180,7 +173,7 @@ cmd/loki/loki-debug: $(APP_GO_FILES) cmd/loki/main.go
- # Loki-Canary #
- ###############
-
--loki-canary: protos yacc ragel cmd/loki-canary/loki-canary
-+loki-canary: cmd/loki-canary/loki-canary
-
- cmd/loki-canary/loki-canary: $(APP_GO_FILES) cmd/loki-canary/main.go
- CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./$(@D)
-@@ -281,6 +274,9 @@ test: all
- # Clean #
- #########
-
-+clean-protos:
-+ rm -rf $(PROTO_GOS)
-+
- clean:
- rm -rf clients/cmd/promtail/promtail
- rm -rf cmd/loki/loki
-@@ -340,13 +336,9 @@ endif
- # Protobufs #
- #############
-
--protos: $(PROTO_GOS)
--
--# use with care. This signals to make that the proto definitions don't need recompiling.
--touch-protos:
-- for proto in $(PROTO_GOS); do [ -f ""./$${proto}"" ] && touch ""$${proto}"" && echo ""touched $${proto}""; done
-+protos: clean-protos $(PROTO_GOS)
-
--%.pb.go: $(PROTO_DEFS)
-+%.pb.go:
- ifeq ($(BUILD_IN_CONTAINER),true)
- @mkdir -p $(shell pwd)/.pkg
- @mkdir -p $(shell pwd)/.cache
-diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile
-index 726d7366ff3b0..1d037f7c39001 100644
---- a/clients/cmd/promtail/Dockerfile
-+++ b/clients/cmd/promtail/Dockerfile
-@@ -1,13 +1,11 @@
- FROM golang:1.17.2-bullseye as build
--# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them.
--# This is helpful when file system timestamps can't be trusted with make
--ARG TOUCH_PROTOS
-+
- COPY . /src/loki
- WORKDIR /src/loki
- # Backports repo required to get a libsystemd version 246 or newer which is required to handle journal +ZSTD compression
- RUN echo ""deb http://deb.debian.org/debian bullseye-backports main"" >> /etc/apt/sources.list
- RUN apt-get update && apt-get install -t bullseye-backports -qy libsystemd-dev
--RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false promtail
-+RUN make clean && make BUILD_IN_CONTAINER=false promtail
-
- # Promtail requires debian as the base image to support systemd journal reading
- FROM debian:bullseye-slim
-diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32
-index 0a5e8c7590907..a0da39364c117 100644
---- a/clients/cmd/promtail/Dockerfile.arm32
-+++ b/clients/cmd/promtail/Dockerfile.arm32
-@@ -1,11 +1,9 @@
- FROM golang:1.17.2 as build
--# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them.
--# This is helpful when file system timestamps can't be trusted with make
--ARG TOUCH_PROTOS
-+
- COPY . /src/loki
- WORKDIR /src/loki
- RUN apt-get update && apt-get install -qy libsystemd-dev
--RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false promtail
-+RUN make clean && make BUILD_IN_CONTAINER=false promtail
-
- # Promtail requires debian as the base image to support systemd journal reading
- FROM debian:stretch-slim
-diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile
-index d804f41128099..70598177de1aa 100644
---- a/cmd/logcli/Dockerfile
-+++ b/cmd/logcli/Dockerfile
-@@ -1,9 +1,8 @@
- FROM golang:1.17.2 as build
-
--ARG TOUCH_PROTOS
- COPY . /src/loki
- WORKDIR /src/loki
--RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false logcli
-+RUN make clean && make BUILD_IN_CONTAINER=false logcli
-
- FROM alpine:3.13
-
-diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile
-index e5aa0b6a8c805..7faa6450efa9c 100644
---- a/cmd/loki-canary/Dockerfile
-+++ b/cmd/loki-canary/Dockerfile
-@@ -1,10 +1,8 @@
- FROM golang:1.17.2 as build
--# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them.
--# This is helpful when file system timestamps can't be trusted with make
--ARG TOUCH_PROTOS
-+
- COPY . /src/loki
- WORKDIR /src/loki
--RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false loki-canary
-+RUN make clean && make BUILD_IN_CONTAINER=false loki-canary
-
- FROM alpine:3.13
- RUN apk add --update --no-cache ca-certificates
-diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile
-index 81b69d4ef4c82..0fad55ff9bcf4 100644
---- a/cmd/loki/Dockerfile
-+++ b/cmd/loki/Dockerfile
-@@ -1,10 +1,8 @@
- FROM golang:1.17.2 as build
--# TOUCH_PROTOS signifies if we should touch the compiled proto files and thus not regenerate them.
--# This is helpful when file system timestamps can't be trusted with make
--ARG TOUCH_PROTOS
-+
- COPY . /src/loki
- WORKDIR /src/loki
--RUN make clean && (if [ ""${TOUCH_PROTOS}"" ]; then make touch-protos; fi) && make BUILD_IN_CONTAINER=false loki
-+RUN make clean && make BUILD_IN_CONTAINER=false loki
-
- FROM alpine:3.13
-
-diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile
-index ec873807529aa..61e328e0a5d20 100644
---- a/cmd/querytee/Dockerfile
-+++ b/cmd/querytee/Dockerfile
-@@ -1,6 +1,5 @@
- FROM golang:1.17.2 as build
-
--ARG TOUCH_PROTOS
- COPY . /src/loki
- WORKDIR /src/loki
- RUN make clean && make BUILD_IN_CONTAINER=false loki-querytee",Build,"simplify how protos are built (#4639)
-
-* we always seem to be chasing our tails with how protos are generated and Makes use of timestamps to determine if files should be recompiled. Instead of touching files and altering timestamps always delete the compiled proto files when calling `protos` to make sure they are compiled every time.
-
-Removed targets to build protos, yaccs, and ragel files when trying to build loki or the canary. This isn't necesary, if you are changing these files you would know you need to build them and the `check-generated-files` should catch any changes to them not committed.
-
-* rm -rf
-
-* we have a race between check-generated-files and our other steps, so let that run first.
-
-also removing `check-generated-files` from the `all` target because its redundant with a separate step and could also race with the parallel lint
-
-* remove TOUCH_PROTOS
-
-* more cleanup of TOUCH_PROTOS"
-a205dce83600a874da5a02176217e333662cbf01,2023-05-22 21:26:42,Andreas Gebhardt,"chunks-inspect: print chunk version (format) (#9490)
-
-Get the version byte of the current inspecting Loki chunk printed.",False,"diff --git a/cmd/chunks-inspect/loki.go b/cmd/chunks-inspect/loki.go
-index 35bb90774a2cb..d8fd5d0a913fd 100644
---- a/cmd/chunks-inspect/loki.go
-+++ b/cmd/chunks-inspect/loki.go
-@@ -62,6 +62,7 @@ const (
- )
-
- type LokiChunk struct {
-+ format byte
- encoding Encoding
-
- blocks []LokiBlock
-@@ -149,6 +150,7 @@ func parseLokiChunk(chunkHeader *ChunkHeader, r io.Reader) (*LokiChunk, error) {
- metadata = metadata[n:]
-
- lokiChunk := &LokiChunk{
-+ format: f,
- encoding: compression,
- metadataChecksum: metaChecksum,
- computedMetadataChecksum: computedMetaChecksum,
-diff --git a/cmd/chunks-inspect/main.go b/cmd/chunks-inspect/main.go
-index 0aa99a57f1249..c25f621845b06 100644
---- a/cmd/chunks-inspect/main.go
-+++ b/cmd/chunks-inspect/main.go
-@@ -65,6 +65,7 @@ func printFile(filename string, blockDetails, printLines, storeBlocks bool) {
- return
- }
-
-+ fmt.Println(""Format (Version):"", lokiChunk.format)
- fmt.Println(""Encoding:"", lokiChunk.encoding)
- fmt.Print(""Blocks Metadata Checksum: "", fmt.Sprintf(""%08x"", lokiChunk.metadataChecksum))
- if lokiChunk.metadataChecksum == lokiChunk.computedMetadataChecksum {",unknown,"chunks-inspect: print chunk version (format) (#9490)
-
-Get the version byte of the current inspecting Loki chunk printed."
-b1d4efab1203adf5d110261f12171fc03148ebbe,2022-10-06 01:17:00,Dylan Guedes,"Loki: Per-tenant stream sharding (#7311)
-
-**What this PR does / why we need it**:
-- Move stream sharding configuration to its own package to avoid cyclic
-imports
-- Change stream sharding to be a per-tenant configuration
-- Change ingesters to reject whole streams due to rate-limit based on
-per-tenant stream sharding
-- Change stream sharding flags prefix from `distributor.shard-stream` to
-`shard-stream`",False,"diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
-index 5d1b71bdf5261..70d28b98fd079 100644
---- a/docs/sources/configuration/_index.md
-+++ b/docs/sources/configuration/_index.md
-@@ -318,28 +318,6 @@ ring:
- # reading and writing.
- # CLI flag: -distributor.ring.heartbeat-timeout
- [heartbeat_timeout: | default = 1m]
--
--# Configures the distributor to shard streams that are too big
--shard_streams:
-- # Whether to enable stream sharding
-- #
-- # CLI flag: -distributor.shard-streams.enabled
-- [enabled: | default = false]
--
-- # Enable logging when sharding streams because logging on the read path may
-- # impact performance. When disabled, stream sharding will emit no logs
-- # regardless of log level
-- #
-- # CLI flag: -distributor.shard-streams.logging-enabled
-- [logging_enabled: | default = false]
--
-- # Threshold that determines how much the stream should be sharded.
-- # The formula used is n = ceil(stream size + ingested rate / desired rate), where n is the number of shards.
-- # For instance, if a stream ingestion is at 10MB, desired rate is 3MB (default), and a stream of size 1MB is
-- # received, the given stream will be split into n = ceil((1 + 10)/3) = 4 shards.
-- #
-- # CLI flag: -distributor.shard-streams.desired-rate
-- [desired_rate: | default = 3MB]
- ```
-
- ## querier
-@@ -2364,6 +2342,28 @@ The `limits_config` block configures global and per-tenant limits in Loki.
- # CLI flag: -ingester.per-stream-rate-limit-burst
- [per_stream_rate_limit_burst: | default = ""15MB""]
-
-+# Configures the distributor to shard streams that are too big
-+shard_streams:
-+ # Whether to enable stream sharding
-+ #
-+ # CLI flag: -shard-streams.enabled
-+ [enabled: | default = false]
-+
-+ # Enable logging when sharding streams because logging on the read path may
-+ # impact performance. When disabled, stream sharding will emit no logs
-+ # regardless of log level
-+ #
-+ # CLI flag: -shard-streams.logging-enabled
-+ [logging_enabled: | default = false]
-+
-+ # Threshold that determines how much the stream should be sharded.
-+ # The formula used is n = ceil(stream size + ingested rate / desired rate), where n is the number of shards.
-+ # For instance, if a stream ingestion is at 10MB, desired rate is 3MB (default), and a stream of size 1MB is
-+ # received, the given stream will be split into n = ceil((1 + 10)/3) = 4 shards.
-+ #
-+ # CLI flag: -shard-streams.desired-rate
-+ [desired_rate: | default = 3MB]
-+
- # Limit how far back in time series data and metadata can be queried,
- # up until lookback duration ago.
- # This limit is enforced in the query frontend, the querier and the ruler.
-diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
-index ef95af7c6f3e2..f75fdf5a957f7 100644
---- a/pkg/distributor/distributor.go
-+++ b/pkg/distributor/distributor.go
-@@ -30,6 +30,7 @@ import (
- ""go.uber.org/atomic""
-
- ""github.com/grafana/loki/pkg/distributor/clientpool""
-+ ""github.com/grafana/loki/pkg/distributor/shardstreams""
- ""github.com/grafana/loki/pkg/ingester/client""
- ""github.com/grafana/loki/pkg/logproto""
- ""github.com/grafana/loki/pkg/logql/syntax""
-@@ -37,7 +38,6 @@ import (
- ""github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/retention""
- ""github.com/grafana/loki/pkg/usagestats""
- ""github.com/grafana/loki/pkg/util""
-- ""github.com/grafana/loki/pkg/util/flagext""
- util_log ""github.com/grafana/loki/pkg/util/log""
- ""github.com/grafana/loki/pkg/validation""
- )
-@@ -51,21 +51,6 @@ var (
- rfStats = usagestats.NewInt(""distributor_replication_factor"")
- )
-
--type ShardStreamsConfig struct {
-- Enabled bool `yaml:""enabled""`
-- LoggingEnabled bool `yaml:""logging_enabled""`
--
-- // DesiredRate is the threshold used to shard the stream into smaller pieces.
-- // Expected to be in bytes.
-- DesiredRate flagext.ByteSize `yaml:""desired_rate""`
--}
--
--func (cfg *ShardStreamsConfig) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) {
-- fs.BoolVar(&cfg.Enabled, prefix+"".enabled"", false, ""Automatically shard streams to keep them under the per-stream rate limit"")
-- fs.BoolVar(&cfg.LoggingEnabled, prefix+"".logging-enabled"", false, ""Enable logging when sharding streams"")
-- fs.Var(&cfg.DesiredRate, prefix+"".desired-rate"", ""threshold used to cut a new shard. Default (3MB) means if a rate is above 3MB, it will be sharded."")
--}
--
- // Config for a Distributor.
- type Config struct {
- // Distributors ring
-@@ -73,15 +58,11 @@ type Config struct {
-
- // For testing.
- factory ring_client.PoolFactory `yaml:""-""`
--
-- // ShardStreams configures wether big streams should be sharded or not.
-- ShardStreams ShardStreamsConfig `yaml:""shard_streams""`
- }
-
- // RegisterFlags registers distributor-related flags.
- func (cfg *Config) RegisterFlags(fs *flag.FlagSet) {
- cfg.DistributorRing.RegisterFlags(fs)
-- cfg.ShardStreams.RegisterFlagsWithPrefix(""distributor.shard-streams"", fs)
- }
-
- // RateStore manages the ingestion rate of streams, populated by data fetched from ingesters.
-@@ -329,7 +310,8 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
- }
- stream.Entries = stream.Entries[:n]
-
-- if d.cfg.ShardStreams.Enabled {
-+ shardStreamsCfg := d.validator.Limits.ShardStreams(userID)
-+ if shardStreamsCfg.Enabled {
- derivedKeys, derivedStreams := d.shardStream(stream, streamSize, userID)
- keys = append(keys, derivedKeys...)
- streams = append(streams, derivedStreams...)
-@@ -409,15 +391,15 @@ func min(x1, x2 int) int {
- // N is the sharding size for the given stream. shardSteam returns the smaller
- // streams and their associated keys for hashing to ingesters.
- func (d *Distributor) shardStream(stream logproto.Stream, streamSize int, userID string) ([]uint32, []streamTracker) {
-+ shardStreamsCfg := d.validator.Limits.ShardStreams(userID)
- logger := log.With(util_log.WithUserID(userID, util_log.Logger), ""stream"", stream.Labels)
--
-- shardCount := d.shardCountFor(logger, &stream, streamSize, d.cfg.ShardStreams.DesiredRate.Val(), d.rateStore)
-+ shardCount := d.shardCountFor(logger, &stream, streamSize, d.rateStore, shardStreamsCfg)
-
- if shardCount <= 1 {
- return []uint32{util.TokenFor(userID, stream.Labels)}, []streamTracker{{stream: stream}}
- }
-
-- if d.cfg.ShardStreams.LoggingEnabled {
-+ if shardStreamsCfg.LoggingEnabled {
- level.Info(logger).Log(""msg"", ""sharding request"", ""shard_count"", shardCount)
- }
-
-@@ -427,7 +409,7 @@ func (d *Distributor) shardStream(stream logproto.Stream, streamSize int, userID
- derivedKeys := make([]uint32, 0, shardCount)
- derivedStreams := make([]streamTracker, 0, shardCount)
- for i := 0; i < shardCount; i++ {
-- shard, ok := d.createShard(stream, streamLabels, streamPattern, shardCount, i)
-+ shard, ok := d.createShard(shardStreamsCfg, stream, streamLabels, streamPattern, shardCount, i)
- if !ok {
- level.Error(logger).Log(""msg"", ""couldn't create shard"", ""idx"", i)
- continue
-@@ -436,7 +418,7 @@ func (d *Distributor) shardStream(stream logproto.Stream, streamSize int, userID
- derivedKeys = append(derivedKeys, util.TokenFor(userID, shard.Labels))
- derivedStreams = append(derivedStreams, streamTracker{stream: shard})
-
-- if d.cfg.ShardStreams.LoggingEnabled {
-+ if shardStreamsCfg.LoggingEnabled {
- level.Info(util_log.Logger).Log(""msg"", ""stream derived from sharding"", ""src-stream"", stream.Labels, ""derived-stream"", shard.Labels)
- }
- }
-@@ -460,8 +442,8 @@ func labelTemplate(lbls string) labels.Labels {
- return streamLabels
- }
-
--func (d *Distributor) createShard(stream logproto.Stream, lbls labels.Labels, streamPattern string, totalShards, shardNumber int) (logproto.Stream, bool) {
-- lowerBound, upperBound, ok := d.boundsFor(stream, totalShards, shardNumber)
-+func (d *Distributor) createShard(shardStreamsCfg *shardstreams.Config, stream logproto.Stream, lbls labels.Labels, streamPattern string, totalShards, shardNumber int) (logproto.Stream, bool) {
-+ lowerBound, upperBound, ok := d.boundsFor(stream, totalShards, shardNumber, shardStreamsCfg.LoggingEnabled)
- if !ok {
- return logproto.Stream{}, false
- }
-@@ -475,7 +457,7 @@ func (d *Distributor) createShard(stream logproto.Stream, lbls labels.Labels, st
- }, true
- }
-
--func (d *Distributor) boundsFor(stream logproto.Stream, totalShards, shardNumber int) (int, int, bool) {
-+func (d *Distributor) boundsFor(stream logproto.Stream, totalShards, shardNumber int, loggingEnabled bool) (int, int, bool) {
- entriesPerWindow := float64(len(stream.Entries)) / float64(totalShards)
-
- fIdx := float64(shardNumber)
-@@ -483,7 +465,7 @@ func (d *Distributor) boundsFor(stream logproto.Stream, totalShards, shardNumber
- upperBound := min(int(entriesPerWindow*(1+fIdx)), len(stream.Entries))
-
- if lowerBound > upperBound {
-- if d.cfg.ShardStreams.LoggingEnabled {
-+ if loggingEnabled {
- level.Warn(util_log.Logger).Log(""msg"", ""sharding with lowerbound > upperbound"", ""lowerbound"", lowerBound, ""upperbound"", upperBound, ""shards"", totalShards, ""labels"", stream.Labels)
- }
- return 0, 0, false
-@@ -598,10 +580,10 @@ func (d *Distributor) parseStreamLabels(vContext validationContext, key string,
- // based on the rate stored in the rate store and will store the new evaluated number of shards.
- //
- // desiredRate is expected to be given in bytes.
--func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, streamSize, desiredRate int, rateStore RateStore) int {
-- if desiredRate <= 0 {
-- if d.cfg.ShardStreams.LoggingEnabled {
-- level.Error(logger).Log(""msg"", ""invalid desired rate"", ""desired_rate"", desiredRate)
-+func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, streamSize int, rateStore RateStore, streamShardcfg *shardstreams.Config) int {
-+ if streamShardcfg.DesiredRate.Val() <= 0 {
-+ if streamShardcfg.LoggingEnabled {
-+ level.Error(logger).Log(""msg"", ""invalid desired rate"", ""desired_rate"", streamShardcfg.DesiredRate.String())
- }
- return 1
- }
-@@ -609,16 +591,16 @@ func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream,
- rate, err := rateStore.RateFor(stream)
- if err != nil {
- d.streamShardingFailures.WithLabelValues(""rate_not_found"").Inc()
-- if d.cfg.ShardStreams.LoggingEnabled {
-+ if streamShardcfg.LoggingEnabled {
- level.Error(logger).Log(""msg"", ""couldn't shard stream because rate store returned error"", ""err"", err)
- }
- return 1
- }
-
-- shards := calculateShards(rate, streamSize, desiredRate)
-+ shards := calculateShards(rate, streamSize, streamShardcfg.DesiredRate.Val())
- if shards > len(stream.Entries) {
- d.streamShardingFailures.WithLabelValues(""too_many_shards"").Inc()
-- if d.cfg.ShardStreams.LoggingEnabled {
-+ if streamShardcfg.LoggingEnabled {
- level.Error(logger).Log(""msg"", ""number of shards bigger than number of entries"", ""shards"", shards, ""entries"", len(stream.Entries))
- }
- return len(stream.Entries)
-diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
-index 9dbf4132e6d63..e802b20215233 100644
---- a/pkg/distributor/distributor_test.go
-+++ b/pkg/distributor/distributor_test.go
-@@ -672,11 +672,21 @@ func TestStreamShard(t *testing.T) {
- t.Run(tc.name, func(t *testing.T) {
- baseStream.Entries = tc.entries
-
-+ distributorLimits := &validation.Limits{}
-+ flagext.DefaultValues(distributorLimits)
-+ distributorLimits.ShardStreams.DesiredRate = desiredRate
-+
-+ overrides, err := validation.NewOverrides(*distributorLimits, nil)
-+ require.NoError(t, err)
-+
-+ validator, err := NewValidator(overrides)
-+ require.NoError(t, err)
-+
- d := Distributor{
- rateStore: &noopRateStore{},
- streamShardingFailures: shardingFailureMetric,
-+ validator: validator,
- }
-- d.cfg.ShardStreams.DesiredRate = desiredRate
-
- _, derivedStreams := d.shardStream(baseStream, tc.streamSize, ""fake"")
- require.Equal(t, tc.wantDerivedStream, derivedStreams)
-@@ -865,21 +875,12 @@ func TestShardCountFor(t *testing.T) {
- name string
- stream *logproto.Stream
- rate int
-- desiredRate int
-+ desiredRate loki_flagext.ByteSize
-
- wantStreamSize int // used for sanity check.
- wantShards int
- wantErr bool
- }{
-- {
-- name: ""2 entries with zero rate and desired rate < 0, return 1 shard"",
-- stream: &logproto.Stream{Hash: 1},
-- rate: 0,
-- desiredRate: -5, // in bytes
-- wantStreamSize: 2, // in bytes
-- wantShards: 1,
-- wantErr: false,
-- },
- {
- name: ""2 entries with zero rate and desired rate == 0, return 1 shard"",
- stream: &logproto.Stream{Hash: 1},
-@@ -953,11 +954,12 @@ func TestShardCountFor(t *testing.T) {
- limits := &validation.Limits{}
- flagext.DefaultValues(limits)
- limits.EnforceMetricName = false
-+ limits.ShardStreams.DesiredRate = tc.desiredRate
-
- d := &Distributor{
- streamShardingFailures: shardingFailureMetric,
- }
-- got := d.shardCountFor(util_log.Logger, tc.stream, tc.wantStreamSize, tc.desiredRate, &noopRateStore{tc.rate})
-+ got := d.shardCountFor(util_log.Logger, tc.stream, tc.wantStreamSize, &noopRateStore{tc.rate}, limits.ShardStreams)
- require.Equal(t, tc.wantShards, got)
- })
- }
-diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go
-index 9cff9c140140f..7b6fa23d7287d 100644
---- a/pkg/distributor/limits.go
-+++ b/pkg/distributor/limits.go
-@@ -1,6 +1,10 @@
- package distributor
-
--import ""time""
-+import (
-+ ""time""
-+
-+ ""github.com/grafana/loki/pkg/distributor/shardstreams""
-+)
-
- // Limits is an interface for distributor limits/related configs
- type Limits interface {
-@@ -16,4 +20,6 @@ type Limits interface {
- RejectOldSamplesMaxAge(userID string) time.Duration
-
- IncrementDuplicateTimestamps(userID string) bool
-+
-+ ShardStreams(userID string) *shardstreams.Config
- }
-diff --git a/pkg/distributor/shardstreams/config.go b/pkg/distributor/shardstreams/config.go
-new file mode 100644
-index 0000000000000..6a92472451543
---- /dev/null
-+++ b/pkg/distributor/shardstreams/config.go
-@@ -0,0 +1,23 @@
-+package shardstreams
-+
-+import (
-+ ""flag""
-+
-+ ""github.com/grafana/loki/pkg/util/flagext""
-+)
-+
-+type Config struct {
-+ Enabled bool `yaml:""enabled"" json:""enabled""`
-+ LoggingEnabled bool `yaml:""logging_enabled"" json:""logging_enabled""`
-+
-+ // DesiredRate is the threshold used to shard the stream into smaller pieces.
-+ // Expected to be in bytes.
-+ DesiredRate flagext.ByteSize `yaml:""desired_rate"" json:""desired_rate""`
-+}
-+
-+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) {
-+ fs.BoolVar(&cfg.Enabled, prefix+"".enabled"", false, ""Automatically shard streams to keep them under the per-stream rate limit"")
-+ fs.BoolVar(&cfg.LoggingEnabled, prefix+"".logging-enabled"", false, ""Enable logging when sharding streams"")
-+ cfg.DesiredRate.Set(""3mb"") //nolint:errcheck
-+ fs.Var(&cfg.DesiredRate, prefix+"".desired-rate"", ""threshold used to cut a new shard. Default (3MB) means if a rate is above 3MB, it will be sharded."")
-+}
-diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
-index 85662cd86be5e..9609d6ad7d998 100644
---- a/pkg/ingester/ingester.go
-+++ b/pkg/ingester/ingester.go
-@@ -105,9 +105,6 @@ type Config struct {
- IndexShards int `yaml:""index_shards""`
-
- MaxDroppedStreams int `yaml:""max_dropped_streams""`
--
-- // Whether nor not to ingest all at once or not. Comes from distributor StreamShards Enabled
-- RateLimitWholeStream bool `yaml:""-""`
- }
-
- // RegisterFlags registers the flags.
-diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
-index 9518c50ab9c8d..41351c27146b3 100644
---- a/pkg/ingester/instance.go
-+++ b/pkg/ingester/instance.go
-@@ -172,6 +172,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
- record := recordPool.GetRecord()
- record.UserID = i.instanceID
- defer recordPool.PutRecord(record)
-+ rateLimitWholeStream := i.limiter.limits.ShardStreams(i.instanceID).Enabled
-
- var appendErr error
- for _, reqStream := range req.Streams {
-@@ -195,7 +196,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
- continue
- }
-
-- _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false)
-+ _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false, rateLimitWholeStream)
- s.chunkMtx.Unlock()
- }
-
-diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
-index c6d6362aa724b..dbe486b3debfd 100644
---- a/pkg/ingester/instance_test.go
-+++ b/pkg/ingester/instance_test.go
-@@ -10,19 +10,20 @@ import (
- ""testing""
- ""time""
-
-- ""github.com/grafana/loki/pkg/logql/syntax""
-- ""github.com/grafana/loki/pkg/querier/astmapper""
-- ""github.com/grafana/loki/pkg/storage/chunk""
-- ""github.com/grafana/loki/pkg/storage/config""
--
-+ ""github.com/grafana/dskit/flagext""
- ""github.com/pkg/errors""
- ""github.com/prometheus/common/model""
- ""github.com/prometheus/prometheus/model/labels""
- ""github.com/stretchr/testify/require""
-
-+ ""github.com/grafana/loki/pkg/distributor/shardstreams""
- ""github.com/grafana/loki/pkg/logproto""
- ""github.com/grafana/loki/pkg/logql""
-+ ""github.com/grafana/loki/pkg/logql/syntax""
-+ ""github.com/grafana/loki/pkg/querier/astmapper""
- loki_runtime ""github.com/grafana/loki/pkg/runtime""
-+ ""github.com/grafana/loki/pkg/storage/chunk""
-+ ""github.com/grafana/loki/pkg/storage/config""
- ""github.com/grafana/loki/pkg/validation""
- )
-
-@@ -646,6 +647,109 @@ func Test_QuerySampleWithDelete(t *testing.T) {
- require.Equal(t, samples, []float64{1.})
- }
-
-+type fakeLimits struct {
-+ limits map[string]*validation.Limits
-+}
-+
-+func (f fakeLimits) TenantLimits(userID string) *validation.Limits {
-+ limits, ok := f.limits[userID]
-+ if !ok {
-+ return nil
-+ }
-+
-+ return limits
-+}
-+
-+func (f fakeLimits) AllByUserID() map[string]*validation.Limits {
-+ return f.limits
-+}
-+
-+func TestStreamShardingUsage(t *testing.T) {
-+ setupCustomTenantLimit := func(perStreamLimit string) *validation.Limits {
-+ shardStreamsCfg := &shardstreams.Config{Enabled: true, LoggingEnabled: true}
-+ shardStreamsCfg.DesiredRate.Set(""6MB"") //nolint:errcheck
-+
-+ customTenantLimits := &validation.Limits{}
-+ flagext.DefaultValues(customTenantLimits)
-+
-+ customTenantLimits.PerStreamRateLimit.Set(perStreamLimit) //nolint:errcheck
-+ customTenantLimits.PerStreamRateLimitBurst.Set(perStreamLimit) //nolint:errcheck
-+ customTenantLimits.ShardStreams = shardStreamsCfg
-+
-+ return customTenantLimits
-+ }
-+
-+ customTenant1 := ""my-org1""
-+ customTenant2 := ""my-org2""
-+
-+ limitsDefinition := &fakeLimits{
-+ limits: make(map[string]*validation.Limits),
-+ }
-+ // testing with 1: although 1 byte is enough to accept at least the
-+ // first line entry, per-stream sharding is enabled, so all entries
-+ // are rejected if any one of them would be rejected.
-+ limitsDefinition.limits[customTenant1] = setupCustomTenantLimit(""1"")
-+ limitsDefinition.limits[customTenant2] = setupCustomTenantLimit(""4"")
-+
-+ limits, err := validation.NewOverrides(defaultLimitsTestConfig(), limitsDefinition)
-+ require.NoError(t, err)
-+
-+ limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
-+
-+ defaultShardStreamsCfg := limiter.limits.ShardStreams(""fake"")
-+ tenantShardStreamsCfg := limiter.limits.ShardStreams(customTenant1)
-+
-+ t.Run(""test default configuration"", func(t *testing.T) {
-+ require.Equal(t, false, defaultShardStreamsCfg.Enabled)
-+ require.Equal(t, ""3MB"", defaultShardStreamsCfg.DesiredRate.String())
-+ require.Equal(t, false, defaultShardStreamsCfg.LoggingEnabled)
-+ })
-+
-+ t.Run(""test configuration being applied"", func(t *testing.T) {
-+ require.Equal(t, true, tenantShardStreamsCfg.Enabled)
-+ require.Equal(t, ""6MB"", tenantShardStreamsCfg.DesiredRate.String())
-+ require.Equal(t, true, tenantShardStreamsCfg.LoggingEnabled)
-+ })
-+
-+ t.Run(""invalid push returns error"", func(t *testing.T) {
-+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil)
-+ ctx := context.Background()
-+
-+ err = i.Push(ctx, &logproto.PushRequest{
-+ Streams: []logproto.Stream{
-+ {
-+ Labels: `{cpu=""10"",endpoint=""https"",instance=""10.253.57.87:9100"",job=""node-exporter"",mode=""idle"",namespace=""observability"",pod=""node-exporter-l454v"",service=""node-exporter""}`,
-+ Entries: []logproto.Entry{
-+ {Timestamp: time.Now(), Line: ""1""},
-+ {Timestamp: time.Now(), Line: ""2""},
-+ {Timestamp: time.Now(), Line: ""3""},
-+ },
-+ },
-+ },
-+ })
-+ require.Error(t, err)
-+ })
-+
-+ t.Run(""valid push returns no error"", func(t *testing.T) {
-+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil)
-+ ctx := context.Background()
-+
-+ err = i.Push(ctx, &logproto.PushRequest{
-+ Streams: []logproto.Stream{
-+ {
-+ Labels: `{myotherlabel=""myothervalue""}`,
-+ Entries: []logproto.Entry{
-+ {Timestamp: time.Now(), Line: ""1""},
-+ {Timestamp: time.Now(), Line: ""2""},
-+ {Timestamp: time.Now(), Line: ""3""},
-+ },
-+ },
-+ },
-+ })
-+ require.NoError(t, err)
-+ })
-+}
-+
- func defaultInstance(t *testing.T) *instance {
- ingesterConfig := defaultIngesterTestConfig(t)
- defaultLimits := defaultLimitsTestConfig()
-diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go
-index c3bbc9a8af442..4baa2d875527c 100644
---- a/pkg/ingester/recovery.go
-+++ b/pkg/ingester/recovery.go
-@@ -165,7 +165,7 @@ func (r *ingesterRecoverer) Push(userID string, entries RefEntries) error {
- }
-
- // ignore out of order errors here (it's possible for a checkpoint to already have data from the wal segments)
-- bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true)
-+ bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true, false)
- r.ing.replayController.Add(int64(bytesAdded))
- if err != nil && err == ErrEntriesExist {
- r.ing.metrics.duplicateEntriesTotal.Add(float64(len(entries.Entries)))
-diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
-index 616a490a50cce..40105a8f1b5ac 100644
---- a/pkg/ingester/stream.go
-+++ b/pkg/ingester/stream.go
-@@ -150,6 +150,8 @@ func (s *stream) Push(
- // Lock chunkMtx while pushing.
- // If this is false, chunkMtx must be held outside Push.
- lockChunk bool,
-+ // Whether or not to ingest the whole stream at once. It is a per-tenant configuration.
-+ rateLimitWholeStream bool,
- ) (int, error) {
- if lockChunk {
- s.chunkMtx.Lock()
-@@ -168,8 +170,8 @@ func (s *stream) Push(
- return 0, ErrEntriesExist
- }
-
-- toStore, invalid := s.validateEntries(entries, isReplay)
-- if s.cfg.RateLimitWholeStream && hasRateLimitErr(invalid) {
-+ toStore, invalid := s.validateEntries(entries, isReplay, rateLimitWholeStream)
-+ if rateLimitWholeStream && hasRateLimitErr(invalid) {
- return 0, errorForFailedEntries(s, invalid, len(entries))
- }
-
-@@ -320,7 +322,7 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry) (in
- return bytesAdded, storedEntries, invalid
- }
-
--func (s *stream) validateEntries(entries []logproto.Entry, isReplay bool) ([]logproto.Entry, []entryWithError) {
-+func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWholeStream bool) ([]logproto.Entry, []entryWithError) {
- var (
- outOfOrderSamples, outOfOrderBytes int
- rateLimitedSamples, rateLimitedBytes int
-@@ -349,7 +351,7 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay bool) ([]log
- totalBytes += lineBytes
-
- now := time.Now()
-- if !s.cfg.RateLimitWholeStream && !s.limiter.AllowN(now, lineBytes) {
-+ if !rateLimitWholeStream && !s.limiter.AllowN(now, len(entries[i].Line)) {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(lineBytes)}})
- rateLimitedSamples++
- rateLimitedBytes += lineBytes
-@@ -380,7 +382,7 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay bool) ([]log
- // ingestion, the limiter should only be advanced when the whole stream can be
- // sent
- now := time.Now()
-- if s.cfg.RateLimitWholeStream && !s.limiter.AllowN(now, validBytes) {
-+ if rateLimitWholeStream && !s.limiter.AllowN(now, totalBytes) {
- // Report that the whole stream was rate limited
- rateLimitedSamples = len(entries)
- failedEntriesWithError = make([]entryWithError, 0, len(entries))
-diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
-index 0e6e1d0350c5b..5ec613fc46bd2 100644
---- a/pkg/ingester/stream_test.go
-+++ b/pkg/ingester/stream_test.go
-@@ -66,7 +66,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
-
- _, err := s.Push(context.Background(), []logproto.Entry{
- {Timestamp: time.Unix(int64(numLogs), 0), Line: ""log""},
-- }, recordPool.GetRecord(), 0, true)
-+ }, recordPool.GetRecord(), 0, true, false)
- require.NoError(t, err)
-
- newLines := make([]logproto.Entry, numLogs)
-@@ -86,7 +86,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
- fmt.Fprintf(&expected, ""total ignored: %d out of %d"", numLogs, numLogs)
- expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String())
-
-- _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true)
-+ _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true, false)
- require.Error(t, err)
- require.Equal(t, expectErr.Error(), err.Error())
- })
-@@ -114,7 +114,7 @@ func TestPushDeduplication(t *testing.T) {
- {Timestamp: time.Unix(1, 0), Line: ""test""},
- {Timestamp: time.Unix(1, 0), Line: ""test""},
- {Timestamp: time.Unix(1, 0), Line: ""newer, better test""},
-- }, recordPool.GetRecord(), 0, true)
-+ }, recordPool.GetRecord(), 0, true, false)
- require.NoError(t, err)
- require.Len(t, s.chunks, 1)
- require.Equal(t, s.chunks[0].chunk.Size(), 2,
-@@ -144,7 +144,7 @@ func TestPushRejectOldCounter(t *testing.T) {
- {Timestamp: time.Unix(1, 0), Line: ""test""},
- {Timestamp: time.Unix(1, 0), Line: ""test""},
- {Timestamp: time.Unix(1, 0), Line: ""newer, better test""},
-- }, recordPool.GetRecord(), 0, true)
-+ }, recordPool.GetRecord(), 0, true, false)
- require.NoError(t, err)
- require.Len(t, s.chunks, 1)
- require.Equal(t, s.chunks[0].chunk.Size(), 2,
-@@ -153,13 +153,13 @@ func TestPushRejectOldCounter(t *testing.T) {
- // fail to push with a counter <= the streams internal counter
- _, err = s.Push(context.Background(), []logproto.Entry{
- {Timestamp: time.Unix(1, 0), Line: ""test""},
-- }, recordPool.GetRecord(), 2, true)
-+ }, recordPool.GetRecord(), 2, true, false)
- require.Equal(t, ErrEntriesExist, err)
-
- // succeed with a greater counter
- _, err = s.Push(context.Background(), []logproto.Entry{
- {Timestamp: time.Unix(1, 0), Line: ""test""},
-- }, recordPool.GetRecord(), 3, true)
-+ }, recordPool.GetRecord(), 3, true, false)
- require.Nil(t, err)
-
- }
-@@ -273,7 +273,7 @@ func TestUnorderedPush(t *testing.T) {
- if x.cutBefore {
- _ = s.cutChunk(context.Background())
- }
-- written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true)
-+ written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true, false)
- if x.err {
- require.NotNil(t, err)
- } else {
-@@ -334,7 +334,8 @@ func TestPushRateLimit(t *testing.T) {
- {Timestamp: time.Unix(1, 0), Line: ""aaaaaaaaab""},
- }
- // Counter should be 2 now since the first line will be deduped.
-- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
-+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true)
-+ require.Error(t, err)
- require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
- }
-
-@@ -348,7 +349,6 @@ func TestPushRateLimitAllOrNothing(t *testing.T) {
- limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
-
- cfg := defaultConfig()
-- cfg.RateLimitWholeStream = true
-
- s := newStream(
- cfg,
-@@ -368,7 +368,8 @@ func TestPushRateLimitAllOrNothing(t *testing.T) {
- }
-
- // Both entries have errors because rate limiting is done all at once
-- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
-+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true)
-+ require.Error(t, err)
- require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[0].Line))}).Error())
- require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
- }
-@@ -400,7 +401,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
- }
-
- // Push a first entry (it doesn't matter if we look like we're replaying or not)
-- _, err = s.Push(context.Background(), entries, nil, 1, true)
-+ _, err = s.Push(context.Background(), entries, nil, 1, true, false)
- require.Nil(t, err)
-
- // Create a sample outside the validity window
-@@ -409,11 +410,11 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
- }
-
- // Pretend it's not a replay, ensure we error
-- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true)
-+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, false)
- require.NotNil(t, err)
-
- // Now pretend it's a replay. The same write should succeed.
-- _, err = s.Push(context.Background(), entries, nil, 2, true)
-+ _, err = s.Push(context.Background(), entries, nil, 2, true, false)
- require.Nil(t, err)
-
- }
-@@ -455,7 +456,7 @@ func Benchmark_PushStream(b *testing.B) {
-
- for n := 0; n < b.N; n++ {
- rec := recordPool.GetRecord()
-- _, err := s.Push(ctx, e, rec, 0, true)
-+ _, err := s.Push(ctx, e, rec, 0, true, false)
- require.NoError(b, err)
- recordPool.PutRecord(rec)
- }
-diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
-index 9e2ccf59dc23e..a65128965351a 100644
---- a/pkg/loki/modules.go
-+++ b/pkg/loki/modules.go
-@@ -438,7 +438,6 @@ func (t *Loki) initQuerier() (services.Service, error) {
-
- func (t *Loki) initIngester() (_ services.Service, err error) {
- t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort
-- t.Cfg.Ingester.RateLimitWholeStream = t.Cfg.Distributor.ShardStreams.Enabled
-
- t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.overrides, t.tenantConfigs, prometheus.DefaultRegisterer)
- if err != nil {
-diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
-index 5100fa5910d71..54eb1dbbd2a41 100644
---- a/pkg/validation/limits.go
-+++ b/pkg/validation/limits.go
-@@ -10,9 +10,6 @@ import (
- ""github.com/go-kit/log/level""
- dskit_flagext ""github.com/grafana/dskit/flagext""
-
-- ""github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/deletionmode""
-- util_log ""github.com/grafana/loki/pkg/util/log""
--
- ""github.com/pkg/errors""
- ""github.com/prometheus/common/model""
- ""github.com/prometheus/common/sigv4""
-@@ -21,9 +18,12 @@ import (
- ""golang.org/x/time/rate""
- ""gopkg.in/yaml.v2""
-
-+ ""github.com/grafana/loki/pkg/distributor/shardstreams""
- ""github.com/grafana/loki/pkg/logql/syntax""
- ""github.com/grafana/loki/pkg/ruler/util""
-+ ""github.com/grafana/loki/pkg/storage/stores/indexshipper/compactor/deletionmode""
- ""github.com/grafana/loki/pkg/util/flagext""
-+ util_log ""github.com/grafana/loki/pkg/util/log""
- )
-
- const (
-@@ -148,6 +148,8 @@ type Limits struct {
-
- // Deprecated
- CompactorDeletionEnabled bool `yaml:""allow_deletes"" json:""allow_deletes""`
-+
-+ ShardStreams *shardstreams.Config `yaml:""shard_streams"" json:""shard_streams""`
- }
-
- type StreamRetention struct {
-@@ -230,6 +232,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
-
- // Deprecated
- dskit_flagext.DeprecatedFlag(f, ""compactor.allow-deletes"", ""Deprecated. Instead, see compactor.deletion-mode which is another per tenant configuration"", util_log.Logger)
-+
-+ l.ShardStreams = &shardstreams.Config{}
-+ l.ShardStreams.RegisterFlagsWithPrefix(""shard-streams"", f)
- }
-
- // UnmarshalYAML implements the yaml.Unmarshaler interface.
-@@ -608,6 +613,10 @@ func (o *Overrides) DeletionMode(userID string) string {
- return o.getOverridesForUser(userID).DeletionMode
- }
-
-+func (o *Overrides) ShardStreams(userID string) *shardstreams.Config {
-+ return o.getOverridesForUser(userID).ShardStreams
-+}
-+
- func (o *Overrides) DefaultLimits() *Limits {
- return o.defaultLimits
- }
-diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go
-index 41d185946ffbb..72ad9222c5264 100644
---- a/pkg/validation/limits_test.go
-+++ b/pkg/validation/limits_test.go
-@@ -72,6 +72,10 @@ ruler_remote_write_sigv4_config:
- per_tenant_override_config: """"
- per_tenant_override_period: 230s
- query_timeout: 5m
-+shard_streams:
-+ enabled: true
-+ desired_rate: 4mb
-+ logging_enabled: true
- `
- inputJSON := `
- {
-@@ -108,7 +112,12 @@ query_timeout: 5m
- },
- ""per_tenant_override_config"": """",
- ""per_tenant_override_period"": ""230s"",
-- ""query_timeout"": ""5m""
-+ ""query_timeout"": ""5m"",
-+ ""shard_streams"": {
-+ ""desired_rate"": ""4mb"",
-+ ""enabled"": true,
-+ ""logging_enabled"": true
-+ }
- }
- `",Loki,"Per-tenant stream sharding (#7311)
-
-**What this PR does / why we need it**:
-- Move stream sharding configuration to its own package to avoid cyclic
-imports
-- Change stream sharding to be a per-tenant configuration
-- Change ingesters to reject whole streams due to rate-limit based on
-per-tenant stream sharding
-- Change stream sharding flags prefix from `distributor.shard-stream` to
-`shard-stream`"
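
Editorial aside (not part of the commit above): the per-tenant stream sharding change adds a `shardstreams.Config` with `enabled`, `logging_enabled`, and `desired_rate` fields, registered under the new `shard-streams` flag prefix. The minimal, hypothetical Go sketch below only constructs that config and prints the defaults the diff and its tests describe (disabled, logging off, 3MB desired rate); the flag-set name and the standalone `main` wrapper are illustrative assumptions.

```go
package main

import (
	"flag"
	"fmt"

	"github.com/grafana/loki/pkg/distributor/shardstreams"
)

func main() {
	// Register the per-tenant stream-sharding flags under the new
	// "shard-streams" prefix (previously "distributor.shard-stream").
	var cfg shardstreams.Config
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	cfg.RegisterFlagsWithPrefix("shard-streams", fs)
	_ = fs.Parse(nil) // no CLI args, so the registered defaults are kept

	// Defaults per the config and tests added in this commit:
	// enabled=false, logging_enabled=false, desired_rate=3MB.
	fmt.Println(cfg.Enabled, cfg.LoggingEnabled, cfg.DesiredRate.String())
}
```

At runtime the per-tenant value is resolved through the new `Overrides.ShardStreams(userID)` accessor, which replaces the global `RateLimitWholeStream` ingester setting removed in this diff.
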
-4455cd9d7d173896969d1d3589b2e9084af393c2,2023-11-21 20:54:17,Quentin Bisson,"[helm] Fix tracing configuration (#11186)
-
-**What this PR does / why we need it**:
-
-This PR allows user to enable tracing in the new SSD setup and fixes
-incorrect documentation because it is currently impossible to enable
-tracing in this chart (cf.
-https://github.com/grafana/loki/blob/766f27645d2610a36eaaca8418482b740ae14215/cmd/loki/main.go#L81)
-
-**Which issue(s) this PR fixes**:
-Fixes #
-
-**Special notes for your reviewer**:
-
-**Checklist**
-- [x] Reviewed the
-[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
-guide (**required**)
-- [x] Documentation added
-- [x] Tests updated
-- [ ] `CHANGELOG.md` updated
-- [ ] If the change is worth mentioning in the release notes, add
-`add-to-release-notes` label
-- [ ] Changes that require user attention or interaction to upgrade are
-documented in `docs/sources/setup/upgrade/_index.md`
-- [ ] For Helm chart changes bump the Helm chart version in
-`production/helm/loki/Chart.yaml` and update
-`production/helm/loki/CHANGELOG.md` and
-`production/helm/loki/README.md`. [Example
-PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
-- [ ] If the change is deprecating or removing a configuration option,
-update the `deprecated-config.yaml` and `deleted-config.yaml` files
-respectively in the `tools/deprecated-config-checker` directory.
-[Example
-PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
-
----------
-
-Signed-off-by: QuentinBisson ",False,"diff --git a/docs/sources/operations/troubleshooting.md b/docs/sources/operations/troubleshooting.md
-index fd65e9a4d9a97..9fd4e4b8dcf38 100644
---- a/docs/sources/operations/troubleshooting.md
-+++ b/docs/sources/operations/troubleshooting.md
-@@ -173,7 +173,11 @@ Jaeger is running.
- If you deploy with Helm, use the following command:
-
- ```bash
--$ helm upgrade --install loki loki/loki --set ""loki.tracing.jaegerAgentHost=YOUR_JAEGER_AGENT_HOST""
-+$ helm upgrade --install loki loki/loki --set ""loki.tracing.enabled=true""
-+ --set ""read.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""read.extraEnv[0].value=""
-+ --set ""write.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""write.extraEnv[0].value=""
-+ --set ""backend.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""backend.extraEnv[0].value=""
-+ --set ""gateway.extraEnv[0].name=JAEGER_AGENT_HOST"" --set ""gateway.extraEnv[0].value=""
- ```
-
- ## Running Loki with Istio Sidecars
-diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
-index 833cc2c77edc8..ede76840c8f6c 100644
---- a/docs/sources/setup/install/helm/reference.md
-+++ b/docs/sources/setup/install/helm/reference.md
-@@ -2297,6 +2297,17 @@ null
-
- []
-
-+ |
-+
-+
-+ | loki.tracing |
-+ object |
-+ Enable tracing |
-+
-+{
-+ ""enabled"": false
-+}
-+
- |
-
-
-@@ -4393,15 +4404,6 @@ null
-
- ""1m""
-
-- |
--
--
-- | tracing.jaegerAgentHost |
-- string |
-- |
--
--""""
--
- |
-
-
-diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
-index 7f45b3155661c..51dd2deb2be54 100644
---- a/production/helm/loki/CHANGELOG.md
-+++ b/production/helm/loki/CHANGELOG.md
-@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
-
-[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.)
-
-+## 5.37.0
-+
-+- [FEATURE] Add support for enabling tracing.
-+
- ## 5.36.2
-
- - [BUGFIX] Add support to run dnsmasq
-diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
-index 06768ba93d2d1..39e800d6193e0 100644
---- a/production/helm/loki/Chart.yaml
-+++ b/production/helm/loki/Chart.yaml
-@@ -3,7 +3,7 @@ name: loki
- description: Helm chart for Grafana Loki in simple, scalable mode
- type: application
- appVersion: 2.9.2
--version: 5.36.3
-+version: 5.37.0
- home: https://grafana.github.io/helm-charts
- sources:
- - https://github.com/grafana/loki
-diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
-index b5cd5883819aa..7fc83086785b3 100644
---- a/production/helm/loki/README.md
-+++ b/production/helm/loki/README.md
-@@ -1,6 +1,6 @@
- # loki
-
--  
-+  
-
- Helm chart for Grafana Loki in simple, scalable mode
-
-diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
-index de6048aecc712..472882a226c8b 100644
---- a/production/helm/loki/values.yaml
-+++ b/production/helm/loki/values.yaml
-@@ -240,6 +240,9 @@ loki:
- distributor:
- {{- tpl (. | toYaml) $ | nindent 4 }}
- {{- end }}
-+
-+ tracing:
-+ enabled: {{ .Values.loki.tracing.enabled }}
- # Should authentication be enabled
- auth_enabled: true
- # -- memberlist configuration (overrides embedded default)
-@@ -344,6 +347,9 @@ loki:
- scheduler_address: '{{ include ""loki.querySchedulerAddress"" . }}'
- # -- Optional distributor configuration
- distributor: {}
-+ # -- Enable tracing
-+ tracing:
-+ enabled: false
- enterprise:
- # Enable enterprise features, license must be provided
- enabled: false
-@@ -1474,8 +1480,6 @@ networkPolicy:
- podSelector: {}
- # -- Specifies the namespace the discovery Pods are running in
- namespaceSelector: {}
--tracing:
-- jaegerAgentHost: """"
- # -------------------------------------
- # Configuration for `minio` child chart
- # -------------------------------------",unknown,"[helm] Fix tracing configuration (#11186)
-
-**What this PR does / why we need it**:
-
-This PR allows users to enable tracing in the new SSD setup and fixes
-incorrect documentation because it is currently impossible to enable
-tracing in this chart (cf.
-https://github.com/grafana/loki/blob/766f27645d2610a36eaaca8418482b740ae14215/cmd/loki/main.go#L81)
-
-**Which issue(s) this PR fixes**:
-Fixes #
-
-**Special notes for your reviewer**:
-
-**Checklist**
-- [x] Reviewed the
-[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
-guide (**required**)
-- [x] Documentation added
-- [x] Tests updated
-- [ ] `CHANGELOG.md` updated
-- [ ] If the change is worth mentioning in the release notes, add
-`add-to-release-notes` label
-- [ ] Changes that require user attention or interaction to upgrade are
-documented in `docs/sources/setup/upgrade/_index.md`
-- [ ] For Helm chart changes bump the Helm chart version in
-`production/helm/loki/Chart.yaml` and update
-`production/helm/loki/CHANGELOG.md` and
-`production/helm/loki/README.md`. [Example
-PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213)
-- [ ] If the change is deprecating or removing a configuration option,
-update the `deprecated-config.yaml` and `deleted-config.yaml` files
-respectively in the `tools/deprecated-config-checker` directory.
-[Example
-PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15)
-
----------
-
-Signed-off-by: QuentinBisson "
-e65f26d30f9742d407fc6aa1e32dba3320952620,2022-04-07 19:17:50,Tat Chiu Leung,"storage: make Azure blobID chunk delimiter configurable (#5777)
-
-* Make Azure chunk delimiter configurable
-
-* Changelog: #5777
-
-* doc update",False,"diff --git a/CHANGELOG.md b/CHANGELOG.md
-index 406ae6051bee2..4b19e02113a68 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -1,5 +1,6 @@
- ## Main
- * [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4.
-+* [5777](https://github.com/grafana/loki/pull/5777) **tatchiuleung** storage: make Azure blobID chunk delimiter configurable.
- * [5715](https://github.com/grafana/loki/pull/5715) **chaudum** Add option to push RFC5424 syslog messages from Promtail in syslog scrape target.
- * [5696](https://github.com/grafana/loki/pull/5696) **paullryan** don't block scraping of new logs from cloudflare within promtail if an error is received from cloudflare about too early logs.
- * [5685](https://github.com/grafana/loki/pull/5625) **chaudum** Fix bug in push request parser that allowed users to send arbitrary non-string data as ""log line"".
-diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
-index ea08d3d6888b9..33612a53ca375 100644
---- a/docs/sources/configuration/_index.md
-+++ b/docs/sources/configuration/_index.md
-@@ -743,6 +743,10 @@ The `azure_storage_config` configures Azure as a general storage for different d
- # CLI flag: -.azure.account-key
- [account_key: | default = """"]
-
-+# Chunk delimiter to build the blobID
-+# CLI flag: -.azure.chunk-delimiter
-+[chunk_delimiter: | default = ""-""]
-+
- # Preallocated buffer size for downloads.
- # CLI flag: -.azure.download-buffer-size
- [download_buffer_size: | default = 512000]
-diff --git a/pkg/storage/chunk/azure/blob_storage_client.go b/pkg/storage/chunk/azure/blob_storage_client.go
-index ce5807103ec84..f475136c169c6 100644
---- a/pkg/storage/chunk/azure/blob_storage_client.go
-+++ b/pkg/storage/chunk/azure/blob_storage_client.go
-@@ -85,6 +85,7 @@ type BlobStorageConfig struct {
- Environment string `yaml:""environment""`
- ContainerName string `yaml:""container_name""`
- AccountName string `yaml:""account_name""`
-+ ChunkDelimiter string `yaml:""chunk_delimiter""`
- AccountKey flagext.Secret `yaml:""account_key""`
- DownloadBufferSize int `yaml:""download_buffer_size""`
- UploadBufferSize int `yaml:""upload_buffer_size""`
-@@ -106,6 +107,7 @@ func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagS
- f.StringVar(&c.Environment, prefix+""azure.environment"", azureGlobal, fmt.Sprintf(""Azure Cloud environment. Supported values are: %s."", strings.Join(supportedEnvironments, "", "")))
- f.StringVar(&c.ContainerName, prefix+""azure.container-name"", ""cortex"", ""Name of the blob container used to store chunks. This container must be created before running cortex."")
- f.StringVar(&c.AccountName, prefix+""azure.account-name"", """", ""The Microsoft Azure account name to be used"")
-+ f.StringVar(&c.ChunkDelimiter, prefix+""azure.chunk-delimiter"", ""-"", ""Chunk delimiter for blob ID to be used"")
- f.Var(&c.AccountKey, prefix+""azure.account-key"", ""The Microsoft Azure account key to use."")
- f.DurationVar(&c.RequestTimeout, prefix+""azure.request-timeout"", 30*time.Second, ""Timeout for requests made against azure blob storage."")
- f.IntVar(&c.DownloadBufferSize, prefix+""azure.download-buffer-size"", 512000, ""Preallocated buffer size for downloads."")
-@@ -251,7 +253,7 @@ func (b *BlobStorage) PutObject(ctx context.Context, objectKey string, object io
- }
-
- func (b *BlobStorage) getBlobURL(blobID string, hedging bool) (azblob.BlockBlobURL, error) {
-- blobID = strings.Replace(blobID, "":"", ""-"", -1)
-+ blobID = strings.Replace(blobID, "":"", b.cfg.ChunkDelimiter, -1)
-
- // generate url for new chunk blob
- u, err := url.Parse(fmt.Sprintf(b.selectBlobURLFmt(), b.cfg.AccountName, b.cfg.ContainerName, blobID))",storage,"make Azure blobID chunk delimiter configurable (#5777)
-
-* Make Azure chunk delimiter configurable
-
-* Changelog: #5777
-
-* doc update"
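
Editorial aside (not part of the commit above): `getBlobURL` rewrites the `:` separators in a chunk key with the configurable delimiter before building the blob URL. The hypothetical Go snippet below just replays that `strings.Replace` call with the default `-` delimiter and an alternative `_`; the chunk ID literal is made up for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up chunk key: Loki chunk keys contain ':' separators, which the
	// Azure client rewrites with the configured delimiter (default "-",
	// CLI flag <prefix>.azure.chunk-delimiter) when forming the blob ID.
	chunkID := "fake/57f628c7f6d57aad:162c699f000:162c69a07eb:eb242d99"
	for _, delimiter := range []string{"-", "_"} {
		fmt.Println(strings.Replace(chunkID, ":", delimiter, -1))
	}
}
```
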
-9e19ff006ccf65e3bdba30d348325c0b41825ecd,2023-07-11 22:35:58,Trevor Whitney,"Add targetLabels to SeriesVolume requests (#9878)
-
-Adds optional `targetLabels` parameter to `series_volume` and
-`series_volume_range` requests that controls how volumes are aggregated.
-When provided, volumes are aggregated into the intersections of the
-provided `targetLabels` only.",False,"diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
-index 01ac5dfd499b6..dc3a5408a952d 100644
---- a/pkg/ingester/flush_test.go
-+++ b/pkg/ingester/flush_test.go
-@@ -352,7 +352,7 @@ func (s *testStore) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*la
- return &stats.Stats{}, nil
- }
-
--func (s *testStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (s *testStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- return &logproto.VolumeResponse{}, nil
- }
-
-diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
-index 474ffe51ce535..3968090e816cb 100644
---- a/pkg/ingester/ingester.go
-+++ b/pkg/ingester/ingester.go
-@@ -173,7 +173,7 @@ type ChunkStore interface {
- GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error)
- GetSchemaConfigs() []config.PeriodConfig
- Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*index_stats.Stats, error)
-- SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
-+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
- }
-
- // Interface is an interface for the Ingester
-@@ -1164,7 +1164,7 @@ func (i *Ingester) GetSeriesVolume(ctx context.Context, req *logproto.VolumeRequ
- return instance.GetSeriesVolume(ctx, req)
- }),
- f(func() (*logproto.VolumeResponse, error) {
-- return i.store.SeriesVolume(ctx, user, req.From, req.Through, req.Limit, matchers...)
-+ return i.store.SeriesVolume(ctx, user, req.From, req.Through, req.Limit, req.TargetLabels, matchers...)
- }),
- }
- resps := make([]*logproto.VolumeResponse, len(jobs))
-diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
-index 293885f3689cc..a9b84fb18cb7f 100644
---- a/pkg/ingester/ingester_test.go
-+++ b/pkg/ingester/ingester_test.go
-@@ -470,7 +470,7 @@ func (s *mockStore) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*la
- }, nil
- }
-
--func (s *mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, limit int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (s *mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, limit int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- return &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
- {Name: `{foo=""bar""}`, Volume: 38},
-diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
-index 1e7ce267ed5f6..8f637b626a891 100644
---- a/pkg/ingester/instance.go
-+++ b/pkg/ingester/instance.go
-@@ -629,16 +629,8 @@ func (i *instance) GetSeriesVolume(ctx context.Context, req *logproto.VolumeRequ
- return nil, err
- }
-
-- matchAny := len(matchers) == 0
-- labelsToMatch := make(map[string]struct{})
-- for _, m := range matchers {
-- if m.Name == """" {
-- matchAny = true
-- continue
-- }
--
-- labelsToMatch[m.Name] = struct{}{}
-- }
-+ labelsToMatch, matchers, matchAny := util.PrepareLabelsAndMatchers(req.TargetLabels, matchers)
-+ matchAny = matchAny || len(matchers) == 0
-
- seriesNames := make(map[uint64]string)
- seriesLabels := labels.Labels(make([]labels.Label, 0, len(labelsToMatch)))
-diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
-index ba74858520dad..c31f2949ceeee 100644
---- a/pkg/ingester/instance_test.go
-+++ b/pkg/ingester/instance_test.go
-@@ -900,6 +900,57 @@ func TestInstance_SeriesVolume(t *testing.T) {
- {Name: `{host=""agent"", job=""3"", log_stream=""dispatcher""}`, Volume: 90},
- }, volumes.Volumes)
- })
-+
-+ t.Run(""with targetLabels"", func(t *testing.T) {
-+ t.Run(""all targetLabels are added to matchers"", func(t *testing.T) {
-+ instance := defaultInstance(t)
-+ volumes, err := instance.GetSeriesVolume(context.Background(), &logproto.VolumeRequest{
-+ From: 0,
-+ Through: 1.1 * 1e3, //milliseconds
-+ Matchers: `{}`,
-+ Limit: 2,
-+ TargetLabels: []string{""log_stream""},
-+ })
-+ require.NoError(t, err)
-+
-+ require.Equal(t, []logproto.Volume{
-+ {Name: `{log_stream=""dispatcher""}`, Volume: 90},
-+ {Name: `{log_stream=""worker""}`, Volume: 70},
-+ }, volumes.Volumes)
-+ })
-+
-+ t.Run(""with a specific equals matcher"", func(t *testing.T) {
-+ instance := defaultInstance(t)
-+ volumes, err := instance.GetSeriesVolume(context.Background(), &logproto.VolumeRequest{
-+ From: 0,
-+ Through: 1.1 * 1e3, //milliseconds
-+ Matchers: `{log_stream=""dispatcher""}`,
-+ Limit: 2,
-+ TargetLabels: []string{""host""},
-+ })
-+ require.NoError(t, err)
-+
-+ require.Equal(t, []logproto.Volume{
-+ {Name: `{host=""agent""}`, Volume: 90},
-+ }, volumes.Volumes)
-+ })
-+
-+ t.Run(""with a specific regexp matcher"", func(t *testing.T) {
-+ instance := defaultInstance(t)
-+ volumes, err := instance.GetSeriesVolume(context.Background(), &logproto.VolumeRequest{
-+ From: 0,
-+ Through: 1.1 * 1e3, //milliseconds
-+ Matchers: `{log_stream=~"".+""}`,
-+ Limit: 2,
-+ TargetLabels: []string{""host"", ""job""},
-+ })
-+ require.NoError(t, err)
-+
-+ require.Equal(t, []logproto.Volume{
-+ {Name: `{host=""agent"", job=""3""}`, Volume: 160},
-+ }, volumes.Volumes)
-+ })
-+ })
- }
-
- func TestGetStats(t *testing.T) {
-diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go
-index 03bb05b9d60a0..0f6a49f1982bc 100644
---- a/pkg/loghttp/query.go
-+++ b/pkg/loghttp/query.go
-@@ -4,6 +4,7 @@ import (
- ""errors""
- ""fmt""
- ""net/http""
-+ ""strings""
- ""time""
- ""unsafe""
-
-@@ -348,11 +349,11 @@ func ParseIndexStatsQuery(r *http.Request) (*RangeQuery, error) {
- }
-
- type SeriesVolumeInstantQuery struct {
-- Start time.Time
-- End time.Time
-- Query string
-- Ts time.Time
-- Limit uint32
-+ Start time.Time
-+ End time.Time
-+ Query string
-+ Limit uint32
-+ TargetLabels []string
- }
-
- func ParseSeriesVolumeInstantQuery(r *http.Request) (*SeriesVolumeInstantQuery, error) {
-@@ -367,9 +368,9 @@ func ParseSeriesVolumeInstantQuery(r *http.Request) (*SeriesVolumeInstantQuery,
- }
-
- svInstantQuery := SeriesVolumeInstantQuery{
-- Query: result.Query,
-- Ts: result.Ts,
-- Limit: result.Limit,
-+ Query: result.Query,
-+ Limit: result.Limit,
-+ TargetLabels: targetLabels(r),
- }
-
- svInstantQuery.Start, svInstantQuery.End, err = bounds(r)
-@@ -385,12 +386,12 @@ func ParseSeriesVolumeInstantQuery(r *http.Request) (*SeriesVolumeInstantQuery,
- }
-
- type SeriesVolumeRangeQuery struct {
-- Start time.Time
-- End time.Time
-- Step time.Duration
-- Interval time.Duration
-- Query string
-- Limit uint32
-+ Start time.Time
-+ End time.Time
-+ Step time.Duration
-+ Query string
-+ Limit uint32
-+ TargetLabels []string
- }
-
- func ParseSeriesVolumeRangeQuery(r *http.Request) (*SeriesVolumeRangeQuery, error) {
-@@ -405,15 +406,24 @@ func ParseSeriesVolumeRangeQuery(r *http.Request) (*SeriesVolumeRangeQuery, erro
- }
-
- return &SeriesVolumeRangeQuery{
-- Start: result.Start,
-- End: result.End,
-- Step: result.Step,
-- Interval: result.Interval,
-- Query: result.Query,
-- Limit: result.Limit,
-+ Start: result.Start,
-+ End: result.End,
-+ Step: result.Step,
-+ Query: result.Query,
-+ Limit: result.Limit,
-+ TargetLabels: targetLabels(r),
- }, nil
- }
-
-+func targetLabels(r *http.Request) []string {
-+ lbls := strings.Split(r.Form.Get(""targetLabels""), "","")
-+ if (len(lbls) == 1 && lbls[0] == """") || len(lbls) == 0 {
-+ return nil
-+ }
-+
-+ return lbls
-+}
-+
- func labelVolumeLimit(r *http.Request) error {
- l, err := parseInt(r.Form.Get(""limit""), seriesvolume.DefaultLimit)
- if err != nil {
-diff --git a/pkg/loghttp/query_test.go b/pkg/loghttp/query_test.go
-index eb841c16935d8..96192fcc5a79c 100644
---- a/pkg/loghttp/query_test.go
-+++ b/pkg/loghttp/query_test.go
-@@ -268,3 +268,57 @@ func Test_QueryResponseUnmarshal(t *testing.T) {
- })
- }
- }
-+
-+func Test_ParseSeriesVolumeInstantQuery(t *testing.T) {
-+ req := &http.Request{
-+ URL: mustParseURL(`?query={foo=""bar""}` +
-+ `&start=2017-06-10T21:42:24.760738998Z` +
-+ `&end=2017-07-10T21:42:24.760738998Z` +
-+ `&limit=1000` +
-+ `&targetLabels=foo,bar`,
-+ ),
-+ }
-+
-+ err := req.ParseForm()
-+ require.NoError(t, err)
-+
-+ actual, err := ParseSeriesVolumeInstantQuery(req)
-+ require.NoError(t, err)
-+
-+ expected := &SeriesVolumeInstantQuery{
-+ Start: time.Date(2017, 06, 10, 21, 42, 24, 760738998, time.UTC),
-+ End: time.Date(2017, 07, 10, 21, 42, 24, 760738998, time.UTC),
-+ Query: `{foo=""bar""}`,
-+ Limit: 1000,
-+ TargetLabels: []string{""foo"", ""bar""},
-+ }
-+ require.Equal(t, expected, actual)
-+}
-+
-+func Test_ParseSeriesVolumeRangeQuery(t *testing.T) {
-+ req := &http.Request{
-+ URL: mustParseURL(`?query={foo=""bar""}` +
-+ `&start=2017-06-10T21:42:24.760738998Z` +
-+ `&end=2017-07-10T21:42:24.760738998Z` +
-+ `&limit=1000` +
-+ `&step=3600` +
-+ `&targetLabels=foo,bar`,
-+ ),
-+ }
-+
-+ err := req.ParseForm()
-+ require.NoError(t, err)
-+
-+ actual, err := ParseSeriesVolumeRangeQuery(req)
-+ require.NoError(t, err)
-+
-+ expected := &SeriesVolumeRangeQuery{
-+ Start: time.Date(2017, 06, 10, 21, 42, 24, 760738998, time.UTC),
-+ End: time.Date(2017, 07, 10, 21, 42, 24, 760738998, time.UTC),
-+ Query: `{foo=""bar""}`,
-+ Limit: 1000,
-+ Step: time.Hour,
-+ TargetLabels: []string{""foo"", ""bar""},
-+ }
-+ require.Equal(t, expected, actual)
-+}
-diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
-index 4962a04f24d4a..5e5991f5ff0fe 100644
---- a/pkg/logproto/logproto.pb.go
-+++ b/pkg/logproto/logproto.pb.go
-@@ -2286,11 +2286,12 @@ func (m *IndexStatsResponse) GetEntries() uint64 {
- }
-
- type VolumeRequest struct {
-- From github_com_prometheus_common_model.Time `protobuf:""varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time"" json:""from""`
-- Through github_com_prometheus_common_model.Time `protobuf:""varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time"" json:""through""`
-- Matchers string `protobuf:""bytes,3,opt,name=matchers,proto3"" json:""matchers,omitempty""`
-- Limit int32 `protobuf:""varint,4,opt,name=limit,proto3"" json:""limit,omitempty""`
-- Step int64 `protobuf:""varint,5,opt,name=step,proto3"" json:""step,omitempty""`
-+ From github_com_prometheus_common_model.Time `protobuf:""varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time"" json:""from""`
-+ Through github_com_prometheus_common_model.Time `protobuf:""varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time"" json:""through""`
-+ Matchers string `protobuf:""bytes,3,opt,name=matchers,proto3"" json:""matchers,omitempty""`
-+ Limit int32 `protobuf:""varint,4,opt,name=limit,proto3"" json:""limit,omitempty""`
-+ Step int64 `protobuf:""varint,5,opt,name=step,proto3"" json:""step,omitempty""`
-+ TargetLabels []string `protobuf:""bytes,6,rep,name=targetLabels,proto3"" json:""targetLabels,omitempty""`
- }
-
- func (m *VolumeRequest) Reset() { *m = VolumeRequest{} }
-@@ -2346,6 +2347,13 @@ func (m *VolumeRequest) GetStep() int64 {
- return 0
- }
-
-+func (m *VolumeRequest) GetTargetLabels() []string {
-+ if m != nil {
-+ return m.TargetLabels
-+ }
-+ return nil
-+}
-+
- type VolumeResponse struct {
- Volumes []Volume `protobuf:""bytes,1,rep,name=volumes,proto3"" json:""volumes""`
- Limit int32 `protobuf:""varint,2,opt,name=limit,proto3"" json:""limit,omitempty""`
-@@ -2501,146 +2509,147 @@ func init() {
- func init() { proto.RegisterFile(""pkg/logproto/logproto.proto"", fileDescriptor_c28a5f14f1f4c79a) }
-
- var fileDescriptor_c28a5f14f1f4c79a = []byte{
-- // 2213 bytes of a gzipped FileDescriptorProto
-+ // 2228 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x39, 0xcd, 0x6f, 0x1b, 0xc7,
-- 0xf5, 0x1c, 0x72, 0x49, 0x91, 0x8f, 0xd4, 0x87, 0x47, 0x8c, 0xad, 0x1f, 0x6d, 0x93, 0xf2, 0x20,
-- 0x3f, 0x5b, 0xb0, 0x1d, 0x32, 0x56, 0xda, 0xd4, 0xb1, 0x9b, 0x16, 0xa6, 0x14, 0x3b, 0xf2, 0x77,
-- 0x46, 0xae, 0x5b, 0x04, 0x0d, 0x8c, 0x15, 0x39, 0xfc, 0x80, 0xb9, 0x5c, 0x7a, 0x77, 0x19, 0x47,
-- 0x40, 0x0f, 0xfd, 0x07, 0x02, 0xe4, 0x56, 0xf4, 0x52, 0xf4, 0x50, 0xa0, 0x45, 0x81, 0x5e, 0xfa,
-- 0x07, 0xb4, 0x3d, 0x14, 0xa8, 0x7b, 0x73, 0x6f, 0x41, 0x0f, 0x6c, 0x2d, 0xa3, 0x40, 0xa1, 0x53,
-- 0xfe, 0x81, 0x16, 0xc5, 0x7c, 0xed, 0xce, 0xae, 0xa8, 0x24, 0x74, 0x0d, 0x14, 0xbe, 0x88, 0xfb,
-- 0xde, 0xbc, 0x79, 0xf3, 0xbe, 0xdf, 0xbc, 0x11, 0x1c, 0x1f, 0x3d, 0xec, 0x36, 0x06, 0x6e, 0x77,
-- 0xe4, 0xb9, 0x81, 0x1b, 0x7e, 0xd4, 0xc5, 0x5f, 0x9c, 0xd7, 0x70, 0xa5, 0xdc, 0x75, 0xbb, 0xae,
-- 0xa4, 0xe1, 0x5f, 0x72, 0xbd, 0x52, 0xeb, 0xba, 0x6e, 0x77, 0xc0, 0x1a, 0x02, 0xda, 0x19, 0x77,
-- 0x1a, 0x41, 0xdf, 0x61, 0x7e, 0x60, 0x3b, 0x23, 0x45, 0xb0, 0xaa, 0xb8, 0x3f, 0x1a, 0x38, 0x6e,
-- 0x9b, 0x0d, 0x1a, 0x7e, 0x60, 0x07, 0xbe, 0xfc, 0xab, 0x28, 0x96, 0x39, 0xc5, 0x68, 0xec, 0xf7,
-- 0xc4, 0x1f, 0x89, 0x24, 0x65, 0xc0, 0xdb, 0x81, 0xc7, 0x6c, 0x87, 0xda, 0x01, 0xf3, 0x29, 0x7b,
-- 0x34, 0x66, 0x7e, 0x40, 0x6e, 0xc1, 0x72, 0x0c, 0xeb, 0x8f, 0xdc, 0xa1, 0xcf, 0xf0, 0xdb, 0x50,
-- 0xf4, 0x23, 0xf4, 0x0a, 0x5a, 0xcd, 0xac, 0x15, 0xd7, 0xcb, 0xf5, 0x50, 0x95, 0x68, 0x0f, 0x35,
-- 0x09, 0xc9, 0xcf, 0x10, 0x40, 0xb4, 0x86, 0xab, 0x00, 0x72, 0xf5, 0x7d, 0xdb, 0xef, 0xad, 0xa0,
-- 0x55, 0xb4, 0x66, 0x51, 0x03, 0x83, 0xcf, 0xc3, 0x91, 0x08, 0xba, 0xed, 0x6e, 0xf7, 0x6c, 0xaf,
-- 0xbd, 0x92, 0x16, 0x64, 0x07, 0x17, 0x30, 0x06, 0xcb, 0xb3, 0x03, 0xb6, 0x92, 0x59, 0x45, 0x6b,
-- 0x19, 0x2a, 0xbe, 0xf1, 0x51, 0xc8, 0x05, 0x6c, 0x68, 0x0f, 0x83, 0x15, 0x6b, 0x15, 0xad, 0x15,
-- 0xa8, 0x82, 0x38, 0x9e, 0xeb, 0xce, 0xfc, 0x95, 0xec, 0x2a, 0x5a, 0x9b, 0xa7, 0x0a, 0x22, 0x7f,
-- 0x4a, 0x43, 0xe9, 0x83, 0x31, 0xf3, 0x76, 0x95, 0x01, 0x70, 0x05, 0xf2, 0x3e, 0x1b, 0xb0, 0x56,
-- 0xe0, 0x7a, 0x42, 0xc0, 0x02, 0x0d, 0x61, 0x5c, 0x86, 0xec, 0xa0, 0xef, 0xf4, 0x03, 0x21, 0xd2,
-- 0x3c, 0x95, 0x00, 0xbe, 0x04, 0x59, 0x3f, 0xb0, 0xbd, 0x40, 0xc8, 0x51, 0x5c, 0xaf, 0xd4, 0xa5,
-- 0xc3, 0xea, 0xda, 0x61, 0xf5, 0x7b, 0xda, 0x61, 0xcd, 0xfc, 0x93, 0x49, 0x2d, 0xf5, 0xd9, 0xdf,
-- 0x6a, 0x88, 0xca, 0x2d, 0xf8, 0x6d, 0xc8, 0xb0, 0x61, 0x5b, 0xc8, 0xfa, 0x75, 0x77, 0xf2, 0x0d,
-- 0xf8, 0x02, 0x14, 0xda, 0x7d, 0x8f, 0xb5, 0x82, 0xbe, 0x3b, 0x14, 0x1a, 0x2d, 0xac, 0x2f, 0x47,
-- 0xde, 0xd8, 0xd4, 0x4b, 0x34, 0xa2, 0xc2, 0xe7, 0x21, 0xe7, 0x73, 0xb3, 0xf9, 0x2b, 0x73, 0xab,
-- 0x99, 0xb5, 0x42, 0xb3, 0xbc, 0x3f, 0xa9, 0x2d, 0x49, 0xcc, 0x79, 0xd7, 0xe9, 0x07, 0xcc, 0x19,
-- 0x05, 0xbb, 0x54, 0xd1, 0xe0, 0xb3, 0x30, 0xd7, 0x66, 0x03, 0xc6, 0x9d, 0x9d, 0x17, 0xce, 0x5e,
-- 0x32, 0xd8, 0x8b, 0x05, 0xaa, 0x09, 0xae, 0x5b, 0xf9, 0xdc, 0xd2, 0x1c, 0xf9, 0x37, 0x02, 0xbc,
-- 0x6d, 0x3b, 0xa3, 0x01, 0xfb, 0xda, 0xf6, 0x0c, 0x2d, 0x97, 0x7e, 0x61, 0xcb, 0x65, 0x66, 0xb5,
-- 0x5c, 0x64, 0x06, 0x6b, 0x36, 0x33, 0x64, 0xbf, 0xc2, 0x0c, 0xe4, 0x26, 0xe4, 0x24, 0xea, 0xab,
-- 0x62, 0x28, 0xd2, 0x39, 0xa3, 0xb5, 0x59, 0x8a, 0xb4, 0xc9, 0x08, 0x39, 0xc9, 0xcf, 0x11, 0xcc,
-- 0x2b, 0x43, 0xaa, 0x1c, 0xdc, 0x81, 0x39, 0x99, 0x03, 0x3a, 0xff, 0x8e, 0x25, 0xf3, 0xef, 0x4a,
-- 0xdb, 0x1e, 0x05, 0xcc, 0x6b, 0x36, 0x9e, 0x4c, 0x6a, 0xe8, 0xaf, 0x93, 0xda, 0x99, 0x6e, 0x3f,
-- 0xe8, 0x8d, 0x77, 0xea, 0x2d, 0xd7, 0x69, 0x74, 0x3d, 0xbb, 0x63, 0x0f, 0xed, 0xc6, 0xc0, 0x7d,
-- 0xd8, 0x6f, 0xe8, 0x7a, 0xa0, 0xf3, 0x56, 0x33, 0xc6, 0xe7, 0x84, 0x74, 0x81, 0xaf, 0x3c, 0xb2,
-- 0x58, 0x97, 0x65, 0x64, 0x6b, 0xd8, 0x65, 0x3e, 0xe7, 0x6c, 0x71, 0x63, 0x52, 0x49, 0x43, 0x7e,
-- 0x04, 0xcb, 0x31, 0x87, 0x2b, 0x39, 0x2f, 0x42, 0xce, 0x67, 0x5e, 0x3f, 0x2c, 0x13, 0x86, 0xc9,
-- 0xb6, 0x05, 0xbe, 0xb9, 0xa0, 0xe4, 0xcb, 0x49, 0x98, 0x2a, 0xfa, 0xd9, 0x4e, 0xff, 0x23, 0x82,
-- 0xd2, 0x4d, 0x7b, 0x87, 0x0d, 0x74, 0xa4, 0x61, 0xb0, 0x86, 0xb6, 0xc3, 0x94, 0xc5, 0xc5, 0x37,
-- 0x4f, 0xfb, 0x8f, 0xed, 0xc1, 0x98, 0x49, 0x96, 0x79, 0xaa, 0xa0, 0x59, 0x73, 0x16, 0xbd, 0x70,
-- 0xce, 0xa2, 0x28, 0xf2, 0xca, 0x90, 0x7d, 0xc4, 0x0d, 0x25, 0xf2, 0xb5, 0x40, 0x25, 0x40, 0xce,
-- 0xc0, 0xbc, 0xd2, 0x42, 0x99, 0x2f, 0x12, 0x99, 0x9b, 0xaf, 0xa0, 0x45, 0x26, 0x0e, 0xe4, 0xa4,
-- 0xb5, 0xf1, 0xeb, 0x50, 0x08, 0x7b, 0x80, 0xd0, 0x36, 0xd3, 0xcc, 0xed, 0x4f, 0x6a, 0xe9, 0xc0,
-- 0xa7, 0xd1, 0x02, 0xae, 0x41, 0x56, 0xec, 0x14, 0x9a, 0xa3, 0x66, 0x61, 0x7f, 0x52, 0x93, 0x08,
-- 0x2a, 0x7f, 0xf0, 0x09, 0xb0, 0x7a, 0xbc, 0x0c, 0x73, 0x13, 0x58, 0xcd, 0xfc, 0xfe, 0xa4, 0x26,
-- 0x60, 0x2a, 0xfe, 0x92, 0x6b, 0x50, 0xba, 0xc9, 0xba, 0x76, 0x6b, 0x57, 0x1d, 0x5a, 0xd6, 0xec,
-- 0xf8, 0x81, 0x48, 0xf3, 0x38, 0x05, 0xa5, 0xf0, 0xc4, 0x07, 0x8e, 0xaf, 0x82, 0xba, 0x18, 0xe2,
-- 0x6e, 0xf9, 0xe4, 0xa7, 0x08, 0x94, 0x9f, 0x31, 0x81, 0xdc, 0x80, 0xeb, 0xea, 0x4b, 0x1f, 0x35,
-- 0x61, 0x7f, 0x52, 0x53, 0x18, 0xaa, 0x7e, 0xf1, 0x65, 0x98, 0xf3, 0xc5, 0x89, 0x9c, 0x59, 0x32,
-- 0x7c, 0xc4, 0x42, 0x73, 0x91, 0x87, 0xc1, 0xfe, 0xa4, 0xa6, 0x09, 0xa9, 0xfe, 0xc0, 0xf5, 0x58,
-- 0x7f, 0x91, 0x8a, 0x2d, 0xec, 0x4f, 0x6a, 0x06, 0xd6, 0xec, 0x37, 0xe4, 0x27, 0x08, 0x8a, 0xf7,
-- 0xec, 0x7e, 0x18, 0x42, 0xa1, 0x8b, 0x90, 0xe1, 0x22, 0x9e, 0xce, 0x6d, 0x36, 0xb0, 0x77, 0xaf,
-- 0xba, 0x9e, 0xe0, 0x39, 0x4f, 0x43, 0x38, 0x6a, 0x09, 0xd6, 0xd4, 0x96, 0x90, 0x9d, 0xb9, 0xb0,
-- 0x5d, 0xb7, 0xf2, 0xe9, 0xa5, 0x0c, 0xf9, 0x0d, 0x82, 0x92, 0x94, 0x4c, 0x85, 0xc5, 0x0f, 0x21,
-- 0x27, 0x05, 0x17, 0xb2, 0x7d, 0x49, 0xf2, 0x9f, 0x9b, 0x25, 0xf1, 0x15, 0x4f, 0xfc, 0x5d, 0x58,
-- 0x68, 0x7b, 0xee, 0x68, 0xc4, 0xda, 0xdb, 0xaa, 0xc4, 0xa4, 0x93, 0x25, 0x66, 0xd3, 0x5c, 0xa7,
-- 0x09, 0x72, 0xf2, 0x67, 0x04, 0xf3, 0x2a, 0x9b, 0x95, 0x2d, 0x43, 0x1b, 0xa0, 0x17, 0x2e, 0xee,
-- 0xe9, 0x59, 0x8b, 0xfb, 0x51, 0xc8, 0x75, 0x3d, 0x77, 0x3c, 0xf2, 0x57, 0x32, 0x32, 0x77, 0x24,
-- 0x34, 0x5b, 0xd1, 0x27, 0xd7, 0x61, 0x41, 0xab, 0x72, 0x48, 0x49, 0xab, 0x24, 0x4b, 0xda, 0x56,
-- 0x9b, 0x0d, 0x83, 0x7e, 0xa7, 0x1f, 0x16, 0x29, 0x45, 0x4f, 0x3e, 0x45, 0xb0, 0x94, 0x24, 0xc1,
-- 0xdf, 0x31, 0xf2, 0x80, 0xb3, 0x3b, 0x7d, 0x38, 0xbb, 0xba, 0x28, 0x0e, 0xfe, 0x7b, 0xc3, 0xc0,
-- 0xdb, 0xd5, 0x39, 0x52, 0x79, 0x07, 0x8a, 0x06, 0x9a, 0x37, 0x8f, 0x87, 0x4c, 0xc7, 0x2c, 0xff,
-- 0x8c, 0x92, 0x35, 0x2d, 0xe3, 0x58, 0x00, 0x97, 0xd2, 0x17, 0x11, 0x8f, 0xf8, 0xf9, 0x98, 0x27,
-- 0xf1, 0x45, 0xb0, 0x3a, 0x9e, 0xeb, 0xcc, 0xe4, 0x26, 0xb1, 0x03, 0x7f, 0x03, 0xd2, 0x81, 0x3b,
-- 0x93, 0x93, 0xd2, 0x81, 0xcb, 0x7d, 0xa4, 0x94, 0xcf, 0xc8, 0x1b, 0x9a, 0x84, 0xc8, 0xaf, 0x11,
-- 0x2c, 0xf2, 0x3d, 0xd2, 0x02, 0x1b, 0xbd, 0xf1, 0xf0, 0x21, 0x5e, 0x83, 0x25, 0x7e, 0xd2, 0x83,
-- 0xbe, 0xea, 0x00, 0x0f, 0xfa, 0x6d, 0xa5, 0xe6, 0x02, 0xc7, 0xeb, 0xc6, 0xb0, 0xd5, 0xc6, 0xc7,
-- 0x60, 0x6e, 0xec, 0x4b, 0x02, 0xa9, 0x73, 0x8e, 0x83, 0x5b, 0x6d, 0x7c, 0xce, 0x38, 0x8e, 0xdb,
-- 0xda, 0xb8, 0x26, 0x09, 0x1b, 0xde, 0xb5, 0xfb, 0x5e, 0x58, 0x7c, 0xce, 0x40, 0xae, 0xc5, 0x0f,
-- 0x96, 0x71, 0xc2, 0x3b, 0x50, 0x48, 0x2c, 0x04, 0xa2, 0x6a, 0x99, 0x7c, 0x13, 0x0a, 0xe1, 0xee,
-- 0xa9, 0x8d, 0x67, 0xaa, 0x07, 0xc8, 0x65, 0x58, 0x94, 0x45, 0x75, 0xfa, 0xe6, 0xd2, 0xb4, 0xcd,
-- 0x25, 0xbd, 0xf9, 0x38, 0x64, 0xa5, 0x55, 0x30, 0x58, 0x6d, 0x3b, 0xb0, 0xf5, 0x16, 0xfe, 0x4d,
-- 0x56, 0xe0, 0xe8, 0x3d, 0xcf, 0x1e, 0xfa, 0x1d, 0xe6, 0x09, 0xa2, 0x30, 0x76, 0xc9, 0x6b, 0xb0,
-- 0xcc, 0x0b, 0x09, 0xf3, 0xfc, 0x0d, 0x77, 0x3c, 0x0c, 0xf4, 0x45, 0xff, 0x3c, 0x94, 0xe3, 0x68,
-- 0x15, 0xea, 0x65, 0xc8, 0xb6, 0x38, 0x42, 0x70, 0x9f, 0xa7, 0x12, 0x20, 0xbf, 0x40, 0x80, 0xaf,
-- 0xb1, 0x40, 0xb0, 0xde, 0xda, 0xf4, 0x8d, 0xcb, 0x9d, 0x63, 0x07, 0xad, 0x1e, 0xf3, 0x7c, 0x7d,
-- 0xd1, 0xd1, 0xf0, 0xff, 0xe2, 0x72, 0x47, 0x2e, 0xc0, 0x72, 0x4c, 0x4a, 0xa5, 0x53, 0x05, 0xf2,
-- 0x2d, 0x85, 0x53, 0x4d, 0x35, 0x84, 0xc9, 0x6f, 0xd3, 0x90, 0x97, 0xbe, 0x65, 0x1d, 0x7c, 0x01,
-- 0x8a, 0x1d, 0x1e, 0x6b, 0xde, 0xc8, 0xeb, 0x2b, 0x13, 0x58, 0xcd, 0xc5, 0xfd, 0x49, 0xcd, 0x44,
-- 0x53, 0x13, 0xc0, 0x6f, 0x24, 0x02, 0xaf, 0x59, 0xde, 0x9b, 0xd4, 0x72, 0xdf, 0xe3, 0xc1, 0xb7,
-- 0xc9, 0xdb, 0x9b, 0x08, 0xc3, 0xcd, 0x30, 0x1c, 0x6f, 0xa8, 0x6c, 0x13, 0x37, 0xbd, 0xe6, 0xb7,
-- 0xb8, 0xf8, 0x89, 0x7a, 0x3d, 0xf2, 0x5c, 0x87, 0x05, 0x3d, 0x36, 0xf6, 0x1b, 0x2d, 0xd7, 0x71,
-- 0xdc, 0x61, 0x43, 0x8c, 0x75, 0x42, 0x69, 0xde, 0xa3, 0xf9, 0x76, 0x95, 0x80, 0xf7, 0x60, 0x2e,
-- 0xe8, 0x79, 0xee, 0xb8, 0xdb, 0x13, 0xed, 0x27, 0xd3, 0xbc, 0x34, 0x3b, 0x3f, 0xcd, 0x81, 0xea,
-- 0x0f, 0x7c, 0x8a, 0x5b, 0x8b, 0xb5, 0x1e, 0xfa, 0x63, 0x47, 0x0e, 0x4b, 0xcd, 0xec, 0xfe, 0xa4,
-- 0x86, 0xde, 0xa0, 0x21, 0x9a, 0x7c, 0x9a, 0x86, 0x9a, 0x08, 0xe1, 0xfb, 0xe2, 0x6e, 0x72, 0xd5,
-- 0xf5, 0x6e, 0xb1, 0xc0, 0xeb, 0xb7, 0x6e, 0xdb, 0x0e, 0xd3, 0xb1, 0x51, 0x83, 0xa2, 0x23, 0x90,
-- 0x0f, 0x8c, 0xe4, 0x00, 0x27, 0xa4, 0xc3, 0x27, 0x01, 0x44, 0xda, 0xc9, 0x75, 0x99, 0x27, 0x05,
-- 0x81, 0x11, 0xcb, 0x1b, 0x31, 0x4b, 0x35, 0x66, 0xd4, 0x4c, 0x59, 0x68, 0x2b, 0x69, 0xa1, 0x99,
-- 0xf9, 0x84, 0x66, 0x31, 0x63, 0x3d, 0x1b, 0x8f, 0x75, 0xf2, 0x17, 0x04, 0xd5, 0x9b, 0x5a, 0xf2,
-- 0x17, 0x34, 0x87, 0xd6, 0x37, 0xfd, 0x92, 0xf4, 0xcd, 0xfc, 0x77, 0xfa, 0x92, 0x3f, 0x18, 0x29,
-- 0x4f, 0x59, 0x47, 0xeb, 0xb1, 0x61, 0xb4, 0x8b, 0x97, 0x21, 0x66, 0xfa, 0x25, 0xba, 0x25, 0x93,
-- 0x70, 0xcb, 0xbb, 0x51, 0x39, 0x10, 0x1a, 0xa8, 0x72, 0x70, 0x1a, 0x2c, 0x8f, 0x75, 0x74, 0xf3,
-- 0xc5, 0xc9, 0x1a, 0xcf, 0x3a, 0x54, 0xac, 0x93, 0xdf, 0x21, 0x58, 0xba, 0xc6, 0x82, 0xf8, 0xb5,
-- 0xe6, 0x55, 0xd2, 0xff, 0x7d, 0x38, 0x62, 0xc8, 0xaf, 0xb4, 0x7f, 0x2b, 0x71, 0x97, 0x79, 0x2d,
-- 0xd2, 0x7f, 0x6b, 0xd8, 0x66, 0x9f, 0xa8, 0x19, 0x2d, 0x7e, 0x8d, 0xb9, 0x0b, 0x45, 0x63, 0x11,
-- 0x5f, 0x49, 0x5c, 0x60, 0xa6, 0x35, 0xd5, 0x66, 0x59, 0xe9, 0x24, 0xa7, 0x34, 0x75, 0x3d, 0x0d,
-- 0xdb, 0xfd, 0x36, 0x60, 0x31, 0x36, 0x0a, 0xb6, 0x66, 0xa5, 0x16, 0xd8, 0x1b, 0xe1, 0x7d, 0x26,
-- 0x84, 0xf1, 0x29, 0xb0, 0x3c, 0xf7, 0xb1, 0xbe, 0x99, 0xce, 0x47, 0x47, 0x52, 0xf7, 0x31, 0x15,
-- 0x4b, 0xe4, 0x32, 0x64, 0xa8, 0xfb, 0x18, 0x57, 0x01, 0x3c, 0x7b, 0xd8, 0x65, 0xf7, 0xc3, 0x81,
-- 0xa5, 0x44, 0x0d, 0xcc, 0x21, 0xfd, 0x75, 0x03, 0x8e, 0x98, 0x12, 0x49, 0x77, 0xd7, 0x61, 0x8e,
-- 0x23, 0xfb, 0xd3, 0x1e, 0xbd, 0x04, 0xa1, 0x9c, 0x7d, 0x35, 0x11, 0x8f, 0x19, 0x88, 0xf0, 0xf8,
-- 0x04, 0x14, 0x02, 0x7b, 0x67, 0xc0, 0x6e, 0x47, 0x39, 0x1f, 0x21, 0xf8, 0x2a, 0x9f, 0xb5, 0xee,
-- 0x1b, 0x17, 0x85, 0x08, 0x81, 0xcf, 0xc2, 0x52, 0x24, 0xf3, 0x5d, 0x8f, 0x75, 0xfa, 0x9f, 0x08,
-- 0x0f, 0x97, 0xe8, 0x01, 0x3c, 0x5e, 0x83, 0xc5, 0x08, 0xb7, 0x2d, 0xda, 0xae, 0x25, 0x48, 0x93,
-- 0x68, 0x6e, 0x1b, 0xa1, 0xee, 0x7b, 0x8f, 0xc6, 0xf6, 0x40, 0x14, 0xb2, 0x12, 0x35, 0x30, 0xe4,
-- 0xf7, 0x08, 0x8e, 0x48, 0x57, 0xf3, 0x29, 0xfb, 0x55, 0x8c, 0xfa, 0x5f, 0x22, 0xc0, 0xa6, 0x06,
-- 0x2a, 0xb4, 0xfe, 0xdf, 0x7c, 0x3e, 0xe1, 0x7d, 0xbd, 0x28, 0x46, 0x48, 0x89, 0x8a, 0x5e, 0x40,
-- 0x48, 0x78, 0x05, 0x14, 0xef, 0x8e, 0x72, 0x46, 0x95, 0x18, 0x7d, 0xfb, 0xe3, 0xa3, 0xf5, 0xce,
-- 0x6e, 0xc0, 0x7c, 0x35, 0x61, 0x8a, 0xd1, 0x5a, 0x20, 0xa8, 0xfc, 0xe1, 0x67, 0xb1, 0x61, 0x20,
-- 0xa2, 0xc6, 0x8a, 0xce, 0x52, 0x28, 0xaa, 0x3f, 0xc8, 0x3f, 0x10, 0xcc, 0xdf, 0x77, 0x07, 0xe3,
-- 0xa8, 0x4b, 0xbc, 0x42, 0x76, 0x8e, 0x8f, 0xbe, 0x59, 0x3d, 0xfa, 0x62, 0xb0, 0xfc, 0x80, 0x8d,
-- 0x44, 0x64, 0x65, 0xa8, 0xf8, 0x26, 0x3f, 0x80, 0x05, 0xad, 0xa6, 0x72, 0xc6, 0x9b, 0x30, 0xf7,
-- 0xb1, 0xc0, 0x4c, 0x79, 0x24, 0x92, 0xa4, 0xaa, 0x00, 0x69, 0xb2, 0xf8, 0xdb, 0xab, 0x3e, 0x8d,
-- 0x5c, 0x87, 0x9c, 0x24, 0xc7, 0x27, 0xcc, 0x4b, 0xb8, 0x7c, 0xcd, 0xe0, 0xb0, 0xba, 0x51, 0x13,
-- 0xc8, 0x49, 0x46, 0xca, 0x65, 0xc2, 0xab, 0x12, 0x43, 0xd5, 0xef, 0xd9, 0xd3, 0x50, 0x08, 0x1f,
-- 0x4e, 0x71, 0x11, 0xe6, 0xae, 0xde, 0xa1, 0xdf, 0xbf, 0x42, 0x37, 0x97, 0x52, 0xb8, 0x04, 0xf9,
-- 0xe6, 0x95, 0x8d, 0x1b, 0x02, 0x42, 0xeb, 0xff, 0xb2, 0x74, 0x4d, 0xf0, 0xf0, 0xb7, 0x21, 0x2b,
-- 0x13, 0xfd, 0x68, 0x24, 0xbf, 0xf9, 0xfc, 0x59, 0x39, 0x76, 0x00, 0xaf, 0xae, 0xe5, 0xa9, 0x37,
-- 0x11, 0xbe, 0x0d, 0x45, 0x81, 0x54, 0x4f, 0x2c, 0x27, 0x92, 0x2f, 0x1d, 0x31, 0x4e, 0x27, 0x0f,
-- 0x59, 0x35, 0xf8, 0x5d, 0x82, 0xac, 0xa8, 0xb5, 0xa6, 0x34, 0xe6, 0x13, 0x99, 0x29, 0x4d, 0xec,
-- 0xd1, 0x89, 0xa4, 0xf0, 0x3b, 0x60, 0xf1, 0x79, 0x00, 0x1b, 0xed, 0xc0, 0x78, 0x19, 0xa9, 0x1c,
-- 0x4d, 0xa2, 0x8d, 0x63, 0xdf, 0x0d, 0x1f, 0x78, 0x8e, 0x25, 0x07, 0x59, 0xbd, 0x7d, 0xe5, 0xe0,
-- 0x42, 0x78, 0xf2, 0x1d, 0xf9, 0xd2, 0xa1, 0x27, 0x11, 0x7c, 0x32, 0x7e, 0x54, 0x62, 0x70, 0xa9,
-- 0x54, 0x0f, 0x5b, 0x0e, 0x19, 0xde, 0x84, 0xa2, 0x31, 0x05, 0x98, 0x66, 0x3d, 0x38, 0xc2, 0x98,
-- 0x66, 0x9d, 0x32, 0x3a, 0x90, 0x14, 0xbe, 0x06, 0x79, 0xde, 0x44, 0x79, 0x2d, 0xc1, 0xc7, 0x93,
-- 0xbd, 0xd2, 0xa8, 0x91, 0x95, 0x13, 0xd3, 0x17, 0x43, 0x46, 0x57, 0x61, 0x31, 0xec, 0xc6, 0x2a,
-- 0x68, 0x8f, 0x25, 0xa3, 0x7e, 0x8a, 0xbd, 0xe2, 0x99, 0x43, 0x52, 0xeb, 0x1f, 0x41, 0x5e, 0x0f,
-- 0xbe, 0xf8, 0x03, 0x58, 0x88, 0x8f, 0x7d, 0xf8, 0xff, 0x0c, 0xf3, 0xc4, 0xa7, 0xe9, 0xca, 0xaa,
-- 0xb1, 0x34, 0x7d, 0x56, 0x4c, 0xad, 0xa1, 0xf5, 0x8f, 0xf4, 0x7f, 0x6c, 0x36, 0xed, 0xc0, 0xc6,
-- 0x77, 0x60, 0x41, 0x68, 0x1f, 0xfe, 0x4b, 0x27, 0x16, 0xa5, 0x07, 0xfe, 0x7f, 0x14, 0x8b, 0xd2,
-- 0x83, 0xff, 0x47, 0x22, 0xa9, 0xe6, 0x87, 0x4f, 0x9f, 0x55, 0x53, 0x9f, 0x3f, 0xab, 0xa6, 0xbe,
-- 0x78, 0x56, 0x45, 0x3f, 0xde, 0xab, 0xa2, 0x5f, 0xed, 0x55, 0xd1, 0x93, 0xbd, 0x2a, 0x7a, 0xba,
-- 0x57, 0x45, 0x7f, 0xdf, 0xab, 0xa2, 0x7f, 0xee, 0x55, 0x53, 0x5f, 0xec, 0x55, 0xd1, 0x67, 0xcf,
-- 0xab, 0xa9, 0xa7, 0xcf, 0xab, 0xa9, 0xcf, 0x9f, 0x57, 0x53, 0x1f, 0xbe, 0xfe, 0x65, 0x0f, 0x5a,
-- 0xfa, 0xc4, 0x9d, 0x9c, 0xf8, 0x79, 0xeb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x23, 0x5c,
-- 0x1e, 0x70, 0x1b, 0x00, 0x00,
-+ 0xf5, 0x1c, 0x72, 0x49, 0x91, 0x8f, 0xd4, 0x87, 0x47, 0x8c, 0xcd, 0x1f, 0xe3, 0x90, 0xf2, 0x20,
-+ 0x3f, 0x5b, 0xb0, 0x1d, 0x31, 0x56, 0xda, 0xd4, 0xb1, 0x9b, 0x16, 0xa6, 0x14, 0x3b, 0xf2, 0x77,
-+ 0x46, 0xae, 0x5b, 0x04, 0x0d, 0x8c, 0x15, 0x39, 0xa4, 0x08, 0x73, 0xb9, 0xf4, 0xee, 0x30, 0x8e,
-+ 0x80, 0x1e, 0x7a, 0xed, 0x21, 0x40, 0x6e, 0x45, 0x2f, 0x45, 0x0f, 0x05, 0x5a, 0x14, 0xe8, 0xa5,
-+ 0x7f, 0x40, 0xdb, 0x43, 0x81, 0xba, 0x37, 0xf7, 0x16, 0xf4, 0xc0, 0xd6, 0xf2, 0xa5, 0xd0, 0x29,
-+ 0xff, 0x40, 0x8b, 0x62, 0xbe, 0x76, 0x67, 0x29, 0x3a, 0x09, 0x5d, 0x03, 0x85, 0x2f, 0xe2, 0xbe,
-+ 0x37, 0x6f, 0xde, 0xbc, 0xef, 0x37, 0x6f, 0x04, 0xaf, 0x0e, 0xef, 0x77, 0x1b, 0x7d, 0xbf, 0x3b,
-+ 0x0c, 0x7c, 0xee, 0x47, 0x1f, 0x6b, 0xf2, 0x2f, 0xce, 0x1b, 0xb8, 0x5a, 0xee, 0xfa, 0x5d, 0x5f,
-+ 0xd1, 0x88, 0x2f, 0xb5, 0x5e, 0xad, 0x77, 0x7d, 0xbf, 0xdb, 0x67, 0x0d, 0x09, 0xed, 0x8c, 0x3a,
-+ 0x0d, 0xde, 0xf3, 0x58, 0xc8, 0x5d, 0x6f, 0xa8, 0x09, 0x56, 0x34, 0xf7, 0x07, 0x7d, 0xcf, 0x6f,
-+ 0xb3, 0x7e, 0x23, 0xe4, 0x2e, 0x0f, 0xd5, 0x5f, 0x4d, 0xb1, 0x2c, 0x28, 0x86, 0xa3, 0x70, 0x57,
-+ 0xfe, 0x51, 0x48, 0x52, 0x06, 0xbc, 0xcd, 0x03, 0xe6, 0x7a, 0xd4, 0xe5, 0x2c, 0xa4, 0xec, 0xc1,
-+ 0x88, 0x85, 0x9c, 0xdc, 0x80, 0xe5, 0x04, 0x36, 0x1c, 0xfa, 0x83, 0x90, 0xe1, 0xb7, 0xa1, 0x18,
-+ 0xc6, 0xe8, 0x0a, 0x5a, 0xc9, 0xac, 0x16, 0xd7, 0xcb, 0x6b, 0x91, 0x2a, 0xf1, 0x1e, 0x6a, 0x13,
-+ 0x92, 0x9f, 0x23, 0x80, 0x78, 0x0d, 0xd7, 0x00, 0xd4, 0xea, 0xfb, 0x6e, 0xb8, 0x5b, 0x41, 0x2b,
-+ 0x68, 0xd5, 0xa1, 0x16, 0x06, 0x9f, 0x85, 0x23, 0x31, 0x74, 0xd3, 0xdf, 0xde, 0x75, 0x83, 0x76,
-+ 0x25, 0x2d, 0xc9, 0x0e, 0x2f, 0x60, 0x0c, 0x4e, 0xe0, 0x72, 0x56, 0xc9, 0xac, 0xa0, 0xd5, 0x0c,
-+ 0x95, 0xdf, 0xf8, 0x28, 0xe4, 0x38, 0x1b, 0xb8, 0x03, 0x5e, 0x71, 0x56, 0xd0, 0x6a, 0x81, 0x6a,
-+ 0x48, 0xe0, 0x85, 0xee, 0x2c, 0xac, 0x64, 0x57, 0xd0, 0xea, 0x3c, 0xd5, 0x10, 0xf9, 0x73, 0x1a,
-+ 0x4a, 0x1f, 0x8c, 0x58, 0xb0, 0xa7, 0x0d, 0x80, 0xab, 0x90, 0x0f, 0x59, 0x9f, 0xb5, 0xb8, 0x1f,
-+ 0x48, 0x01, 0x0b, 0x34, 0x82, 0x71, 0x19, 0xb2, 0xfd, 0x9e, 0xd7, 0xe3, 0x52, 0xa4, 0x79, 0xaa,
-+ 0x00, 0x7c, 0x01, 0xb2, 0x21, 0x77, 0x03, 0x2e, 0xe5, 0x28, 0xae, 0x57, 0xd7, 0x94, 0xc3, 0xd6,
-+ 0x8c, 0xc3, 0xd6, 0xee, 0x18, 0x87, 0x35, 0xf3, 0x8f, 0xc6, 0xf5, 0xd4, 0x67, 0x7f, 0xaf, 0x23,
-+ 0xaa, 0xb6, 0xe0, 0xb7, 0x21, 0xc3, 0x06, 0x6d, 0x29, 0xeb, 0xd7, 0xdd, 0x29, 0x36, 0xe0, 0x73,
-+ 0x50, 0x68, 0xf7, 0x02, 0xd6, 0xe2, 0x3d, 0x7f, 0x20, 0x35, 0x5a, 0x58, 0x5f, 0x8e, 0xbd, 0xb1,
-+ 0x69, 0x96, 0x68, 0x4c, 0x85, 0xcf, 0x42, 0x2e, 0x14, 0x66, 0x0b, 0x2b, 0x73, 0x2b, 0x99, 0xd5,
-+ 0x42, 0xb3, 0x7c, 0x30, 0xae, 0x2f, 0x29, 0xcc, 0x59, 0xdf, 0xeb, 0x71, 0xe6, 0x0d, 0xf9, 0x1e,
-+ 0xd5, 0x34, 0xf8, 0x34, 0xcc, 0xb5, 0x59, 0x9f, 0x09, 0x67, 0xe7, 0xa5, 0xb3, 0x97, 0x2c, 0xf6,
-+ 0x72, 0x81, 0x1a, 0x82, 0xab, 0x4e, 0x3e, 0xb7, 0x34, 0x47, 0xfe, 0x8d, 0x00, 0x6f, 0xbb, 0xde,
-+ 0xb0, 0xcf, 0xbe, 0xb6, 0x3d, 0x23, 0xcb, 0xa5, 0x9f, 0xdb, 0x72, 0x99, 0x59, 0x2d, 0x17, 0x9b,
-+ 0xc1, 0x99, 0xcd, 0x0c, 0xd9, 0xaf, 0x30, 0x03, 0xb9, 0x0e, 0x39, 0x85, 0xfa, 0xaa, 0x18, 0x8a,
-+ 0x75, 0xce, 0x18, 0x6d, 0x96, 0x62, 0x6d, 0x32, 0x52, 0x4e, 0xf2, 0x0b, 0x04, 0xf3, 0xda, 0x90,
-+ 0x3a, 0x07, 0x77, 0x60, 0x4e, 0xe5, 0x80, 0xc9, 0xbf, 0x63, 0x93, 0xf9, 0x77, 0xa9, 0xed, 0x0e,
-+ 0x39, 0x0b, 0x9a, 0x8d, 0x47, 0xe3, 0x3a, 0xfa, 0xdb, 0xb8, 0x7e, 0xaa, 0xdb, 0xe3, 0xbb, 0xa3,
-+ 0x9d, 0xb5, 0x96, 0xef, 0x35, 0xba, 0x81, 0xdb, 0x71, 0x07, 0x6e, 0xa3, 0xef, 0xdf, 0xef, 0x35,
-+ 0x4c, 0x3d, 0x30, 0x79, 0x6b, 0x18, 0xe3, 0x33, 0x52, 0x3a, 0x1e, 0x6a, 0x8f, 0x2c, 0xae, 0xa9,
-+ 0x32, 0xb2, 0x35, 0xe8, 0xb2, 0x50, 0x70, 0x76, 0x84, 0x31, 0xa9, 0xa2, 0x21, 0x3f, 0x82, 0xe5,
-+ 0x84, 0xc3, 0xb5, 0x9c, 0xe7, 0x21, 0x17, 0xb2, 0xa0, 0x17, 0x95, 0x09, 0xcb, 0x64, 0xdb, 0x12,
-+ 0xdf, 0x5c, 0xd0, 0xf2, 0xe5, 0x14, 0x4c, 0x35, 0xfd, 0x6c, 0xa7, 0xff, 0x09, 0x41, 0xe9, 0xba,
-+ 0xbb, 0xc3, 0xfa, 0x26, 0xd2, 0x30, 0x38, 0x03, 0xd7, 0x63, 0xda, 0xe2, 0xf2, 0x5b, 0xa4, 0xfd,
-+ 0xc7, 0x6e, 0x7f, 0xc4, 0x14, 0xcb, 0x3c, 0xd5, 0xd0, 0xac, 0x39, 0x8b, 0x9e, 0x3b, 0x67, 0x51,
-+ 0x1c, 0x79, 0x65, 0xc8, 0x3e, 0x10, 0x86, 0x92, 0xf9, 0x5a, 0xa0, 0x0a, 0x20, 0xa7, 0x60, 0x5e,
-+ 0x6b, 0xa1, 0xcd, 0x17, 0x8b, 0x2c, 0xcc, 0x57, 0x30, 0x22, 0x13, 0x0f, 0x72, 0xca, 0xda, 0xf8,
-+ 0x75, 0x28, 0x44, 0x3d, 0x40, 0x6a, 0x9b, 0x69, 0xe6, 0x0e, 0xc6, 0xf5, 0x34, 0x0f, 0x69, 0xbc,
-+ 0x80, 0xeb, 0x90, 0x95, 0x3b, 0xa5, 0xe6, 0xa8, 0x59, 0x38, 0x18, 0xd7, 0x15, 0x82, 0xaa, 0x1f,
-+ 0x7c, 0x1c, 0x9c, 0x5d, 0x51, 0x86, 0x85, 0x09, 0x9c, 0x66, 0xfe, 0x60, 0x5c, 0x97, 0x30, 0x95,
-+ 0x7f, 0xc9, 0x15, 0x28, 0x5d, 0x67, 0x5d, 0xb7, 0xb5, 0xa7, 0x0f, 0x2d, 0x1b, 0x76, 0xe2, 0x40,
-+ 0x64, 0x78, 0x9c, 0x80, 0x52, 0x74, 0xe2, 0x3d, 0x2f, 0xd4, 0x41, 0x5d, 0x8c, 0x70, 0x37, 0x42,
-+ 0xf2, 0x33, 0x04, 0xda, 0xcf, 0x98, 0x40, 0xae, 0x2f, 0x74, 0x0d, 0x95, 0x8f, 0x9a, 0x70, 0x30,
-+ 0xae, 0x6b, 0x0c, 0xd5, 0xbf, 0xf8, 0x22, 0xcc, 0x85, 0xf2, 0x44, 0xc1, 0x6c, 0x32, 0x7c, 0xe4,
-+ 0x42, 0x73, 0x51, 0x84, 0xc1, 0xc1, 0xb8, 0x6e, 0x08, 0xa9, 0xf9, 0xc0, 0x6b, 0x89, 0xfe, 0xa2,
-+ 0x14, 0x5b, 0x38, 0x18, 0xd7, 0x2d, 0xac, 0xdd, 0x6f, 0xc8, 0x4f, 0x11, 0x14, 0xef, 0xb8, 0xbd,
-+ 0x28, 0x84, 0x22, 0x17, 0x21, 0xcb, 0x45, 0x22, 0x9d, 0xdb, 0xac, 0xef, 0xee, 0x5d, 0xf6, 0x03,
-+ 0xc9, 0x73, 0x9e, 0x46, 0x70, 0xdc, 0x12, 0x9c, 0xa9, 0x2d, 0x21, 0x3b, 0x73, 0x61, 0xbb, 0xea,
-+ 0xe4, 0xd3, 0x4b, 0x19, 0xf2, 0x5b, 0x04, 0x25, 0x25, 0x99, 0x0e, 0x8b, 0x1f, 0x42, 0x4e, 0x09,
-+ 0x2e, 0x65, 0xfb, 0x92, 0xe4, 0x3f, 0x33, 0x4b, 0xe2, 0x6b, 0x9e, 0xf8, 0xbb, 0xb0, 0xd0, 0x0e,
-+ 0xfc, 0xe1, 0x90, 0xb5, 0xb7, 0x75, 0x89, 0x49, 0x4f, 0x96, 0x98, 0x4d, 0x7b, 0x9d, 0x4e, 0x90,
-+ 0x93, 0xbf, 0x20, 0x98, 0xd7, 0xd9, 0xac, 0x6d, 0x19, 0xd9, 0x00, 0x3d, 0x77, 0x71, 0x4f, 0xcf,
-+ 0x5a, 0xdc, 0x8f, 0x42, 0xae, 0x1b, 0xf8, 0xa3, 0x61, 0x58, 0xc9, 0xa8, 0xdc, 0x51, 0xd0, 0x6c,
-+ 0x45, 0x9f, 0x5c, 0x85, 0x05, 0xa3, 0xca, 0x33, 0x4a, 0x5a, 0x75, 0xb2, 0xa4, 0x6d, 0xb5, 0xd9,
-+ 0x80, 0xf7, 0x3a, 0xbd, 0xa8, 0x48, 0x69, 0x7a, 0xf2, 0x29, 0x82, 0xa5, 0x49, 0x12, 0xfc, 0x1d,
-+ 0x2b, 0x0f, 0x04, 0xbb, 0x93, 0xcf, 0x66, 0xb7, 0x26, 0x8b, 0x43, 0xf8, 0xde, 0x80, 0x07, 0x7b,
-+ 0x26, 0x47, 0xaa, 0xef, 0x40, 0xd1, 0x42, 0x8b, 0xe6, 0x71, 0x9f, 0x99, 0x98, 0x15, 0x9f, 0x71,
-+ 0xb2, 0xa6, 0x55, 0x1c, 0x4b, 0xe0, 0x42, 0xfa, 0x3c, 0x12, 0x11, 0x3f, 0x9f, 0xf0, 0x24, 0x3e,
-+ 0x0f, 0x4e, 0x27, 0xf0, 0xbd, 0x99, 0xdc, 0x24, 0x77, 0xe0, 0x6f, 0x40, 0x9a, 0xfb, 0x33, 0x39,
-+ 0x29, 0xcd, 0x7d, 0xe1, 0x23, 0xad, 0x7c, 0x46, 0xdd, 0xd0, 0x14, 0x44, 0x7e, 0x83, 0x60, 0x51,
-+ 0xec, 0x51, 0x16, 0xd8, 0xd8, 0x1d, 0x0d, 0xee, 0xe3, 0x55, 0x58, 0x12, 0x27, 0xdd, 0xeb, 0xe9,
-+ 0x0e, 0x70, 0xaf, 0xd7, 0xd6, 0x6a, 0x2e, 0x08, 0xbc, 0x69, 0x0c, 0x5b, 0x6d, 0x7c, 0x0c, 0xe6,
-+ 0x46, 0xa1, 0x22, 0x50, 0x3a, 0xe7, 0x04, 0xb8, 0xd5, 0xc6, 0x67, 0xac, 0xe3, 0x84, 0xad, 0xad,
-+ 0x6b, 0x92, 0xb4, 0xe1, 0x6d, 0xb7, 0x17, 0x44, 0xc5, 0xe7, 0x14, 0xe4, 0x5a, 0xe2, 0x60, 0x15,
-+ 0x27, 0xa2, 0x03, 0x45, 0xc4, 0x52, 0x20, 0xaa, 0x97, 0xc9, 0x37, 0xa1, 0x10, 0xed, 0x9e, 0xda,
-+ 0x78, 0xa6, 0x7a, 0x80, 0x5c, 0x84, 0x45, 0x55, 0x54, 0xa7, 0x6f, 0x2e, 0x4d, 0xdb, 0x5c, 0x32,
-+ 0x9b, 0x5f, 0x85, 0xac, 0xb2, 0x0a, 0x06, 0xa7, 0xed, 0x72, 0xd7, 0x6c, 0x11, 0xdf, 0xa4, 0x02,
-+ 0x47, 0xef, 0x04, 0xee, 0x20, 0xec, 0xb0, 0x40, 0x12, 0x45, 0xb1, 0x4b, 0x5e, 0x81, 0x65, 0x51,
-+ 0x48, 0x58, 0x10, 0x6e, 0xf8, 0xa3, 0x01, 0x37, 0x17, 0xfd, 0xb3, 0x50, 0x4e, 0xa2, 0x75, 0xa8,
-+ 0x97, 0x21, 0xdb, 0x12, 0x08, 0xc9, 0x7d, 0x9e, 0x2a, 0x80, 0xfc, 0x12, 0x01, 0xbe, 0xc2, 0xb8,
-+ 0x64, 0xbd, 0xb5, 0x19, 0x5a, 0x97, 0x3b, 0xcf, 0xe5, 0xad, 0x5d, 0x16, 0x84, 0xe6, 0xa2, 0x63,
-+ 0xe0, 0xff, 0xc5, 0xe5, 0x8e, 0x9c, 0x83, 0xe5, 0x84, 0x94, 0x5a, 0xa7, 0x2a, 0xe4, 0x5b, 0x1a,
-+ 0xa7, 0x9b, 0x6a, 0x04, 0x93, 0xdf, 0xa5, 0x21, 0xaf, 0x7c, 0xcb, 0x3a, 0xf8, 0x1c, 0x14, 0x3b,
-+ 0x22, 0xd6, 0x82, 0x61, 0xd0, 0xd3, 0x26, 0x70, 0x9a, 0x8b, 0x07, 0xe3, 0xba, 0x8d, 0xa6, 0x36,
-+ 0x80, 0xdf, 0x98, 0x08, 0xbc, 0x66, 0x79, 0x7f, 0x5c, 0xcf, 0x7d, 0x4f, 0x04, 0xdf, 0xa6, 0x68,
-+ 0x6f, 0x32, 0x0c, 0x37, 0xa3, 0x70, 0xbc, 0xa6, 0xb3, 0x4d, 0xde, 0xf4, 0x9a, 0xdf, 0x12, 0xe2,
-+ 0x4f, 0xd4, 0xeb, 0x61, 0xe0, 0x7b, 0x8c, 0xef, 0xb2, 0x51, 0xd8, 0x68, 0xf9, 0x9e, 0xe7, 0x0f,
-+ 0x1a, 0x72, 0xac, 0x93, 0x4a, 0x8b, 0x1e, 0x2d, 0xb6, 0xeb, 0x04, 0xbc, 0x03, 0x73, 0x7c, 0x37,
-+ 0xf0, 0x47, 0xdd, 0x5d, 0xd9, 0x7e, 0x32, 0xcd, 0x0b, 0xb3, 0xf3, 0x33, 0x1c, 0xa8, 0xf9, 0xc0,
-+ 0x27, 0x84, 0xb5, 0x58, 0xeb, 0x7e, 0x38, 0xf2, 0xd4, 0xb0, 0xd4, 0xcc, 0x1e, 0x8c, 0xeb, 0xe8,
-+ 0x0d, 0x1a, 0xa1, 0xc9, 0xa7, 0x69, 0xa8, 0xcb, 0x10, 0xbe, 0x2b, 0xef, 0x26, 0x97, 0xfd, 0xe0,
-+ 0x06, 0xe3, 0x41, 0xaf, 0x75, 0xd3, 0xf5, 0x98, 0x89, 0x8d, 0x3a, 0x14, 0x3d, 0x89, 0xbc, 0x67,
-+ 0x25, 0x07, 0x78, 0x11, 0x1d, 0x7e, 0x0d, 0x40, 0xa6, 0x9d, 0x5a, 0x57, 0x79, 0x52, 0x90, 0x18,
-+ 0xb9, 0xbc, 0x91, 0xb0, 0x54, 0x63, 0x46, 0xcd, 0xb4, 0x85, 0xb6, 0x26, 0x2d, 0x34, 0x33, 0x9f,
-+ 0xc8, 0x2c, 0x76, 0xac, 0x67, 0x93, 0xb1, 0x4e, 0xfe, 0x8a, 0xa0, 0x76, 0xdd, 0x48, 0xfe, 0x9c,
-+ 0xe6, 0x30, 0xfa, 0xa6, 0x5f, 0x90, 0xbe, 0x99, 0xff, 0x4e, 0x5f, 0xf2, 0x47, 0x2b, 0xe5, 0x29,
-+ 0xeb, 0x18, 0x3d, 0x36, 0xac, 0x76, 0xf1, 0x22, 0xc4, 0x4c, 0xbf, 0x40, 0xb7, 0x64, 0x26, 0xdc,
-+ 0xf2, 0x6e, 0x5c, 0x0e, 0xa4, 0x06, 0xba, 0x1c, 0x9c, 0x04, 0x27, 0x60, 0x1d, 0xd3, 0x7c, 0xf1,
-+ 0x64, 0x8d, 0x67, 0x1d, 0x2a, 0xd7, 0xc9, 0xef, 0x11, 0x2c, 0x5d, 0x61, 0x3c, 0x79, 0xad, 0x79,
-+ 0x99, 0xf4, 0x7f, 0x1f, 0x8e, 0x58, 0xf2, 0x6b, 0xed, 0xdf, 0x9a, 0xb8, 0xcb, 0xbc, 0x12, 0xeb,
-+ 0xbf, 0x35, 0x68, 0xb3, 0x4f, 0xf4, 0x8c, 0x96, 0xbc, 0xc6, 0xdc, 0x86, 0xa2, 0xb5, 0x88, 0x2f,
-+ 0x4d, 0x5c, 0x60, 0xa6, 0x35, 0xd5, 0x66, 0x59, 0xeb, 0xa4, 0xa6, 0x34, 0x7d, 0x3d, 0x8d, 0xda,
-+ 0xfd, 0x36, 0x60, 0x39, 0x36, 0x4a, 0xb6, 0x76, 0xa5, 0x96, 0xd8, 0x6b, 0xd1, 0x7d, 0x26, 0x82,
-+ 0xf1, 0x09, 0x70, 0x02, 0xff, 0xa1, 0xb9, 0x99, 0xce, 0xc7, 0x47, 0x52, 0xff, 0x21, 0x95, 0x4b,
-+ 0xe4, 0x22, 0x64, 0xa8, 0xff, 0x10, 0xd7, 0x00, 0x02, 0x77, 0xd0, 0x65, 0x77, 0xa3, 0x81, 0xa5,
-+ 0x44, 0x2d, 0xcc, 0x33, 0xfa, 0xeb, 0x06, 0x1c, 0xb1, 0x25, 0x52, 0xee, 0x5e, 0x83, 0x39, 0x81,
-+ 0xec, 0x4d, 0x7b, 0xf4, 0x92, 0x84, 0x6a, 0xf6, 0x35, 0x44, 0x22, 0x66, 0x20, 0xc6, 0xe3, 0xe3,
-+ 0x50, 0xe0, 0xee, 0x4e, 0x9f, 0xdd, 0x8c, 0x73, 0x3e, 0x46, 0x88, 0x55, 0x31, 0x6b, 0xdd, 0xb5,
-+ 0x2e, 0x0a, 0x31, 0x02, 0x9f, 0x86, 0xa5, 0x58, 0xe6, 0xdb, 0x01, 0xeb, 0xf4, 0x3e, 0x91, 0x1e,
-+ 0x2e, 0xd1, 0x43, 0x78, 0xbc, 0x0a, 0x8b, 0x31, 0x6e, 0x5b, 0xb6, 0x5d, 0x47, 0x92, 0x4e, 0xa2,
-+ 0x85, 0x6d, 0xa4, 0xba, 0xef, 0x3d, 0x18, 0xb9, 0x7d, 0x59, 0xc8, 0x4a, 0xd4, 0xc2, 0x90, 0x3f,
-+ 0x20, 0x38, 0xa2, 0x5c, 0x2d, 0xa6, 0xec, 0x97, 0x31, 0xea, 0x7f, 0x85, 0x00, 0xdb, 0x1a, 0xe8,
-+ 0xd0, 0xfa, 0x7f, 0xfb, 0xf9, 0x44, 0xf4, 0xf5, 0xa2, 0x1c, 0x21, 0x15, 0x2a, 0x7e, 0x01, 0x21,
-+ 0xd1, 0x15, 0x50, 0xbe, 0x3b, 0xaa, 0x19, 0x55, 0x61, 0xcc, 0xed, 0x4f, 0x8c, 0xd6, 0x3b, 0x7b,
-+ 0x9c, 0x85, 0x7a, 0xc2, 0x94, 0xa3, 0xb5, 0x44, 0x50, 0xf5, 0x23, 0xce, 0x62, 0x03, 0x2e, 0xa3,
-+ 0xc6, 0x89, 0xcf, 0xd2, 0x28, 0x6a, 0x3e, 0xc8, 0x4f, 0xd2, 0x30, 0x7f, 0xd7, 0xef, 0x8f, 0xe2,
-+ 0x2e, 0xf1, 0x12, 0xd9, 0x39, 0x39, 0xfa, 0x66, 0xcd, 0xe8, 0x8b, 0xc1, 0x09, 0x39, 0x1b, 0xca,
-+ 0xc8, 0xca, 0x50, 0xf9, 0x8d, 0x09, 0x94, 0xb8, 0x1b, 0x74, 0x19, 0x57, 0x53, 0x4b, 0x25, 0x27,
-+ 0xef, 0x60, 0x09, 0x1c, 0xf9, 0x01, 0x2c, 0x18, 0x53, 0x68, 0x87, 0xbd, 0x09, 0x73, 0x1f, 0x4b,
-+ 0xcc, 0x94, 0x87, 0x24, 0x45, 0xaa, 0x8b, 0x94, 0x21, 0x4b, 0xbe, 0xcf, 0x1a, 0x89, 0xc8, 0x55,
-+ 0xc8, 0x29, 0x72, 0x7c, 0xdc, 0xbe, 0xa8, 0xab, 0x17, 0x0f, 0x01, 0xeb, 0x5b, 0x37, 0x81, 0x9c,
-+ 0x62, 0xa4, 0xdd, 0x2a, 0x3d, 0xaf, 0x30, 0x54, 0xff, 0x9e, 0x3e, 0x09, 0x85, 0xe8, 0x71, 0x15,
-+ 0x17, 0x61, 0xee, 0xf2, 0x2d, 0xfa, 0xfd, 0x4b, 0x74, 0x73, 0x29, 0x85, 0x4b, 0x90, 0x6f, 0x5e,
-+ 0xda, 0xb8, 0x26, 0x21, 0xb4, 0xfe, 0x2f, 0xc7, 0xd4, 0x8d, 0x00, 0x7f, 0x1b, 0xb2, 0xaa, 0x18,
-+ 0x1c, 0x8d, 0xe5, 0xb7, 0x9f, 0x48, 0xab, 0xc7, 0x0e, 0xe1, 0xf5, 0xd5, 0x3d, 0xf5, 0x26, 0xc2,
-+ 0x37, 0xa1, 0x28, 0x91, 0xfa, 0x19, 0xe6, 0xf8, 0xe4, 0x6b, 0x48, 0x82, 0xd3, 0x6b, 0xcf, 0x58,
-+ 0xb5, 0xf8, 0x5d, 0x80, 0xac, 0xb4, 0xb8, 0x2d, 0x8d, 0xfd, 0x8c, 0x66, 0x4b, 0x93, 0x78, 0x98,
-+ 0x22, 0x29, 0xfc, 0x0e, 0x38, 0x62, 0x66, 0xc0, 0x56, 0xcb, 0xb0, 0x5e, 0x4f, 0xaa, 0x47, 0x27,
-+ 0xd1, 0xd6, 0xb1, 0xef, 0x46, 0x8f, 0x40, 0xc7, 0x26, 0x87, 0x5d, 0xb3, 0xbd, 0x72, 0x78, 0x21,
-+ 0x3a, 0xf9, 0x96, 0x7a, 0x0d, 0x31, 0xd3, 0x0a, 0x7e, 0x2d, 0x79, 0xd4, 0xc4, 0x70, 0x53, 0xad,
-+ 0x3d, 0x6b, 0x39, 0x62, 0x78, 0x1d, 0x8a, 0xd6, 0xa4, 0x60, 0x9b, 0xf5, 0xf0, 0x98, 0x63, 0x9b,
-+ 0x75, 0xca, 0x78, 0x41, 0x52, 0xf8, 0x0a, 0xe4, 0x45, 0xa3, 0x15, 0xf5, 0x06, 0xbf, 0x3a, 0xd9,
-+ 0x4f, 0xad, 0x3a, 0x5a, 0x3d, 0x3e, 0x7d, 0x31, 0x62, 0x74, 0x19, 0x16, 0xa3, 0x8e, 0xad, 0x83,
-+ 0xf6, 0xd8, 0x64, 0xd4, 0x4f, 0xb1, 0x57, 0x32, 0x73, 0x48, 0x6a, 0xfd, 0x23, 0xc8, 0x9b, 0xe1,
-+ 0x18, 0x7f, 0x00, 0x0b, 0xc9, 0xd1, 0x10, 0xff, 0x9f, 0x65, 0x9e, 0xe4, 0xc4, 0x5d, 0x5d, 0xb1,
-+ 0x96, 0xa6, 0xcf, 0x93, 0xa9, 0x55, 0xb4, 0xfe, 0x91, 0xf9, 0xaf, 0xce, 0xa6, 0xcb, 0x5d, 0x7c,
-+ 0x0b, 0x16, 0xa4, 0xf6, 0xd1, 0xbf, 0x7d, 0x12, 0x51, 0x7a, 0xe8, 0x7f, 0x4c, 0x89, 0x28, 0x3d,
-+ 0xfc, 0xbf, 0x26, 0x92, 0x6a, 0x7e, 0xf8, 0xf8, 0x49, 0x2d, 0xf5, 0xf9, 0x93, 0x5a, 0xea, 0x8b,
-+ 0x27, 0x35, 0xf4, 0xe3, 0xfd, 0x1a, 0xfa, 0xf5, 0x7e, 0x0d, 0x3d, 0xda, 0xaf, 0xa1, 0xc7, 0xfb,
-+ 0x35, 0xf4, 0x8f, 0xfd, 0x1a, 0xfa, 0xe7, 0x7e, 0x2d, 0xf5, 0xc5, 0x7e, 0x0d, 0x7d, 0xf6, 0xb4,
-+ 0x96, 0x7a, 0xfc, 0xb4, 0x96, 0xfa, 0xfc, 0x69, 0x2d, 0xf5, 0xe1, 0xeb, 0x5f, 0xf6, 0xe8, 0x65,
-+ 0x4e, 0xdc, 0xc9, 0xc9, 0x9f, 0xb7, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x57, 0xa3, 0x10, 0x6c,
-+ 0x94, 0x1b, 0x00, 0x00,
- }
-
- func (x Direction) String() string {
-@@ -4009,6 +4018,14 @@ func (this *VolumeRequest) Equal(that interface{}) bool {
- if this.Step != that1.Step {
- return false
- }
-+ if len(this.TargetLabels) != len(that1.TargetLabels) {
-+ return false
-+ }
-+ for i := range this.TargetLabels {
-+ if this.TargetLabels[i] != that1.TargetLabels[i] {
-+ return false
-+ }
-+ }
- return true
- }
- func (this *VolumeResponse) Equal(that interface{}) bool {
-@@ -4607,13 +4624,14 @@ func (this *VolumeRequest) GoString() string {
- if this == nil {
- return ""nil""
- }
-- s := make([]string, 0, 9)
-+ s := make([]string, 0, 10)
- s = append(s, ""&logproto.VolumeRequest{"")
- s = append(s, ""From: ""+fmt.Sprintf(""%#v"", this.From)+"",\n"")
- s = append(s, ""Through: ""+fmt.Sprintf(""%#v"", this.Through)+"",\n"")
- s = append(s, ""Matchers: ""+fmt.Sprintf(""%#v"", this.Matchers)+"",\n"")
- s = append(s, ""Limit: ""+fmt.Sprintf(""%#v"", this.Limit)+"",\n"")
- s = append(s, ""Step: ""+fmt.Sprintf(""%#v"", this.Step)+"",\n"")
-+ s = append(s, ""TargetLabels: ""+fmt.Sprintf(""%#v"", this.TargetLabels)+"",\n"")
- s = append(s, ""}"")
- return strings.Join(s, """")
- }
-@@ -7117,6 +7135,15 @@ func (m *VolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- _ = i
- var l int
- _ = l
-+ if len(m.TargetLabels) > 0 {
-+ for iNdEx := len(m.TargetLabels) - 1; iNdEx >= 0; iNdEx-- {
-+ i -= len(m.TargetLabels[iNdEx])
-+ copy(dAtA[i:], m.TargetLabels[iNdEx])
-+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.TargetLabels[iNdEx])))
-+ i--
-+ dAtA[i] = 0x32
-+ }
-+ }
- if m.Step != 0 {
- i = encodeVarintLogproto(dAtA, i, uint64(m.Step))
- i--
-@@ -8047,6 +8074,12 @@ func (m *VolumeRequest) Size() (n int) {
- if m.Step != 0 {
- n += 1 + sovLogproto(uint64(m.Step))
- }
-+ if len(m.TargetLabels) > 0 {
-+ for _, s := range m.TargetLabels {
-+ l = len(s)
-+ n += 1 + l + sovLogproto(uint64(l))
-+ }
-+ }
- return n
- }
-
-@@ -8655,6 +8688,7 @@ func (this *VolumeRequest) String() string {
- `Matchers:` + fmt.Sprintf(""%v"", this.Matchers) + `,`,
- `Limit:` + fmt.Sprintf(""%v"", this.Limit) + `,`,
- `Step:` + fmt.Sprintf(""%v"", this.Step) + `,`,
-+ `TargetLabels:` + fmt.Sprintf(""%v"", this.TargetLabels) + `,`,
- `}`,
- }, """")
- return s
-@@ -14164,6 +14198,38 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error {
- break
- }
- }
-+ case 6:
-+ if wireType != 2 {
-+ return fmt.Errorf(""proto: wrong wireType = %d for field TargetLabels"", wireType)
-+ }
-+ var stringLen uint64
-+ for shift := uint(0); ; shift += 7 {
-+ if shift >= 64 {
-+ return ErrIntOverflowLogproto
-+ }
-+ if iNdEx >= l {
-+ return io.ErrUnexpectedEOF
-+ }
-+ b := dAtA[iNdEx]
-+ iNdEx++
-+ stringLen |= uint64(b&0x7F) << shift
-+ if b < 0x80 {
-+ break
-+ }
-+ }
-+ intStringLen := int(stringLen)
-+ if intStringLen < 0 {
-+ return ErrInvalidLengthLogproto
-+ }
-+ postIndex := iNdEx + intStringLen
-+ if postIndex < 0 {
-+ return ErrInvalidLengthLogproto
-+ }
-+ if postIndex > l {
-+ return io.ErrUnexpectedEOF
-+ }
-+ m.TargetLabels = append(m.TargetLabels, string(dAtA[iNdEx:postIndex]))
-+ iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLogproto(dAtA[iNdEx:])
-diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto
-index 614444d873507..fc701cba8ee2c 100644
---- a/pkg/logproto/logproto.proto
-+++ b/pkg/logproto/logproto.proto
-@@ -393,6 +393,7 @@ message VolumeRequest {
- string matchers = 3;
- int32 limit = 4;
- int64 step = 5;
-+ repeated string targetLabels = 6;
- }
-
- message VolumeResponse {
-diff --git a/pkg/querier/http.go b/pkg/querier/http.go
-index de1b79928dedf..eae7f7eb3dd26 100644
---- a/pkg/querier/http.go
-+++ b/pkg/querier/http.go
-@@ -445,11 +445,12 @@ func (q *QuerierAPI) SeriesVolumeRangeHandler(w http.ResponseWriter, r *http.Req
- }
-
- req := &logproto.VolumeRequest{
-- From: model.TimeFromUnixNano(rawReq.Start.UnixNano()),
-- Through: model.TimeFromUnixNano(rawReq.End.UnixNano()),
-- Matchers: rawReq.Query,
-- Step: rawReq.Step.Milliseconds(),
-- Limit: int32(rawReq.Limit),
-+ From: model.TimeFromUnixNano(rawReq.Start.UnixNano()),
-+ Through: model.TimeFromUnixNano(rawReq.End.UnixNano()),
-+ Matchers: rawReq.Query,
-+ Step: rawReq.Step.Milliseconds(),
-+ Limit: int32(rawReq.Limit),
-+ TargetLabels: rawReq.TargetLabels,
- }
-
- q.seriesVolumeHandler(r.Context(), r, req, w)
-@@ -466,11 +467,12 @@ func (q *QuerierAPI) SeriesVolumeInstantHandler(w http.ResponseWriter, r *http.R
- }
-
- req := &logproto.VolumeRequest{
-- From: model.TimeFromUnixNano(rawReq.Start.UnixNano()),
-- Through: model.TimeFromUnixNano(rawReq.End.UnixNano()),
-- Matchers: rawReq.Query,
-- Step: 0,
-- Limit: int32(rawReq.Limit),
-+ From: model.TimeFromUnixNano(rawReq.Start.UnixNano()),
-+ Through: model.TimeFromUnixNano(rawReq.End.UnixNano()),
-+ Matchers: rawReq.Query,
-+ Step: 0,
-+ Limit: int32(rawReq.Limit),
-+ TargetLabels: rawReq.TargetLabels,
- }
-
- q.seriesVolumeHandler(r.Context(), r, req, w)
-diff --git a/pkg/querier/http_test.go b/pkg/querier/http_test.go
-index d4151ff3a890b..64745d4611696 100644
---- a/pkg/querier/http_test.go
-+++ b/pkg/querier/http_test.go
-@@ -524,7 +524,7 @@ func TestResponseFormat(t *testing.T) {
- logproto.Stream{
- Entries: []logproto.Entry{
- {
-- Timestamp: time.Unix(0, 123456789012345),
-+ Timestamp: time.Unix(0, 123456789012345).UTC(),
- Line: ""super line"",
- },
- },
-@@ -558,7 +558,7 @@ func TestResponseFormat(t *testing.T) {
- logproto.Stream{
- Entries: []logproto.Entry{
- {
-- Timestamp: time.Unix(0, 123456789012345),
-+ Timestamp: time.Unix(0, 123456789012345).UTC(),
- Line: ""super line"",
- },
- },
-diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go
-index f448d13a6dfb8..0c00a5b35e9e5 100644
---- a/pkg/querier/ingester_querier.go
-+++ b/pkg/querier/ingester_querier.go
-@@ -319,7 +319,7 @@ func (q *IngesterQuerier) Stats(ctx context.Context, _ string, from, through mod
- return &merged, nil
- }
-
--func (q *IngesterQuerier) SeriesVolume(ctx context.Context, _ string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (q *IngesterQuerier) SeriesVolume(ctx context.Context, _ string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- matcherString := ""{}""
- if len(matchers) > 0 {
- matcherString = syntax.MatchersString(matchers)
-@@ -327,10 +327,11 @@ func (q *IngesterQuerier) SeriesVolume(ctx context.Context, _ string, from, thro
-
- resps, err := q.forAllIngesters(ctx, func(ctx context.Context, querierClient logproto.QuerierClient) (interface{}, error) {
- return querierClient.GetSeriesVolume(ctx, &logproto.VolumeRequest{
-- From: from,
-- Through: through,
-- Matchers: matcherString,
-- Limit: limit,
-+ From: from,
-+ Through: through,
-+ Matchers: matcherString,
-+ Limit: limit,
-+ TargetLabels: targetLabels,
- })
- })
-
-diff --git a/pkg/querier/ingester_querier_test.go b/pkg/querier/ingester_querier_test.go
-index 6426bfee8487d..d75c652132920 100644
---- a/pkg/querier/ingester_querier_test.go
-+++ b/pkg/querier/ingester_querier_test.go
-@@ -365,7 +365,7 @@ func TestIngesterQuerier_SeriesVolume(t *testing.T) {
- )
- require.NoError(t, err)
-
-- volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10)
-+ volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10, nil)
- require.NoError(t, err)
-
- require.Equal(t, []logproto.Volume{
-@@ -385,7 +385,7 @@ func TestIngesterQuerier_SeriesVolume(t *testing.T) {
- )
- require.NoError(t, err)
-
-- volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10)
-+ volumes, err := ingesterQuerier.SeriesVolume(context.Background(), """", 0, 1, 10, nil)
- require.NoError(t, err)
-
- require.Equal(t, []logproto.Volume(nil), volumes.Volumes)
-diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
-index cbbaf5294ee88..e716ad7e3d51c 100644
---- a/pkg/querier/querier.go
-+++ b/pkg/querier/querier.go
-@@ -800,6 +800,7 @@ func (q *SingleTenantQuerier) SeriesVolume(ctx context.Context, req *logproto.Vo
- ""through"", req.Through.Time(),
- ""matchers"", syntax.MatchersString(matchers),
- ""limit"", req.Limit,
-+ ""targetLabels"", req.TargetLabels,
- )
-
- ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.From.Time(), req.Through.Time())
-@@ -826,6 +827,7 @@ func (q *SingleTenantQuerier) SeriesVolume(ctx context.Context, req *logproto.Vo
- model.TimeFromUnix(ingesterQueryInterval.start.Unix()),
- model.TimeFromUnix(ingesterQueryInterval.end.Unix()),
- req.Limit,
-+ req.TargetLabels,
- matchers...,
- )
- if err != nil {
-@@ -842,6 +844,7 @@ func (q *SingleTenantQuerier) SeriesVolume(ctx context.Context, req *logproto.Vo
- model.TimeFromUnix(storeQueryInterval.start.Unix()),
- model.TimeFromUnix(storeQueryInterval.end.Unix()),
- req.Limit,
-+ req.TargetLabels,
- matchers...,
- )
- if err != nil {
-diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
-index c12f4e3af9b31..a1b17f8334a28 100644
---- a/pkg/querier/querier_mock_test.go
-+++ b/pkg/querier/querier_mock_test.go
-@@ -367,8 +367,8 @@ func (s *storeMock) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*la
- return nil, nil
- }
-
--func (s *storeMock) SeriesVolume(ctx context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-- args := s.Called(ctx, userID, from, through, matchers)
-+func (s *storeMock) SeriesVolume(ctx context.Context, userID string, from, through model.Time, _ int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+ args := s.Called(ctx, userID, from, through, targetLabels, matchers)
- return args.Get(0).(*logproto.VolumeResponse), args.Error(1)
- }
-
-diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
-index 2bb842e8df786..c3fb27d9bed7f 100644
---- a/pkg/querier/querier_test.go
-+++ b/pkg/querier/querier_test.go
-@@ -977,7 +977,7 @@ func TestQuerier_SeriesVolumes(t *testing.T) {
-
- ingesterClient := newQuerierClientMock()
- store := newStoreMock()
-- store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
-+ store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
-
- conf := mockQuerierConfig()
- conf.QueryIngestersWithin = time.Minute * 30
-@@ -1050,7 +1050,7 @@ func TestQuerier_SeriesVolumes(t *testing.T) {
- ingesterClient.On(""GetSeriesVolume"", mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
-
- store := newStoreMock()
-- store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
-+ store.On(""SeriesVolume"", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
-
- conf := mockQuerierConfig()
- conf.QueryIngestersWithin = time.Minute * 30
-diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
-index a04514b45afb9..4ba2e36d961ac 100644
---- a/pkg/querier/queryrange/codec.go
-+++ b/pkg/querier/queryrange/codec.go
-@@ -282,11 +282,12 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer
- }
- from, through := util.RoundToMilliseconds(req.Start, req.End)
- return &logproto.VolumeRequest{
-- From: from,
-- Through: through,
-- Matchers: req.Query,
-- Limit: int32(req.Limit),
-- Step: 0,
-+ From: from,
-+ Through: through,
-+ Matchers: req.Query,
-+ Limit: int32(req.Limit),
-+ Step: 0,
-+ TargetLabels: req.TargetLabels,
- }, err
- case SeriesVolumeRangeOp:
- req, err := loghttp.ParseSeriesVolumeRangeQuery(r)
-@@ -295,11 +296,12 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer
- }
- from, through := util.RoundToMilliseconds(req.Start, req.End)
- return &logproto.VolumeRequest{
-- From: from,
-- Through: through,
-- Matchers: req.Query,
-- Limit: int32(req.Limit),
-- Step: req.Step.Milliseconds(),
-+ From: from,
-+ Through: through,
-+ Matchers: req.Query,
-+ Limit: int32(req.Limit),
-+ Step: req.Step.Milliseconds(),
-+ TargetLabels: req.TargetLabels,
- }, err
- default:
- return nil, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf(""unknown request path: %s"", r.URL.Path))
-@@ -440,6 +442,10 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht
- ""limit"": []string{fmt.Sprintf(""%d"", request.Limit)},
- }
-
-+ if len(request.TargetLabels) > 0 {
-+ params[""targetLabels""] = []string{strings.Join(request.TargetLabels, "","")}
-+ }
-+
- var u *url.URL
- if request.Step != 0 {
- params[""step""] = []string{fmt.Sprintf(""%f"", float64(request.Step)/float64(1e3))}
-diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go
-index 18499b75427ca..cf013dfeac01b 100644
---- a/pkg/querier/queryrange/codec_test.go
-+++ b/pkg/querier/queryrange/codec_test.go
-@@ -36,7 +36,7 @@ var (
- end = start.Add(1 * time.Hour)
- )
-
--func Test_codec_DecodeRequest(t *testing.T) {
-+func Test_codec_EncodeDecodeRequest(t *testing.T) {
- tests := []struct {
- name string
- reqBuilder func() (*http.Request, error)
-@@ -108,18 +108,20 @@ func Test_codec_DecodeRequest(t *testing.T) {
- }, false},
- {""series_volume"", func() (*http.Request, error) {
- return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{
-- From: model.TimeFromUnixNano(start.UnixNano()),
-- Through: model.TimeFromUnixNano(end.UnixNano()),
-- Matchers: `{job=""foo""}`,
-- Limit: 3,
-- Step: 0,
-+ From: model.TimeFromUnixNano(start.UnixNano()),
-+ Through: model.TimeFromUnixNano(end.UnixNano()),
-+ Matchers: `{job=""foo""}`,
-+ Limit: 3,
-+ Step: 0,
-+ TargetLabels: []string{""job""},
- })
- }, &logproto.VolumeRequest{
-- From: model.TimeFromUnixNano(start.UnixNano()),
-- Through: model.TimeFromUnixNano(end.UnixNano()),
-- Matchers: `{job=""foo""}`,
-- Limit: 3,
-- Step: 0,
-+ From: model.TimeFromUnixNano(start.UnixNano()),
-+ Through: model.TimeFromUnixNano(end.UnixNano()),
-+ Matchers: `{job=""foo""}`,
-+ Limit: 3,
-+ Step: 0,
-+ TargetLabels: []string{""job""},
- }, false},
- {""series_volume_default_limit"", func() (*http.Request, error) {
- return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{
-@@ -136,18 +138,20 @@ func Test_codec_DecodeRequest(t *testing.T) {
- }, false},
- {""series_volume_range"", func() (*http.Request, error) {
- return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{
-- From: model.TimeFromUnixNano(start.UnixNano()),
-- Through: model.TimeFromUnixNano(end.UnixNano()),
-- Matchers: `{job=""foo""}`,
-- Limit: 3,
-- Step: 30 * 1e3,
-+ From: model.TimeFromUnixNano(start.UnixNano()),
-+ Through: model.TimeFromUnixNano(end.UnixNano()),
-+ Matchers: `{job=""foo""}`,
-+ Limit: 3,
-+ Step: 30 * 1e3,
-+ TargetLabels: []string{""fizz"", ""buzz""},
- })
- }, &logproto.VolumeRequest{
-- From: model.TimeFromUnixNano(start.UnixNano()),
-- Through: model.TimeFromUnixNano(end.UnixNano()),
-- Matchers: `{job=""foo""}`,
-- Limit: 3,
-- Step: 30 * 1e3, // step is expected in ms
-+ From: model.TimeFromUnixNano(start.UnixNano()),
-+ Through: model.TimeFromUnixNano(end.UnixNano()),
-+ Matchers: `{job=""foo""}`,
-+ Limit: 3,
-+ Step: 30 * 1e3, // step is expected in ms
-+ TargetLabels: []string{""fizz"", ""buzz""},
- }, false},
- {""series_volume_range_default_limit"", func() (*http.Request, error) {
- return DefaultCodec.EncodeRequest(context.Background(), &logproto.VolumeRequest{
-@@ -175,7 +179,7 @@ func Test_codec_DecodeRequest(t *testing.T) {
- t.Errorf(""codec.DecodeRequest() error = %v, wantErr %v"", err, tt.wantErr)
- return
- }
-- require.Equal(t, got, tt.want)
-+ require.Equal(t, tt.want, got)
- })
- }
- }
-@@ -698,11 +702,12 @@ func Test_codec_index_stats_EncodeRequest(t *testing.T) {
- func Test_codec_seriesVolume_EncodeRequest(t *testing.T) {
- from, through := util.RoundToMilliseconds(start, end)
- toEncode := &logproto.VolumeRequest{
-- From: from,
-- Through: through,
-- Matchers: `{job=""foo""}`,
-- Limit: 20,
-- Step: 30 * 1e6,
-+ From: from,
-+ Through: through,
-+ Matchers: `{job=""foo""}`,
-+ Limit: 20,
-+ Step: 30 * 1e6,
-+ TargetLabels: []string{""foo"", ""bar""},
- }
- got, err := DefaultCodec.EncodeRequest(context.Background(), toEncode)
- require.Nil(t, err)
-@@ -711,6 +716,7 @@ func Test_codec_seriesVolume_EncodeRequest(t *testing.T) {
- require.Equal(t, `{job=""foo""}`, got.URL.Query().Get(""query""))
- require.Equal(t, ""20"", got.URL.Query().Get(""limit""))
- require.Equal(t, fmt.Sprintf(""%f"", float64(toEncode.Step/1e3)), got.URL.Query().Get(""step""))
-+ require.Equal(t, `foo,bar`, got.URL.Query().Get(""targetLabels""))
- }
-
- func Test_codec_EncodeResponse(t *testing.T) {
-diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go
-index 152633ef47262..09fd34933329a 100644
---- a/pkg/querier/queryrange/queryrange.pb.go
-+++ b/pkg/querier/queryrange/queryrange.pb.go
-@@ -17,6 +17,7 @@ import (
- queryrangebase ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase""
- _ ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions""
- github_com_grafana_loki_pkg_querier_queryrange_queryrangebase_definitions ""github.com/grafana/loki/pkg/querier/queryrange/queryrangebase/definitions""
-+ github_com_prometheus_common_model ""github.com/prometheus/common/model""
- io ""io""
- math ""math""
- math_bits ""math/bits""
-@@ -739,9 +740,12 @@ func (m *IndexStatsResponse) XXX_DiscardUnknown() {
- var xxx_messageInfo_IndexStatsResponse proto.InternalMessageInfo
-
- type VolumeRequest struct {
-- Match []string `protobuf:""bytes,1,rep,name=match,proto3"" json:""match,omitempty""`
-- StartTs time.Time `protobuf:""bytes,2,opt,name=startTs,proto3,stdtime"" json:""startTs""`
-- EndTs time.Time `protobuf:""bytes,3,opt,name=endTs,proto3,stdtime"" json:""endTs""`
-+ From github_com_prometheus_common_model.Time `protobuf:""varint,1,opt,name=from,proto3,customtype=github.com/prometheus/common/model.Time"" json:""from""`
-+ Through github_com_prometheus_common_model.Time `protobuf:""varint,2,opt,name=through,proto3,customtype=github.com/prometheus/common/model.Time"" json:""through""`
-+ Matchers string `protobuf:""bytes,3,opt,name=matchers,proto3"" json:""matchers,omitempty""`
-+ Limit int32 `protobuf:""varint,4,opt,name=limit,proto3"" json:""limit,omitempty""`
-+ Step int64 `protobuf:""varint,5,opt,name=step,proto3"" json:""step,omitempty""`
-+ TargetLabels []string `protobuf:""bytes,6,rep,name=targetLabels,proto3"" json:""targetLabels,omitempty""`
- }
-
- func (m *VolumeRequest) Reset() { *m = VolumeRequest{} }
-@@ -776,25 +780,32 @@ func (m *VolumeRequest) XXX_DiscardUnknown() {
-
- var xxx_messageInfo_VolumeRequest proto.InternalMessageInfo
-
--func (m *VolumeRequest) GetMatch() []string {
-+func (m *VolumeRequest) GetMatchers() string {
- if m != nil {
-- return m.Match
-+ return m.Matchers
- }
-- return nil
-+ return """"
- }
-
--func (m *VolumeRequest) GetStartTs() time.Time {
-+func (m *VolumeRequest) GetLimit() int32 {
- if m != nil {
-- return m.StartTs
-+ return m.Limit
- }
-- return time.Time{}
-+ return 0
- }
-
--func (m *VolumeRequest) GetEndTs() time.Time {
-+func (m *VolumeRequest) GetStep() int64 {
- if m != nil {
-- return m.EndTs
-+ return m.Step
- }
-- return time.Time{}
-+ return 0
-+}
-+
-+func (m *VolumeRequest) GetTargetLabels() []string {
-+ if m != nil {
-+ return m.TargetLabels
-+ }
-+ return nil
- }
-
- type VolumeResponse struct {
-@@ -992,80 +1003,85 @@ func init() {
- }
-
- var fileDescriptor_51b9d53b40d11902 = []byte{
-- // 1161 bytes of a gzipped FileDescriptorProto
-- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4d, 0x6f, 0x23, 0x35,
-- 0x18, 0x8e, 0xf3, 0xd5, 0xc6, 0xa5, 0x05, 0xdc, 0xb2, 0x3b, 0x2a, 0xab, 0x99, 0x28, 0x12, 0x6c,
-- 0x90, 0x60, 0x22, 0xda, 0xb2, 0xcb, 0x97, 0x10, 0x3b, 0x14, 0xd4, 0x4a, 0x2b, 0x04, 0xb3, 0x15,
-- 0x77, 0xa7, 0x71, 0x93, 0xa1, 0xf3, 0xd5, 0xb1, 0x53, 0xd1, 0x1b, 0x3f, 0x00, 0xa4, 0xfd, 0x0b,
-- 0x5c, 0x10, 0x12, 0x88, 0x1f, 0x80, 0xc4, 0xbd, 0xc7, 0x1e, 0x57, 0x95, 0x18, 0x68, 0x7a, 0x81,
-- 0x72, 0xe9, 0x4f, 0x40, 0xfe, 0x98, 0xc4, 0x93, 0x7e, 0x6c, 0xd3, 0xbd, 0x14, 0x69, 0x2f, 0x89,
-- 0xed, 0x79, 0x1e, 0xdb, 0xef, 0xf3, 0x3e, 0xaf, 0x3d, 0x03, 0xef, 0xc6, 0xdb, 0xdd, 0xd6, 0x4e,
-- 0x9f, 0x24, 0x1e, 0x49, 0xc4, 0xff, 0x5e, 0x82, 0xc3, 0x2e, 0xd1, 0x9a, 0x76, 0x9c, 0x44, 0x2c,
-- 0x42, 0x70, 0x34, 0xb2, 0xb8, 0xd0, 0x8d, 0xba, 0x91, 0x18, 0x6e, 0xf1, 0x96, 0x44, 0x2c, 0x5a,
-- 0xdd, 0x28, 0xea, 0xfa, 0xa4, 0x25, 0x7a, 0xed, 0xfe, 0x56, 0x8b, 0x79, 0x01, 0xa1, 0x0c, 0x07,
-- 0xb1, 0x02, 0xbc, 0xca, 0xd7, 0xf2, 0xa3, 0xae, 0x64, 0x66, 0x0d, 0xf5, 0xb0, 0xae, 0x1e, 0xee,
-- 0xf8, 0x41, 0xd4, 0x21, 0x7e, 0x8b, 0x32, 0xcc, 0xa8, 0xfc, 0x55, 0x88, 0x79, 0x8e, 0x88, 0xfb,
-- 0xb4, 0x27, 0x7e, 0xd4, 0xe0, 0x27, 0x4f, 0xdd, 0x7f, 0x1b, 0x53, 0xd2, 0xea, 0x90, 0x2d, 0x2f,
-- 0xf4, 0x98, 0x17, 0x85, 0x54, 0x6f, 0xab, 0x49, 0xee, 0x5d, 0x6d, 0x92, 0x71, 0x4d, 0x1a, 0x07,
-- 0x45, 0x38, 0xf3, 0x30, 0xda, 0xf6, 0x5c, 0xb2, 0xd3, 0x27, 0x94, 0xa1, 0x05, 0x58, 0x11, 0x18,
-- 0x03, 0xd4, 0x41, 0xb3, 0xe6, 0xca, 0x0e, 0x1f, 0xf5, 0xbd, 0xc0, 0x63, 0x46, 0xb1, 0x0e, 0x9a,
-- 0xb3, 0xae, 0xec, 0x20, 0x04, 0xcb, 0x94, 0x91, 0xd8, 0x28, 0xd5, 0x41, 0xb3, 0xe4, 0x8a, 0x36,
-- 0x5a, 0x84, 0xd3, 0x5e, 0xc8, 0x48, 0xb2, 0x8b, 0x7d, 0xa3, 0x26, 0xc6, 0x87, 0x7d, 0xf4, 0x11,
-- 0x9c, 0xa2, 0x0c, 0x27, 0x6c, 0x83, 0x1a, 0xe5, 0x3a, 0x68, 0xce, 0x2c, 0x2d, 0xda, 0x52, 0x6f,
-- 0x3b, 0xd3, 0xdb, 0xde, 0xc8, 0xf4, 0x76, 0xa6, 0xf7, 0x53, 0xab, 0xf0, 0xf8, 0x4f, 0x0b, 0xb8,
-- 0x19, 0x09, 0xbd, 0x0f, 0x2b, 0x24, 0xec, 0x6c, 0x50, 0xa3, 0x32, 0x01, 0x5b, 0x52, 0xd0, 0xdb,
-- 0xb0, 0xd6, 0xf1, 0x12, 0xb2, 0xc9, 0x35, 0x33, 0xaa, 0x75, 0xd0, 0x9c, 0x5b, 0x9a, 0xb7, 0x87,
-- 0xf9, 0x5b, 0xcd, 0x1e, 0xb9, 0x23, 0x14, 0x0f, 0x2f, 0xc6, 0xac, 0x67, 0x4c, 0x09, 0x25, 0x44,
-- 0x1b, 0x35, 0x60, 0x95, 0xf6, 0x70, 0xd2, 0xa1, 0xc6, 0x74, 0xbd, 0xd4, 0xac, 0x39, 0xf0, 0x24,
-- 0xb5, 0xd4, 0x88, 0xab, 0xfe, 0x1b, 0xff, 0x00, 0x88, 0xb8, 0xa4, 0xeb, 0x21, 0x65, 0x38, 0x64,
-- 0xd7, 0x51, 0xf6, 0x43, 0x58, 0xe5, 0xce, 0xdb, 0xa0, 0x42, 0xdb, 0xab, 0x86, 0xaa, 0x38, 0xf9,
-- 0x58, 0xcb, 0x13, 0xc5, 0x5a, 0x39, 0x37, 0xd6, 0xea, 0x85, 0xb1, 0xfe, 0x5c, 0x86, 0x2f, 0x48,
-- 0xfb, 0xd0, 0x38, 0x0a, 0x29, 0xe1, 0xa4, 0x47, 0x0c, 0xb3, 0x3e, 0x95, 0x61, 0x2a, 0x92, 0x18,
-- 0x71, 0xd5, 0x13, 0xf4, 0x31, 0x2c, 0xaf, 0x62, 0x86, 0x45, 0xc8, 0x33, 0x4b, 0x0b, 0xb6, 0x66,
-- 0x4a, 0x3e, 0x17, 0x7f, 0xe6, 0xdc, 0xe2, 0x51, 0x9d, 0xa4, 0xd6, 0x5c, 0x07, 0x33, 0xfc, 0x66,
-- 0x14, 0x78, 0x8c, 0x04, 0x31, 0xdb, 0x73, 0x05, 0x13, 0xbd, 0x03, 0x6b, 0x9f, 0x26, 0x49, 0x94,
-- 0x6c, 0xec, 0xc5, 0x44, 0x48, 0x54, 0x73, 0x6e, 0x9f, 0xa4, 0xd6, 0x3c, 0xc9, 0x06, 0x35, 0xc6,
-- 0x08, 0x89, 0xde, 0x80, 0x15, 0xd1, 0x11, 0xa2, 0xd4, 0x9c, 0xf9, 0x93, 0xd4, 0x7a, 0x51, 0x50,
-- 0x34, 0xb8, 0x44, 0xe4, 0x35, 0xac, 0x5c, 0x49, 0xc3, 0x61, 0x2a, 0xab, 0x7a, 0x2a, 0x0d, 0x38,
-- 0xb5, 0x4b, 0x12, 0xca, 0xa7, 0x99, 0x12, 0xe3, 0x59, 0x17, 0x3d, 0x80, 0x90, 0x0b, 0xe3, 0x51,
-- 0xe6, 0x6d, 0x72, 0x3f, 0x71, 0x31, 0x66, 0x6d, 0x79, 0x5c, 0xb8, 0x84, 0xf6, 0x7d, 0xe6, 0x20,
-- 0xa5, 0x82, 0x06, 0x74, 0xb5, 0x36, 0xfa, 0x05, 0xc0, 0xa9, 0x35, 0x82, 0x3b, 0x24, 0xa1, 0x46,
-- 0xad, 0x5e, 0x6a, 0xce, 0x2c, 0xbd, 0x66, 0xeb, 0x67, 0xc3, 0x17, 0x49, 0x14, 0x10, 0xd6, 0x23,
-- 0x7d, 0x9a, 0x25, 0x48, 0xa2, 0x9d, 0xed, 0xc3, 0xd4, 0x6a, 0x77, 0x3d, 0xd6, 0xeb, 0xb7, 0xed,
-- 0xcd, 0x28, 0x68, 0x75, 0x13, 0xbc, 0x85, 0x43, 0xdc, 0xf2, 0xa3, 0x6d, 0xaf, 0x35, 0xf1, 0x79,
-- 0x74, 0xe1, 0x3a, 0x27, 0xa9, 0x05, 0xde, 0x72, 0xb3, 0x2d, 0x36, 0xfe, 0x00, 0xf0, 0x65, 0x9e,
-- 0xe1, 0x47, 0x7c, 0x6e, 0xaa, 0x15, 0x46, 0x80, 0xd9, 0x66, 0xcf, 0x00, 0xdc, 0x66, 0xae, 0xec,
-- 0xe8, 0x87, 0x45, 0xf1, 0x99, 0x0e, 0x8b, 0xd2, 0xe4, 0x87, 0x45, 0x56, 0x0d, 0xe5, 0x73, 0xab,
-- 0xa1, 0x72, 0x61, 0x35, 0x7c, 0x57, 0x92, 0x95, 0x9f, 0xc5, 0x37, 0x41, 0x4d, 0x7c, 0x36, 0xac,
-- 0x89, 0x92, 0xd8, 0xed, 0xd0, 0x6a, 0x72, 0xae, 0xf5, 0x0e, 0x09, 0x99, 0xb7, 0xe5, 0x91, 0xe4,
-- 0x29, 0x95, 0xa1, 0xd9, 0xad, 0x94, 0xb7, 0x9b, 0xee, 0x95, 0xf2, 0x8d, 0xf7, 0xca, 0x58, 0x75,
-- 0x54, 0xae, 0x51, 0x1d, 0x8d, 0xdf, 0x01, 0x7c, 0x85, 0xa7, 0xe3, 0x21, 0x6e, 0x13, 0xff, 0x73,
-- 0x1c, 0x8c, 0x2c, 0xa7, 0x99, 0x0b, 0x3c, 0x93, 0xb9, 0x8a, 0xd7, 0x37, 0x57, 0x49, 0x33, 0xd7,
-- 0xf0, 0x6e, 0x28, 0x6b, 0x77, 0x43, 0xe3, 0xb4, 0x08, 0x6f, 0x8d, 0xef, 0x7f, 0x02, 0x4b, 0xbd,
-- 0xae, 0x59, 0xaa, 0xe6, 0xa0, 0xe7, 0x96, 0xb9, 0x82, 0x65, 0x7e, 0x04, 0x70, 0x3a, 0xbb, 0x83,
-- 0x90, 0x0d, 0xa1, 0xa4, 0x89, 0x6b, 0x46, 0x0a, 0x3d, 0xc7, 0xc9, 0xc9, 0x70, 0xd4, 0xd5, 0x10,
-- 0xe8, 0x6b, 0x58, 0x95, 0x3d, 0x55, 0xc5, 0xb7, 0xb5, 0x2a, 0x66, 0x09, 0xc1, 0xc1, 0x83, 0x0e,
-- 0x8e, 0x19, 0x49, 0x9c, 0xf7, 0xf8, 0x2e, 0x0e, 0x53, 0xeb, 0xee, 0x65, 0x12, 0x89, 0x37, 0x44,
-- 0xc9, 0xe3, 0xc9, 0x95, 0x6b, 0xba, 0x6a, 0x85, 0xc6, 0xf7, 0x00, 0xbe, 0xc4, 0x37, 0xca, 0xa5,
-- 0x19, 0xba, 0x62, 0x15, 0x4e, 0x27, 0xaa, 0xad, 0x7c, 0xdd, 0xb0, 0xf3, 0xb2, 0x9e, 0x23, 0xa5,
-- 0x53, 0xde, 0x4f, 0x2d, 0xe0, 0x0e, 0x99, 0x68, 0x39, 0x27, 0x63, 0xf1, 0x3c, 0x19, 0x39, 0xa5,
-- 0x90, 0x13, 0xee, 0xb7, 0x22, 0x44, 0xeb, 0x61, 0x87, 0x7c, 0xc3, 0xcd, 0x37, 0xf2, 0x69, 0xff,
-- 0xcc, 0x8e, 0xee, 0x8c, 0x44, 0x39, 0x8b, 0x77, 0x3e, 0x38, 0x4c, 0xad, 0xfb, 0x97, 0xa9, 0x72,
-- 0x09, 0x59, 0x0b, 0x41, 0x37, 0x6e, 0xf1, 0xe6, 0xdf, 0x8b, 0x3f, 0x00, 0x38, 0xfb, 0x55, 0xe4,
-- 0xf7, 0x03, 0x72, 0x63, 0xef, 0xc4, 0xc6, 0xaf, 0x45, 0x38, 0x97, 0xed, 0x51, 0xa9, 0x1c, 0x9c,
-- 0x49, 0xae, 0x31, 0x4a, 0x6e, 0x1e, 0xeb, 0xdc, 0x3f, 0x4c, 0xad, 0xe5, 0x2b, 0x25, 0x36, 0x4f,
-- 0xfc, 0xff, 0x26, 0xf5, 0xdf, 0x22, 0x9c, 0xfd, 0x92, 0xcf, 0x32, 0xd4, 0xeb, 0x5d, 0x58, 0xa5,
-- 0xe2, 0x36, 0x57, 0x6a, 0x99, 0xe3, 0x6f, 0xbe, 0xf9, 0xf7, 0x86, 0xb5, 0x82, 0xab, 0xf0, 0xfc,
-- 0x7b, 0xc0, 0xe7, 0x97, 0x40, 0x96, 0xf7, 0xc6, 0x38, 0xf3, 0xec, 0x15, 0xc1, 0xd9, 0x92, 0x83,
-- 0xee, 0xc1, 0x8a, 0xa8, 0x5e, 0x95, 0xf6, 0xdc, 0xb2, 0x67, 0xcb, 0x68, 0xad, 0xe0, 0x4a, 0x38,
-- 0x5a, 0x82, 0xe5, 0x38, 0x89, 0x02, 0xf5, 0xb1, 0x76, 0x67, 0x7c, 0x4d, 0xfd, 0xe8, 0x59, 0x2b,
-- 0xb8, 0x02, 0x8b, 0x56, 0xb8, 0x45, 0xf9, 0x99, 0x95, 0x1d, 0xc0, 0xc6, 0x38, 0x4d, 0xa3, 0x64,
-- 0x50, 0xb4, 0x02, 0xab, 0xbb, 0x22, 0xed, 0xe2, 0xdd, 0x99, 0x3b, 0x53, 0x23, 0xe5, 0x0d, 0xc1,
-- 0xe3, 0x92, 0x58, 0x07, 0x8e, 0xfc, 0xe7, 0xac, 0x1c, 0x1c, 0x99, 0x85, 0x27, 0x47, 0x66, 0xe1,
-- 0xf4, 0xc8, 0x04, 0xdf, 0x0e, 0x4c, 0xf0, 0xd3, 0xc0, 0x04, 0xfb, 0x03, 0x13, 0x1c, 0x0c, 0x4c,
-- 0xf0, 0xd7, 0xc0, 0x04, 0x7f, 0x0f, 0xcc, 0xc2, 0xe9, 0xc0, 0x04, 0x8f, 0x8f, 0xcd, 0xc2, 0xc1,
-- 0xb1, 0x59, 0x78, 0x72, 0x6c, 0x16, 0xda, 0x55, 0x61, 0xb9, 0xe5, 0xff, 0x02, 0x00, 0x00, 0xff,
-- 0xff, 0xf8, 0x4c, 0x44, 0xf0, 0x43, 0x10, 0x00, 0x00,
-+ // 1245 bytes of a gzipped FileDescriptorProto
-+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
-+ 0x17, 0xf7, 0xac, 0x7f, 0xc5, 0x93, 0x26, 0xdf, 0x2f, 0x93, 0xd2, 0xae, 0x42, 0xb5, 0x6b, 0x59,
-+ 0x82, 0x1a, 0x09, 0x76, 0x45, 0x5a, 0x5a, 0x7e, 0x09, 0xd1, 0x6d, 0x40, 0x89, 0x54, 0x21, 0xd8,
-+ 0x46, 0xdc, 0xc7, 0xf1, 0xc4, 0x5e, 0xe2, 0xdd, 0x75, 0x66, 0xc6, 0x11, 0xb9, 0x71, 0x45, 0x02,
-+ 0xa9, 0x7f, 0x05, 0x42, 0x02, 0xf1, 0x07, 0x20, 0x71, 0xcf, 0x31, 0xc7, 0x2a, 0x12, 0x86, 0x38,
-+ 0x17, 0x08, 0x97, 0xfc, 0x09, 0x68, 0x66, 0x76, 0xd7, 0xb3, 0x8e, 0x93, 0xc6, 0xe9, 0xa5, 0x48,
-+ 0x5c, 0xec, 0x99, 0xb7, 0xef, 0x33, 0x3b, 0xef, 0xf3, 0x3e, 0xef, 0xcd, 0x2c, 0xbc, 0xdd, 0xdf,
-+ 0xee, 0xb8, 0x3b, 0x03, 0x42, 0x03, 0x42, 0xe5, 0xff, 0x1e, 0xc5, 0x51, 0x87, 0x68, 0x43, 0xa7,
-+ 0x4f, 0x63, 0x1e, 0x23, 0x38, 0xb6, 0x2c, 0x5f, 0xef, 0xc4, 0x9d, 0x58, 0x9a, 0x5d, 0x31, 0x52,
-+ 0x1e, 0xcb, 0x76, 0x27, 0x8e, 0x3b, 0x3d, 0xe2, 0xca, 0x59, 0x6b, 0xb0, 0xe5, 0xf2, 0x20, 0x24,
-+ 0x8c, 0xe3, 0xb0, 0x9f, 0x38, 0xbc, 0x22, 0xde, 0xd5, 0x8b, 0x3b, 0x0a, 0x99, 0x0e, 0x92, 0x87,
-+ 0xf5, 0xe4, 0xe1, 0x4e, 0x2f, 0x8c, 0xdb, 0xa4, 0xe7, 0x32, 0x8e, 0x39, 0x53, 0xbf, 0x89, 0xc7,
-+ 0x92, 0xf0, 0xe8, 0x0f, 0x58, 0x57, 0xfe, 0x24, 0xc6, 0x87, 0xcf, 0xdc, 0x7f, 0x0b, 0x33, 0xe2,
-+ 0xb6, 0xc9, 0x56, 0x10, 0x05, 0x3c, 0x88, 0x23, 0xa6, 0x8f, 0x93, 0x45, 0xee, 0x5d, 0x6e, 0x91,
-+ 0x49, 0x4e, 0x1a, 0x07, 0x06, 0x9c, 0x7f, 0x14, 0x6f, 0x07, 0x3e, 0xd9, 0x19, 0x10, 0xc6, 0xd1,
-+ 0x75, 0x58, 0x96, 0x3e, 0x26, 0xa8, 0x83, 0x66, 0xcd, 0x57, 0x13, 0x61, 0xed, 0x05, 0x61, 0xc0,
-+ 0x4d, 0xa3, 0x0e, 0x9a, 0x0b, 0xbe, 0x9a, 0x20, 0x04, 0x4b, 0x8c, 0x93, 0xbe, 0x59, 0xac, 0x83,
-+ 0x66, 0xd1, 0x97, 0x63, 0xb4, 0x0c, 0xe7, 0x82, 0x88, 0x13, 0xba, 0x8b, 0x7b, 0x66, 0x4d, 0xda,
-+ 0xb3, 0x39, 0xfa, 0x10, 0x56, 0x19, 0xc7, 0x94, 0x6f, 0x30, 0xb3, 0x54, 0x07, 0xcd, 0xf9, 0x95,
-+ 0x65, 0x47, 0xf1, 0xed, 0xa4, 0x7c, 0x3b, 0x1b, 0x29, 0xdf, 0xde, 0xdc, 0xfe, 0xd0, 0x2e, 0x3c,
-+ 0xf9, 0xdd, 0x06, 0x7e, 0x0a, 0x42, 0xef, 0xc1, 0x32, 0x89, 0xda, 0x1b, 0xcc, 0x2c, 0xcf, 0x80,
-+ 0x56, 0x10, 0xf4, 0x16, 0xac, 0xb5, 0x03, 0x4a, 0x36, 0x05, 0x67, 0x66, 0xa5, 0x0e, 0x9a, 0x8b,
-+ 0x2b, 0x4b, 0x4e, 0x96, 0xbf, 0xd5, 0xf4, 0x91, 0x3f, 0xf6, 0x12, 0xe1, 0xf5, 0x31, 0xef, 0x9a,
-+ 0x55, 0xc9, 0x84, 0x1c, 0xa3, 0x06, 0xac, 0xb0, 0x2e, 0xa6, 0x6d, 0x66, 0xce, 0xd5, 0x8b, 0xcd,
-+ 0x9a, 0x07, 0x4f, 0x86, 0x76, 0x62, 0xf1, 0x93, 0xff, 0xc6, 0x5f, 0x00, 0x22, 0x41, 0xe9, 0x7a,
-+ 0xc4, 0x38, 0x8e, 0xf8, 0x55, 0x98, 0xfd, 0x00, 0x56, 0x84, 0xf2, 0x36, 0x98, 0xe4, 0xf6, 0xb2,
-+ 0xa1, 0x26, 0x98, 0x7c, 0xac, 0xa5, 0x99, 0x62, 0x2d, 0x4f, 0x8d, 0xb5, 0x72, 0x6e, 0xac, 0x3f,
-+ 0x96, 0xe0, 0x35, 0x25, 0x1f, 0xd6, 0x8f, 0x23, 0x46, 0x04, 0xe8, 0x31, 0xc7, 0x7c, 0xc0, 0x54,
-+ 0x98, 0x09, 0x48, 0x5a, 0xfc, 0xe4, 0x09, 0xfa, 0x08, 0x96, 0x56, 0x31, 0xc7, 0x32, 0xe4, 0xf9,
-+ 0x95, 0xeb, 0x8e, 0x26, 0x4a, 0xb1, 0x96, 0x78, 0xe6, 0xdd, 0x10, 0x51, 0x9d, 0x0c, 0xed, 0xc5,
-+ 0x36, 0xe6, 0xf8, 0x8d, 0x38, 0x0c, 0x38, 0x09, 0xfb, 0x7c, 0xcf, 0x97, 0x48, 0xf4, 0x36, 0xac,
-+ 0x7d, 0x4c, 0x69, 0x4c, 0x37, 0xf6, 0xfa, 0x44, 0x52, 0x54, 0xf3, 0x6e, 0x9e, 0x0c, 0xed, 0x25,
-+ 0x92, 0x1a, 0x35, 0xc4, 0xd8, 0x13, 0xbd, 0x0e, 0xcb, 0x72, 0x22, 0x49, 0xa9, 0x79, 0x4b, 0x27,
-+ 0x43, 0xfb, 0x7f, 0x12, 0xa2, 0xb9, 0x2b, 0x8f, 0x3c, 0x87, 0xe5, 0x4b, 0x71, 0x98, 0xa5, 0xb2,
-+ 0xa2, 0xa7, 0xd2, 0x84, 0xd5, 0x5d, 0x42, 0x99, 0x58, 0xa6, 0x2a, 0xed, 0xe9, 0x14, 0x3d, 0x80,
-+ 0x50, 0x10, 0x13, 0x30, 0x1e, 0x6c, 0x0a, 0x3d, 0x09, 0x32, 0x16, 0x1c, 0xd5, 0x2e, 0x7c, 0xc2,
-+ 0x06, 0x3d, 0xee, 0xa1, 0x84, 0x05, 0xcd, 0xd1, 0xd7, 0xc6, 0xe8, 0x27, 0x00, 0xab, 0x6b, 0x04,
-+ 0xb7, 0x09, 0x65, 0x66, 0xad, 0x5e, 0x6c, 0xce, 0xaf, 0xbc, 0xea, 0xe8, 0xbd, 0xe1, 0x33, 0x1a,
-+ 0x87, 0x84, 0x77, 0xc9, 0x80, 0xa5, 0x09, 0x52, 0xde, 0xde, 0xf6, 0xe1, 0xd0, 0x6e, 0x75, 0x02,
-+ 0xde, 0x1d, 0xb4, 0x9c, 0xcd, 0x38, 0x74, 0x3b, 0x14, 0x6f, 0xe1, 0x08, 0xbb, 0xbd, 0x78, 0x3b,
-+ 0x70, 0x67, 0xee, 0x47, 0xe7, 0xbe, 0xe7, 0x64, 0x68, 0x83, 0x37, 0xfd, 0x74, 0x8b, 0x8d, 0xdf,
-+ 0x00, 0x7c, 0x49, 0x64, 0xf8, 0xb1, 0x58, 0x9b, 0x69, 0x85, 0x11, 0x62, 0xbe, 0xd9, 0x35, 0x81,
-+ 0x90, 0x99, 0xaf, 0x26, 0x7a, 0xb3, 0x30, 0x9e, 0xab, 0x59, 0x14, 0x67, 0x6f, 0x16, 0x69, 0x35,
-+ 0x94, 0xa6, 0x56, 0x43, 0xf9, 0xdc, 0x6a, 0xf8, 0xb6, 0xa8, 0x2a, 0x3f, 0x8d, 0x6f, 0x86, 0x9a,
-+ 0xf8, 0x24, 0xab, 0x89, 0xa2, 0xdc, 0x6d, 0x26, 0x35, 0xb5, 0xd6, 0x7a, 0x9b, 0x44, 0x3c, 0xd8,
-+ 0x0a, 0x08, 0x7d, 0x46, 0x65, 0x68, 0x72, 0x2b, 0xe6, 0xe5, 0xa6, 0x6b, 0xa5, 0xf4, 0xc2, 0x6b,
-+ 0x65, 0xa2, 0x3a, 0xca, 0x57, 0xa8, 0x8e, 0xc6, 0xaf, 0x00, 0xbe, 0x2c, 0xd2, 0xf1, 0x08, 0xb7,
-+ 0x48, 0xef, 0x53, 0x1c, 0x8e, 0x25, 0xa7, 0x89, 0x0b, 0x3c, 0x97, 0xb8, 0x8c, 0xab, 0x8b, 0xab,
-+ 0xa8, 0x89, 0x2b, 0x3b, 0x1b, 0x4a, 0xda, 0xd9, 0xd0, 0x38, 0x35, 0xe0, 0x8d, 0xc9, 0xfd, 0xcf,
-+ 0x20, 0xa9, 0xd7, 0x34, 0x49, 0xd5, 0x3c, 0xf4, 0x9f, 0x64, 0x2e, 0x21, 0x99, 0xef, 0x01, 0x9c,
-+ 0x4b, 0xcf, 0x20, 0xe4, 0x40, 0xa8, 0x60, 0xf2, 0x98, 0x51, 0x44, 0x2f, 0x0a, 0x30, 0xcd, 0xac,
-+ 0xbe, 0xe6, 0x81, 0xbe, 0x84, 0x15, 0x35, 0x4b, 0xaa, 0xf8, 0xa6, 0x56, 0xc5, 0x9c, 0x12, 0x1c,
-+ 0x3e, 0x68, 0xe3, 0x3e, 0x27, 0xd4, 0x7b, 0x57, 0xec, 0xe2, 0x70, 0x68, 0xdf, 0xbe, 0x88, 0x22,
-+ 0x79, 0x43, 0x54, 0x38, 0x91, 0x5c, 0xf5, 0x4e, 0x3f, 0x79, 0x43, 0xe3, 0x3b, 0x00, 0xff, 0x2f,
-+ 0x36, 0x2a, 0xa8, 0xc9, 0x54, 0xb1, 0x0a, 0xe7, 0x68, 0x32, 0x4e, 0x74, 0xdd, 0x70, 0xf2, 0xb4,
-+ 0x4e, 0xa1, 0xd2, 0x2b, 0xed, 0x0f, 0x6d, 0xe0, 0x67, 0x48, 0x74, 0x27, 0x47, 0xa3, 0x31, 0x8d,
-+ 0x46, 0x01, 0x29, 0xe4, 0x88, 0xfb, 0xc5, 0x80, 0x68, 0x3d, 0x6a, 0x93, 0xaf, 0x84, 0xf8, 0xc6,
-+ 0x3a, 0x1d, 0x9c, 0xd9, 0xd1, 0xad, 0x31, 0x29, 0x67, 0xfd, 0xbd, 0xf7, 0x0f, 0x87, 0xf6, 0xfd,
-+ 0x8b, 0x58, 0xb9, 0x00, 0xac, 0x85, 0xa0, 0x0b, 0xd7, 0x78, 0xf1, 0xcf, 0xc5, 0x6f, 0x0c, 0xb8,
-+ 0xf0, 0x45, 0xdc, 0x1b, 0x84, 0x24, 0x6d, 0x50, 0x0f, 0x61, 0x69, 0x8b, 0xc6, 0xa1, 0xe4, 0xac,
-+ 0xe8, 0xb9, 0x53, 0xf4, 0xd2, 0xcf, 0x96, 0x76, 0x37, 0xe3, 0x30, 0x8c, 0x23, 0x57, 0x7e, 0x74,
-+ 0xc8, 0xce, 0xe3, 0x4b, 0x30, 0x5a, 0x87, 0x55, 0xde, 0xa5, 0xf1, 0xa0, 0xd3, 0x95, 0x59, 0xbc,
-+ 0xc2, 0x3a, 0x29, 0x5e, 0x5c, 0xeb, 0xe5, 0xb1, 0x2c, 0x08, 0x55, 0x8d, 0x2b, 0x9b, 0x8f, 0xef,
-+ 0x3d, 0xa2, 0x79, 0x95, 0x27, 0x3f, 0x0e, 0xca, 0xda, 0xc7, 0x41, 0x03, 0x5e, 0xe3, 0x98, 0x76,
-+ 0x08, 0x97, 0x1d, 0x2d, 0xb9, 0x57, 0xfa, 0x39, 0x5b, 0xe3, 0x67, 0x03, 0x2e, 0xa6, 0x5c, 0x24,
-+ 0xd9, 0x0c, 0xcf, 0x88, 0xc8, 0x1c, 0x8b, 0x28, 0xef, 0xeb, 0xdd, 0x3f, 0x1c, 0xda, 0x77, 0x2e,
-+ 0x25, 0xa0, 0x3c, 0xf0, 0xdf, 0x2b, 0x9e, 0xbf, 0x0d, 0xb8, 0xf0, 0xb9, 0x58, 0x25, 0xe3, 0xeb,
-+ 0x1d, 0x58, 0x61, 0xf2, 0xd6, 0x90, 0xb0, 0x65, 0x4d, 0xde, 0xb0, 0xf3, 0xf7, 0x93, 0xb5, 0x82,
-+ 0x9f, 0xf8, 0x8b, 0xef, 0x8e, 0x9e, 0x4a, 0x8d, 0x71, 0xa6, 0x7d, 0x38, 0xd3, 0x8f, 0x22, 0x81,
-+ 0x56, 0x18, 0x74, 0x0f, 0x96, 0x65, 0x97, 0x48, 0xae, 0x5c, 0xb9, 0xd7, 0x9e, 0x2d, 0xd7, 0xb5,
-+ 0x82, 0xaf, 0xdc, 0xd1, 0x0a, 0x2c, 0x09, 0x15, 0x26, 0x1f, 0x85, 0xb7, 0x26, 0xdf, 0xa9, 0xb7,
-+ 0xb8, 0xb5, 0x82, 0x2f, 0x7d, 0xd1, 0x5d, 0x71, 0x82, 0x8b, 0xde, 0x98, 0x36, 0x7a, 0x73, 0x12,
-+ 0xa6, 0x41, 0x52, 0x57, 0x74, 0x17, 0x56, 0x76, 0x65, 0xda, 0xe5, 0x1d, 0x5d, 0x1c, 0xdc, 0x1a,
-+ 0x28, 0x2f, 0x08, 0x11, 0x97, 0xf2, 0xf5, 0xe0, 0x58, 0x7f, 0xde, 0xdd, 0x83, 0x23, 0xab, 0xf0,
-+ 0xf4, 0xc8, 0x2a, 0x9c, 0x1e, 0x59, 0xe0, 0xeb, 0x91, 0x05, 0x7e, 0x18, 0x59, 0x60, 0x7f, 0x64,
-+ 0x81, 0x83, 0x91, 0x05, 0xfe, 0x18, 0x59, 0xe0, 0xcf, 0x91, 0x55, 0x38, 0x1d, 0x59, 0xe0, 0xc9,
-+ 0xb1, 0x55, 0x38, 0x38, 0xb6, 0x0a, 0x4f, 0x8f, 0xad, 0x42, 0xab, 0x22, 0x25, 0x77, 0xe7, 0x9f,
-+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x60, 0x11, 0x37, 0x7d, 0xab, 0x10, 0x00, 0x00,
- }
-
- func (this *LokiRequest) Equal(that interface{}) bool {
-@@ -1503,20 +1519,29 @@ func (this *VolumeRequest) Equal(that interface{}) bool {
- } else if this == nil {
- return false
- }
-- if len(this.Match) != len(that1.Match) {
-+ if !this.From.Equal(that1.From) {
- return false
- }
-- for i := range this.Match {
-- if this.Match[i] != that1.Match[i] {
-- return false
-- }
-+ if !this.Through.Equal(that1.Through) {
-+ return false
- }
-- if !this.StartTs.Equal(that1.StartTs) {
-+ if this.Matchers != that1.Matchers {
- return false
- }
-- if !this.EndTs.Equal(that1.EndTs) {
-+ if this.Limit != that1.Limit {
-+ return false
-+ }
-+ if this.Step != that1.Step {
-+ return false
-+ }
-+ if len(this.TargetLabels) != len(that1.TargetLabels) {
- return false
- }
-+ for i := range this.TargetLabels {
-+ if this.TargetLabels[i] != that1.TargetLabels[i] {
-+ return false
-+ }
-+ }
- return true
- }
- func (this *VolumeResponse) Equal(that interface{}) bool {
-@@ -1880,11 +1905,14 @@ func (this *VolumeRequest) GoString() string {
- if this == nil {
- return ""nil""
- }
-- s := make([]string, 0, 7)
-+ s := make([]string, 0, 10)
- s = append(s, ""&queryrange.VolumeRequest{"")
-- s = append(s, ""Match: ""+fmt.Sprintf(""%#v"", this.Match)+"",\n"")
-- s = append(s, ""StartTs: ""+fmt.Sprintf(""%#v"", this.StartTs)+"",\n"")
-- s = append(s, ""EndTs: ""+fmt.Sprintf(""%#v"", this.EndTs)+"",\n"")
-+ s = append(s, ""From: ""+fmt.Sprintf(""%#v"", this.From)+"",\n"")
-+ s = append(s, ""Through: ""+fmt.Sprintf(""%#v"", this.Through)+"",\n"")
-+ s = append(s, ""Matchers: ""+fmt.Sprintf(""%#v"", this.Matchers)+"",\n"")
-+ s = append(s, ""Limit: ""+fmt.Sprintf(""%#v"", this.Limit)+"",\n"")
-+ s = append(s, ""Step: ""+fmt.Sprintf(""%#v"", this.Step)+"",\n"")
-+ s = append(s, ""TargetLabels: ""+fmt.Sprintf(""%#v"", this.TargetLabels)+"",\n"")
- s = append(s, ""}"")
- return strings.Join(s, """")
- }
-@@ -2622,31 +2650,42 @@ func (m *VolumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- _ = i
- var l int
- _ = l
-- n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):])
-- if err15 != nil {
-- return 0, err15
-- }
-- i -= n15
-- i = encodeVarintQueryrange(dAtA, i, uint64(n15))
-- i--
-- dAtA[i] = 0x1a
-- n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):])
-- if err16 != nil {
-- return 0, err16
-- }
-- i -= n16
-- i = encodeVarintQueryrange(dAtA, i, uint64(n16))
-- i--
-- dAtA[i] = 0x12
-- if len(m.Match) > 0 {
-- for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- {
-- i -= len(m.Match[iNdEx])
-- copy(dAtA[i:], m.Match[iNdEx])
-- i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Match[iNdEx])))
-+ if len(m.TargetLabels) > 0 {
-+ for iNdEx := len(m.TargetLabels) - 1; iNdEx >= 0; iNdEx-- {
-+ i -= len(m.TargetLabels[iNdEx])
-+ copy(dAtA[i:], m.TargetLabels[iNdEx])
-+ i = encodeVarintQueryrange(dAtA, i, uint64(len(m.TargetLabels[iNdEx])))
- i--
-- dAtA[i] = 0xa
-+ dAtA[i] = 0x32
- }
- }
-+ if m.Step != 0 {
-+ i = encodeVarintQueryrange(dAtA, i, uint64(m.Step))
-+ i--
-+ dAtA[i] = 0x28
-+ }
-+ if m.Limit != 0 {
-+ i = encodeVarintQueryrange(dAtA, i, uint64(m.Limit))
-+ i--
-+ dAtA[i] = 0x20
-+ }
-+ if len(m.Matchers) > 0 {
-+ i -= len(m.Matchers)
-+ copy(dAtA[i:], m.Matchers)
-+ i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Matchers)))
-+ i--
-+ dAtA[i] = 0x1a
-+ }
-+ if m.Through != 0 {
-+ i = encodeVarintQueryrange(dAtA, i, uint64(m.Through))
-+ i--
-+ dAtA[i] = 0x10
-+ }
-+ if m.From != 0 {
-+ i = encodeVarintQueryrange(dAtA, i, uint64(m.From))
-+ i--
-+ dAtA[i] = 0x8
-+ }
- return len(dAtA) - i, nil
- }
-
-@@ -3141,16 +3180,28 @@ func (m *VolumeRequest) Size() (n int) {
- }
- var l int
- _ = l
-- if len(m.Match) > 0 {
-- for _, s := range m.Match {
-+ if m.From != 0 {
-+ n += 1 + sovQueryrange(uint64(m.From))
-+ }
-+ if m.Through != 0 {
-+ n += 1 + sovQueryrange(uint64(m.Through))
-+ }
-+ l = len(m.Matchers)
-+ if l > 0 {
-+ n += 1 + l + sovQueryrange(uint64(l))
-+ }
-+ if m.Limit != 0 {
-+ n += 1 + sovQueryrange(uint64(m.Limit))
-+ }
-+ if m.Step != 0 {
-+ n += 1 + sovQueryrange(uint64(m.Step))
-+ }
-+ if len(m.TargetLabels) > 0 {
-+ for _, s := range m.TargetLabels {
- l = len(s)
- n += 1 + l + sovQueryrange(uint64(l))
- }
- }
-- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs)
-- n += 1 + l + sovQueryrange(uint64(l))
-- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs)
-- n += 1 + l + sovQueryrange(uint64(l))
- return n
- }
-
-@@ -3413,9 +3464,12 @@ func (this *VolumeRequest) String() string {
- return ""nil""
- }
- s := strings.Join([]string{`&VolumeRequest{`,
-- `Match:` + fmt.Sprintf(""%v"", this.Match) + `,`,
-- `StartTs:` + strings.Replace(strings.Replace(fmt.Sprintf(""%v"", this.StartTs), ""Timestamp"", ""types.Timestamp"", 1), `&`, ``, 1) + `,`,
-- `EndTs:` + strings.Replace(strings.Replace(fmt.Sprintf(""%v"", this.EndTs), ""Timestamp"", ""types.Timestamp"", 1), `&`, ``, 1) + `,`,
-+ `From:` + fmt.Sprintf(""%v"", this.From) + `,`,
-+ `Through:` + fmt.Sprintf(""%v"", this.Through) + `,`,
-+ `Matchers:` + fmt.Sprintf(""%v"", this.Matchers) + `,`,
-+ `Limit:` + fmt.Sprintf(""%v"", this.Limit) + `,`,
-+ `Step:` + fmt.Sprintf(""%v"", this.Step) + `,`,
-+ `TargetLabels:` + fmt.Sprintf(""%v"", this.TargetLabels) + `,`,
- `}`,
- }, """")
- return s
-@@ -5526,8 +5580,46 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error {
- }
- switch fieldNum {
- case 1:
-+ if wireType != 0 {
-+ return fmt.Errorf(""proto: wrong wireType = %d for field From"", wireType)
-+ }
-+ m.From = 0
-+ for shift := uint(0); ; shift += 7 {
-+ if shift >= 64 {
-+ return ErrIntOverflowQueryrange
-+ }
-+ if iNdEx >= l {
-+ return io.ErrUnexpectedEOF
-+ }
-+ b := dAtA[iNdEx]
-+ iNdEx++
-+ m.From |= github_com_prometheus_common_model.Time(b&0x7F) << shift
-+ if b < 0x80 {
-+ break
-+ }
-+ }
-+ case 2:
-+ if wireType != 0 {
-+ return fmt.Errorf(""proto: wrong wireType = %d for field Through"", wireType)
-+ }
-+ m.Through = 0
-+ for shift := uint(0); ; shift += 7 {
-+ if shift >= 64 {
-+ return ErrIntOverflowQueryrange
-+ }
-+ if iNdEx >= l {
-+ return io.ErrUnexpectedEOF
-+ }
-+ b := dAtA[iNdEx]
-+ iNdEx++
-+ m.Through |= github_com_prometheus_common_model.Time(b&0x7F) << shift
-+ if b < 0x80 {
-+ break
-+ }
-+ }
-+ case 3:
- if wireType != 2 {
-- return fmt.Errorf(""proto: wrong wireType = %d for field Match"", wireType)
-+ return fmt.Errorf(""proto: wrong wireType = %d for field Matchers"", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
-@@ -5555,13 +5647,13 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error {
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
-- m.Match = append(m.Match, string(dAtA[iNdEx:postIndex]))
-+ m.Matchers = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
-- case 2:
-- if wireType != 2 {
-- return fmt.Errorf(""proto: wrong wireType = %d for field StartTs"", wireType)
-+ case 4:
-+ if wireType != 0 {
-+ return fmt.Errorf(""proto: wrong wireType = %d for field Limit"", wireType)
- }
-- var msglen int
-+ m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
-@@ -5571,30 +5663,35 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error {
- }
- b := dAtA[iNdEx]
- iNdEx++
-- msglen |= int(b&0x7F) << shift
-+ m.Limit |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
-- if msglen < 0 {
-- return ErrInvalidLengthQueryrange
-- }
-- postIndex := iNdEx + msglen
-- if postIndex < 0 {
-- return ErrInvalidLengthQueryrange
-- }
-- if postIndex > l {
-- return io.ErrUnexpectedEOF
-+ case 5:
-+ if wireType != 0 {
-+ return fmt.Errorf(""proto: wrong wireType = %d for field Step"", wireType)
- }
-- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTs, dAtA[iNdEx:postIndex]); err != nil {
-- return err
-+ m.Step = 0
-+ for shift := uint(0); ; shift += 7 {
-+ if shift >= 64 {
-+ return ErrIntOverflowQueryrange
-+ }
-+ if iNdEx >= l {
-+ return io.ErrUnexpectedEOF
-+ }
-+ b := dAtA[iNdEx]
-+ iNdEx++
-+ m.Step |= int64(b&0x7F) << shift
-+ if b < 0x80 {
-+ break
-+ }
- }
-- iNdEx = postIndex
-- case 3:
-+ case 6:
- if wireType != 2 {
-- return fmt.Errorf(""proto: wrong wireType = %d for field EndTs"", wireType)
-+ return fmt.Errorf(""proto: wrong wireType = %d for field TargetLabels"", wireType)
- }
-- var msglen int
-+ var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQueryrange
-@@ -5604,24 +5701,23 @@ func (m *VolumeRequest) Unmarshal(dAtA []byte) error {
- }
- b := dAtA[iNdEx]
- iNdEx++
-- msglen |= int(b&0x7F) << shift
-+ stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
-- if msglen < 0 {
-+ intStringLen := int(stringLen)
-+ if intStringLen < 0 {
- return ErrInvalidLengthQueryrange
- }
-- postIndex := iNdEx + msglen
-+ postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthQueryrange
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
-- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EndTs, dAtA[iNdEx:postIndex]); err != nil {
-- return err
-- }
-+ m.TargetLabels = append(m.TargetLabels, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
-diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto
-index c3174d7fa3085..fdfccd1d6fc0a 100644
---- a/pkg/querier/queryrange/queryrange.proto
-+++ b/pkg/querier/queryrange/queryrange.proto
-@@ -146,15 +146,18 @@ message IndexStatsResponse {
- }
-
- message VolumeRequest {
-- repeated string match = 1;
-- google.protobuf.Timestamp startTs = 2 [
-- (gogoproto.stdtime) = true,
-+ int64 from = 1 [
-+ (gogoproto.customtype) = ""github.com/prometheus/common/model.Time"",
- (gogoproto.nullable) = false
- ];
-- google.protobuf.Timestamp endTs = 3 [
-- (gogoproto.stdtime) = true,
-+ int64 through = 2 [
-+ (gogoproto.customtype) = ""github.com/prometheus/common/model.Time"",
- (gogoproto.nullable) = false
- ];
-+ string matchers = 3;
-+ int32 limit = 4;
-+ int64 step = 5;
-+ repeated string targetLabels = 6;
- }
-
- message VolumeResponse {
-diff --git a/pkg/querier/queryrange/series_volume.go b/pkg/querier/queryrange/series_volume.go
-index f5bd563250b39..b5904685adf9c 100644
---- a/pkg/querier/queryrange/series_volume.go
-+++ b/pkg/querier/queryrange/series_volume.go
-@@ -64,11 +64,12 @@ func NewSeriesVolumeMiddleware() queryrangebase.Middleware {
- }
-
- reqs[bucket] = &logproto.VolumeRequest{
-- From: model.TimeFromUnix(start.Unix()),
-- Through: model.TimeFromUnix(end.Unix()),
-- Matchers: volReq.Matchers,
-- Limit: volReq.Limit,
-- Step: volReq.Step,
-+ From: model.TimeFromUnix(start.Unix()),
-+ Through: model.TimeFromUnix(end.Unix()),
-+ Matchers: volReq.Matchers,
-+ Limit: volReq.Limit,
-+ Step: volReq.Step,
-+ TargetLabels: volReq.TargetLabels,
- }
- })
-
-diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go
-index d1a1bd4a1da87..904b72f1c1049 100644
---- a/pkg/querier/queryrange/split_by_interval.go
-+++ b/pkg/querier/queryrange/split_by_interval.go
-@@ -295,10 +295,11 @@ func splitByTime(req queryrangebase.Request, interval time.Duration) ([]queryran
- endTS := model.Time(r.GetEnd()).Time()
- util.ForInterval(interval, startTS, endTS, true, func(start, end time.Time) {
- reqs = append(reqs, &logproto.VolumeRequest{
-- From: model.TimeFromUnix(start.Unix()),
-- Through: model.TimeFromUnix(end.Unix()),
-- Matchers: r.GetMatchers(),
-- Limit: r.Limit,
-+ From: model.TimeFromUnix(start.Unix()),
-+ Through: model.TimeFromUnix(end.Unix()),
-+ Matchers: r.GetMatchers(),
-+ Limit: r.Limit,
-+ TargetLabels: r.TargetLabels,
- })
- })
- default:
-diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go
-index b62663285e85e..53d639e24b020 100644
---- a/pkg/storage/async_store.go
-+++ b/pkg/storage/async_store.go
-@@ -28,7 +28,7 @@ import (
- type IngesterQuerier interface {
- GetChunkIDs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]string, error)
- Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error)
-- SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
-+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
- }
-
- type AsyncStoreCfg struct {
-@@ -165,7 +165,7 @@ func (a *AsyncStore) Stats(ctx context.Context, userID string, from, through mod
- return &merged, nil
- }
-
--func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- sp, ctx := opentracing.StartSpanFromContext(ctx, ""AsyncStore.SeriesVolume"")
- defer sp.Finish()
-
-@@ -176,7 +176,7 @@ func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, thro
-
- if a.shouldQueryIngesters(through, model.Now()) {
- jobs = append(jobs, func() (*logproto.VolumeResponse, error) {
-- vols, err := a.ingesterQuerier.SeriesVolume(ctx, userID, from, through, limit, matchers...)
-+ vols, err := a.ingesterQuerier.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...)
- level.Debug(logger).Log(
- ""msg"", ""queried label volumes"",
- ""matchers"", matchersStr,
-@@ -186,7 +186,7 @@ func (a *AsyncStore) SeriesVolume(ctx context.Context, userID string, from, thro
- })
- }
- jobs = append(jobs, func() (*logproto.VolumeResponse, error) {
-- vols, err := a.Store.SeriesVolume(ctx, userID, from, through, limit, matchers...)
-+ vols, err := a.Store.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...)
- level.Debug(logger).Log(
- ""msg"", ""queried label volume"",
- ""matchers"", matchersStr,
-diff --git a/pkg/storage/async_store_test.go b/pkg/storage/async_store_test.go
-index 2abb2c1e45abc..b0a48a0bb9dc8 100644
---- a/pkg/storage/async_store_test.go
-+++ b/pkg/storage/async_store_test.go
-@@ -39,8 +39,8 @@ func (s *storeMock) GetChunkFetcher(tm model.Time) *fetcher.Fetcher {
- return args.Get(0).(*fetcher.Fetcher)
- }
-
--func (s *storeMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-- args := s.Called(userID, from, through, matchers)
-+func (s *storeMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+ args := s.Called(userID, from, through, targetLabels, matchers)
-
- if args.Get(0) == nil {
- return nil, args.Error(1)
-@@ -63,8 +63,8 @@ func (i *ingesterQuerierMock) GetChunkIDs(ctx context.Context, from, through mod
- return args.Get(0).([]string), args.Error(1)
- }
-
--func (i *ingesterQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-- args := i.Called(userID, from, through, matchers)
-+func (i *ingesterQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+ args := i.Called(userID, from, through, targetLabels, matchers)
-
- if args.Get(0) == nil {
- return nil, args.Error(1)
-@@ -334,7 +334,7 @@ func TestSeriesVolume(t *testing.T) {
- }
- asyncStore := NewAsyncStore(asyncStoreCfg, store, config.SchemaConfig{})
-
-- vol, err := asyncStore.SeriesVolume(context.Background(), ""test"", model.Now().Add(-2*time.Hour), model.Now(), 10, nil...)
-+ vol, err := asyncStore.SeriesVolume(context.Background(), ""test"", model.Now().Add(-2*time.Hour), model.Now(), 10, nil, nil...)
- require.NoError(t, err)
-
- require.Equal(t, &logproto.VolumeResponse{
-diff --git a/pkg/storage/stores/composite_store.go b/pkg/storage/stores/composite_store.go
-index 98d7ba527b675..a0892cd2a416f 100644
---- a/pkg/storage/stores/composite_store.go
-+++ b/pkg/storage/stores/composite_store.go
-@@ -195,10 +195,10 @@ func (c compositeStore) Stats(ctx context.Context, userID string, from, through
- return &res, err
- }
-
--func (c compositeStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (c compositeStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- volumes := make([]*logproto.VolumeResponse, 0, len(c.stores))
- err := c.forStores(ctx, from, through, func(innerCtx context.Context, from, through model.Time, store Store) error {
-- volume, err := store.SeriesVolume(innerCtx, userID, from, through, limit, matchers...)
-+ volume, err := store.SeriesVolume(innerCtx, userID, from, through, limit, targetLabels, matchers...)
- volumes = append(volumes, volume)
- return err
- })
-diff --git a/pkg/storage/stores/composite_store_entry.go b/pkg/storage/stores/composite_store_entry.go
-index a3f56bf83c497..9c88f9c70caab 100644
---- a/pkg/storage/stores/composite_store_entry.go
-+++ b/pkg/storage/stores/composite_store_entry.go
-@@ -132,7 +132,7 @@ func (c *storeEntry) Stats(ctx context.Context, userID string, from, through mod
- return c.indexReader.Stats(ctx, userID, from, through, matchers...)
- }
-
--func (c *storeEntry) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (c *storeEntry) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- sp, ctx := opentracing.StartSpanFromContext(ctx, ""SeriesStore.Volume"")
- defer sp.Finish()
-
-@@ -152,7 +152,7 @@ func (c *storeEntry) SeriesVolume(ctx context.Context, userID string, from, thro
- ""limit"", limit,
- )
-
-- return c.indexReader.SeriesVolume(ctx, userID, from, through, limit, matchers...)
-+ return c.indexReader.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...)
- }
-
- func (c *storeEntry) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) {
-diff --git a/pkg/storage/stores/composite_store_test.go b/pkg/storage/stores/composite_store_test.go
-index 0ae370e047d9e..0f2d61dfec07d 100644
---- a/pkg/storage/stores/composite_store_test.go
-+++ b/pkg/storage/stores/composite_store_test.go
-@@ -56,7 +56,7 @@ func (m mockStore) Stats(_ context.Context, _ string, _, _ model.Time, _ ...*lab
- return nil, nil
- }
-
--func (m mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (m mockStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- return nil, nil
- }
-
-@@ -305,7 +305,7 @@ type mockStoreSeriesVolume struct {
- err error
- }
-
--func (m mockStoreSeriesVolume) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (m mockStoreSeriesVolume) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- return m.value, m.err
- }
-
-diff --git a/pkg/storage/stores/index/index.go b/pkg/storage/stores/index/index.go
-index 59aa802b7e9dd..d466d5120ff11 100644
---- a/pkg/storage/stores/index/index.go
-+++ b/pkg/storage/stores/index/index.go
-@@ -23,7 +23,7 @@ type BaseReader interface {
- LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error)
- LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error)
- Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error)
-- SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
-+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error)
- }
-
- type Reader interface {
-@@ -119,11 +119,11 @@ func (m monitoredReaderWriter) Stats(ctx context.Context, userID string, from, t
- return sts, nil
- }
-
--func (m monitoredReaderWriter) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (m monitoredReaderWriter) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- var vol *logproto.VolumeResponse
- if err := instrument.CollectedRequest(ctx, ""series_volume"", instrument.NewHistogramCollector(m.metrics.indexQueryLatency), instrument.ErrorCode, func(ctx context.Context) error {
- var err error
-- vol, err = m.rw.SeriesVolume(ctx, userID, from, through, limit, matchers...)
-+ vol, err = m.rw.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...)
- return err
- }); err != nil {
- return nil, err
-diff --git a/pkg/storage/stores/series/series_index_gateway_store.go b/pkg/storage/stores/series/series_index_gateway_store.go
-index be0a47ad70666..cdd08acf7688b 100644
---- a/pkg/storage/stores/series/series_index_gateway_store.go
-+++ b/pkg/storage/stores/series/series_index_gateway_store.go
-@@ -130,12 +130,13 @@ func (c *IndexGatewayClientStore) Stats(ctx context.Context, userID string, from
- return resp, nil
- }
-
--func (c *IndexGatewayClientStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (c *IndexGatewayClientStore) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- resp, err := c.client.GetSeriesVolume(ctx, &logproto.VolumeRequest{
-- From: from,
-- Through: through,
-- Matchers: (&syntax.MatchersExpr{Mts: matchers}).String(),
-- Limit: limit,
-+ From: from,
-+ Through: through,
-+ Matchers: (&syntax.MatchersExpr{Mts: matchers}).String(),
-+ Limit: limit,
-+ TargetLabels: targetLabels,
- })
- if err != nil {
- if isUnimplementedCallError(err) && c.fallbackStore != nil {
-@@ -143,7 +144,7 @@ func (c *IndexGatewayClientStore) SeriesVolume(ctx context.Context, userID strin
- // Note: this is likely a noop anyway since only
- // tsdb+ enables this and the prior index returns an
- // empty response.
-- return c.fallbackStore.SeriesVolume(ctx, userID, from, through, limit, matchers...)
-+ return c.fallbackStore.SeriesVolume(ctx, userID, from, through, limit, targetLabels, matchers...)
- }
- return nil, err
- }
-diff --git a/pkg/storage/stores/series/series_index_store.go b/pkg/storage/stores/series/series_index_store.go
-index a6178d235494f..5e924b5752dc7 100644
---- a/pkg/storage/stores/series/series_index_store.go
-+++ b/pkg/storage/stores/series/series_index_store.go
-@@ -717,6 +717,6 @@ func (c *indexReaderWriter) Stats(_ context.Context, _ string, _, _ model.Time,
- }
-
- // old index stores do not implement label volume -- skip
--func (c *indexReaderWriter) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (c *indexReaderWriter) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- return nil, nil
- }
-diff --git a/pkg/storage/stores/shipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexgateway/gateway.go
-index 5dd202c840c37..d1b4dfd361f2f 100644
---- a/pkg/storage/stores/shipper/indexgateway/gateway.go
-+++ b/pkg/storage/stores/shipper/indexgateway/gateway.go
-@@ -303,7 +303,7 @@ func (g *Gateway) GetSeriesVolume(ctx context.Context, req *logproto.VolumeReque
- return nil, err
- }
-
-- return g.indexQuerier.SeriesVolume(ctx, instanceID, req.From, req.Through, req.GetLimit(), matchers...)
-+ return g.indexQuerier.SeriesVolume(ctx, instanceID, req.From, req.Through, req.GetLimit(), req.TargetLabels, matchers...)
- }
-
- type failingIndexClient struct{}
-diff --git a/pkg/storage/stores/shipper/indexgateway/gateway_test.go b/pkg/storage/stores/shipper/indexgateway/gateway_test.go
-index e014269d81ab1..b1b25cce388ae 100644
---- a/pkg/storage/stores/shipper/indexgateway/gateway_test.go
-+++ b/pkg/storage/stores/shipper/indexgateway/gateway_test.go
-@@ -272,7 +272,7 @@ func newIngesterQuerierMock() *indexQuerierMock {
- return &indexQuerierMock{}
- }
-
--func (i *indexQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (i *indexQuerierMock) SeriesVolume(_ context.Context, userID string, from, through model.Time, _ int32, _ []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- args := i.Called(userID, from, through, matchers)
-
- if args.Get(0) == nil {
-diff --git a/pkg/storage/stores/shipper/indexgateway/shufflesharding.go b/pkg/storage/stores/shipper/indexgateway/shufflesharding.go
-index 03dfb61b200fb..b563869d3719c 100644
---- a/pkg/storage/stores/shipper/indexgateway/shufflesharding.go
-+++ b/pkg/storage/stores/shipper/indexgateway/shufflesharding.go
-@@ -101,7 +101,7 @@ func (s *NoopStrategy) FilterTenants(tenantIDs []string) ([]string, error) {
- return tenantIDs, nil
- }
-
--// GetShardingStrategy returns the correct ShardingStrategy implementaion based
-+// GetShardingStrategy returns the correct ShardingStrategy implementation based
- // on provided configuration.
- func GetShardingStrategy(cfg Config, indexGatewayRingManager *RingManager, o Limits) ShardingStrategy {
- if cfg.Mode != RingMode || indexGatewayRingManager.Mode == ClientMode {
-diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go
-index 281e569af5f37..261612fbc5444 100644
---- a/pkg/storage/stores/tsdb/head_manager.go
-+++ b/pkg/storage/stores/tsdb/head_manager.go
-@@ -783,12 +783,12 @@ func (t *tenantHeads) Stats(ctx context.Context, userID string, from, through mo
- return idx.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
- }
-
--func (t *tenantHeads) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error {
-+func (t *tenantHeads) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error {
- idx, ok := t.tenantIndex(userID, from, through)
- if !ok {
- return nil
- }
-- return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
-+ return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...)
- }
-
- // helper only used in building TSDBs
-diff --git a/pkg/storage/stores/tsdb/index.go b/pkg/storage/stores/tsdb/index.go
-index e72c70c9c02de..5ccccb7ecdaae 100644
---- a/pkg/storage/stores/tsdb/index.go
-+++ b/pkg/storage/stores/tsdb/index.go
-@@ -53,7 +53,7 @@ type Index interface {
- LabelNames(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]string, error)
- LabelValues(ctx context.Context, userID string, from, through model.Time, name string, matchers ...*labels.Matcher) ([]string, error)
- Stats(ctx context.Context, userID string, from, through model.Time, acc IndexStatsAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error
-- SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error
-+ SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error
- }
-
- type NoopIndex struct{}
-@@ -81,6 +81,6 @@ func (NoopIndex) Stats(_ context.Context, _ string, _, _ model.Time, _ IndexStat
-
- func (NoopIndex) SetChunkFilterer(_ chunk.RequestChunkFilterer) {}
-
--func (NoopIndex) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ SeriesVolumeAccumulator, _ *index.ShardAnnotation, _ shouldIncludeChunk, _ ...*labels.Matcher) error {
-+func (NoopIndex) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ SeriesVolumeAccumulator, _ *index.ShardAnnotation, _ shouldIncludeChunk, _ []string, _ ...*labels.Matcher) error {
- return nil
- }
-diff --git a/pkg/storage/stores/tsdb/index_client.go b/pkg/storage/stores/tsdb/index_client.go
-index 1484c38a846a2..b8a3d80425a82 100644
---- a/pkg/storage/stores/tsdb/index_client.go
-+++ b/pkg/storage/stores/tsdb/index_client.go
-@@ -246,7 +246,7 @@ func (c *IndexClient) Stats(ctx context.Context, userID string, from, through mo
- return &res, nil
- }
-
--func (c *IndexClient) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (c *IndexClient) SeriesVolume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- sp, ctx := opentracing.StartSpanFromContext(ctx, ""IndexClient.SeriesVolume"")
- defer sp.Finish()
-
-@@ -266,7 +266,7 @@ func (c *IndexClient) SeriesVolume(ctx context.Context, userID string, from, thr
-
- acc := seriesvolume.NewAccumulator(limit, c.limits.VolumeMaxSeries(userID))
- for _, interval := range intervals {
-- if err := c.idx.SeriesVolume(ctx, userID, interval.Start, interval.End, acc, shard, nil, matchers...); err != nil {
-+ if err := c.idx.SeriesVolume(ctx, userID, interval.Start, interval.End, acc, shard, nil, targetLabels, matchers...); err != nil {
- return nil, err
- }
- }
-diff --git a/pkg/storage/stores/tsdb/index_client_test.go b/pkg/storage/stores/tsdb/index_client_test.go
-index cf3cc2ee15844..d8c5a6d422fa9 100644
---- a/pkg/storage/stores/tsdb/index_client_test.go
-+++ b/pkg/storage/stores/tsdb/index_client_test.go
-@@ -279,7 +279,7 @@ func TestIndexClient_SeriesVolume(t *testing.T) {
- through := indexStartToday + 1000
-
- t.Run(""it returns series volumes from the whole index"", func(t *testing.T) {
-- vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 10, nil...)
-+ vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 10, nil, nil...)
- require.NoError(t, err)
-
- require.Equal(t, &logproto.VolumeResponse{
-@@ -294,7 +294,7 @@ func TestIndexClient_SeriesVolume(t *testing.T) {
- })
-
- t.Run(""it returns largest series from the index"", func(t *testing.T) {
-- vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil...)
-+ vol, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil, nil...)
- require.NoError(t, err)
-
- require.Equal(t, &logproto.VolumeResponse{
-@@ -307,7 +307,7 @@ func TestIndexClient_SeriesVolume(t *testing.T) {
-
- t.Run(""it returns an error when the number of selected series exceeds the limit"", func(t *testing.T) {
- limits.volumeMaxSeries = 0
-- _, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil...)
-+ _, err := indexClient.SeriesVolume(context.Background(), """", from, through, 1, nil, nil...)
- require.EqualError(t, err, fmt.Sprintf(seriesvolume.ErrVolumeMaxSeriesHit, 0))
- })
- }
-diff --git a/pkg/storage/stores/tsdb/index_shipper_querier.go b/pkg/storage/stores/tsdb/index_shipper_querier.go
-index 34591b101fdfa..115544e5d8354 100644
---- a/pkg/storage/stores/tsdb/index_shipper_querier.go
-+++ b/pkg/storage/stores/tsdb/index_shipper_querier.go
-@@ -125,13 +125,13 @@ func (i *indexShipperQuerier) Stats(ctx context.Context, userID string, from, th
- return idx.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
- }
-
--func (i *indexShipperQuerier) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error {
-+func (i *indexShipperQuerier) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error {
- idx, err := i.indices(ctx, from, through, userID)
- if err != nil {
- return err
- }
-
-- return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
-+ return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...)
- }
-
- type resultAccumulator struct {
-diff --git a/pkg/storage/stores/tsdb/lazy_index.go b/pkg/storage/stores/tsdb/lazy_index.go
-index 17b8cf051da1b..5a862a407a26f 100644
---- a/pkg/storage/stores/tsdb/lazy_index.go
-+++ b/pkg/storage/stores/tsdb/lazy_index.go
-@@ -73,10 +73,10 @@ func (f LazyIndex) Stats(ctx context.Context, userID string, from, through model
- return i.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
- }
-
--func (f LazyIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error {
-+func (f LazyIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error {
- i, err := f()
- if err != nil {
- return err
- }
-- return i.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
-+ return i.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...)
- }
-diff --git a/pkg/storage/stores/tsdb/multi_file_index.go b/pkg/storage/stores/tsdb/multi_file_index.go
-index fee2a0783c165..7630a3648af74 100644
---- a/pkg/storage/stores/tsdb/multi_file_index.go
-+++ b/pkg/storage/stores/tsdb/multi_file_index.go
-@@ -337,8 +337,8 @@ func (i *MultiIndex) Stats(ctx context.Context, userID string, from, through mod
- })
- }
-
--func (i *MultiIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error {
-+func (i *MultiIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error {
- return i.forMatchingIndices(ctx, from, through, func(ctx context.Context, idx Index) error {
-- return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, matchers...)
-+ return idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, matchers...)
- })
- }
-diff --git a/pkg/storage/stores/tsdb/multitenant.go b/pkg/storage/stores/tsdb/multitenant.go
-index 4085000ba4310..7689f778253e2 100644
---- a/pkg/storage/stores/tsdb/multitenant.go
-+++ b/pkg/storage/stores/tsdb/multitenant.go
-@@ -93,6 +93,6 @@ func (m *MultiTenantIndex) Stats(ctx context.Context, userID string, from, throu
- return m.idx.Stats(ctx, userID, from, through, acc, shard, shouldIncludeChunk, withTenantLabelMatcher(userID, matchers)...)
- }
-
--func (m *MultiTenantIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, matchers ...*labels.Matcher) error {
-- return m.idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, withTenantLabelMatcher(userID, matchers)...)
-+func (m *MultiTenantIndex) SeriesVolume(ctx context.Context, userID string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, shouldIncludeChunk shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error {
-+ return m.idx.SeriesVolume(ctx, userID, from, through, acc, shard, shouldIncludeChunk, targetLabels, withTenantLabelMatcher(userID, matchers)...)
- }
-diff --git a/pkg/storage/stores/tsdb/single_file_index.go b/pkg/storage/stores/tsdb/single_file_index.go
-index f38513c79c72b..62c1a77b6f504 100644
---- a/pkg/storage/stores/tsdb/single_file_index.go
-+++ b/pkg/storage/stores/tsdb/single_file_index.go
-@@ -17,6 +17,7 @@ import (
- ""github.com/grafana/loki/pkg/storage/chunk""
- index_shipper ""github.com/grafana/loki/pkg/storage/stores/indexshipper/index""
- ""github.com/grafana/loki/pkg/storage/stores/tsdb/index""
-+ ""github.com/grafana/loki/pkg/util""
- util_log ""github.com/grafana/loki/pkg/util/log""
- )
-
-@@ -324,24 +325,18 @@ func (i *TSDBIndex) Stats(ctx context.Context, _ string, from, through model.Tim
- // {foo=""a"", fizz=""b""}
- // {foo=""b"", fizz=""a""}
- // {foo=""b"", fizz=""b""}
--func (i *TSDBIndex) SeriesVolume(ctx context.Context, _ string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, _ shouldIncludeChunk, matchers ...*labels.Matcher) error {
-+//
-+// SeriesVolume optionally accepts a slice of target labels. If provided, volumes are aggregated
-+// into those labels only. For example, given the matcher {fizz=~"".+""} and target labels of []string{""foo""},
-+// volumes would be aggregated as follows:
-+//
-+// {foo=""a""} which would be the sum of {foo=""a"", fizz=""a""} and {foo=""a"", fizz=""b""}
-+// {foo=""b""} which would be the sum of {foo=""b"", fizz=""a""} and {foo=""b"", fizz=""b""}
-+func (i *TSDBIndex) SeriesVolume(ctx context.Context, _ string, from, through model.Time, acc SeriesVolumeAccumulator, shard *index.ShardAnnotation, _ shouldIncludeChunk, targetLabels []string, matchers ...*labels.Matcher) error {
- sp, ctx := opentracing.StartSpanFromContext(ctx, ""Index.SeriesVolume"")
- defer sp.Finish()
-
-- var matchAll bool
-- labelsToMatch := make(map[string]struct{})
-- for _, m := range matchers {
-- if m.Name == """" {
-- matchAll = true
-- continue
-- }
--
-- if m.Name == TenantLabel {
-- continue
-- }
--
-- labelsToMatch[m.Name] = struct{}{}
-- }
-+ labelsToMatch, matchers, includeAll := util.PrepareLabelsAndMatchers(targetLabels, matchers, TenantLabel)
-
- seriesNames := make(map[uint64]string)
- seriesLabels := labels.Labels(make([]labels.Label, 0, len(labelsToMatch)))
-@@ -371,7 +366,7 @@ func (i *TSDBIndex) SeriesVolume(ctx context.Context, _ string, from, through mo
- if stats.Entries > 0 {
- seriesLabels = seriesLabels[:0]
- for _, l := range ls {
-- if _, ok := labelsToMatch[l.Name]; l.Name != TenantLabel && matchAll || ok {
-+ if _, ok := labelsToMatch[l.Name]; l.Name != TenantLabel && includeAll || ok {
- seriesLabels = append(seriesLabels, l)
- }
- }
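The doc comment added to `TSDBIndex.SeriesVolume` above describes how per-series volumes are aggregated into the provided target labels. As a rough, self-contained illustration of that aggregation (a toy sketch, not Loki's implementation), summing the foo/fizz example from the comment by the single target label `foo` looks like this:

```go
package main

import "fmt"

func main() {
	type series struct {
		labels map[string]string
		bytes  int64
	}

	// The four series from the doc comment's example.
	input := []series{
		{map[string]string{"foo": "a", "fizz": "a"}, 10},
		{map[string]string{"foo": "a", "fizz": "b"}, 20},
		{map[string]string{"foo": "b", "fizz": "a"}, 30},
		{map[string]string{"foo": "b", "fizz": "b"}, 40},
	}

	// Aggregate volumes into the single target label "foo".
	target := "foo"
	volumes := map[string]int64{}
	for _, s := range input {
		volumes[fmt.Sprintf(`{%s=%q}`, target, s.labels[target])] += s.bytes
	}

	fmt.Println(volumes) // map[{foo="a"}:30 {foo="b"}:70]
}
```

Each `{foo="…"}` bucket receives the sum of every series sharing that `foo` value, matching the `{foo="a"}` / `{foo="b"}` totals described in the comment.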
-diff --git a/pkg/storage/stores/tsdb/single_file_index_test.go b/pkg/storage/stores/tsdb/single_file_index_test.go
-index 08c5c4ac9f3b4..8d258feee0e98 100644
---- a/pkg/storage/stores/tsdb/single_file_index_test.go
-+++ b/pkg/storage/stores/tsdb/single_file_index_test.go
-@@ -427,7 +427,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- t.Run(""it matches all the series when the match all matcher is passed"", func(t *testing.T) {
- matcher := labels.MustNewMatcher(labels.MatchEqual, """", """")
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
-@@ -444,7 +444,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- labels.MustNewMatcher(labels.MatchRegexp, ""foo"", "".+""),
- }
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, withTenantLabelMatcher(""fake"", matcher)...)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, withTenantLabelMatcher(""fake"", matcher)...)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
-@@ -458,7 +458,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- t.Run(""it matches none of the series"", func(t *testing.T) {
- matcher := labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""baz"")
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{},
-@@ -469,7 +469,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- t.Run(""it only returns results for the labels in the matcher"", func(t *testing.T) {
- matcher := labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
-@@ -485,7 +485,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- labels.MustNewMatcher(labels.MatchRegexp, ""fizz"", "".+""),
- }
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matchers...)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matchers...)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
-@@ -502,7 +502,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- labels.MustNewMatcher(labels.MatchRegexp, ""fizz"", "".+""),
- }
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matchers...)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matchers...)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
-@@ -519,7 +519,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
-
- matcher := labels.MustNewMatcher(labels.MatchEqual, """", """")
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, matcher)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, nil, matcher)
-
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
-@@ -531,7 +531,7 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- t.Run(""only gets factor of stream size within time bounds"", func(t *testing.T) {
- matcher := labels.MustNewMatcher(labels.MatchEqual, """", """")
- acc := seriesvolume.NewAccumulator(10, 10)
-- err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through.Add(-30*time.Minute), acc, nil, nil, matcher)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through.Add(-30*time.Minute), acc, nil, nil, nil, matcher)
- require.NoError(t, err)
- require.Equal(t, &logproto.VolumeResponse{
- Volumes: []logproto.Volume{
-@@ -541,6 +541,49 @@ func TestTSDBIndex_SeriesVolume(t *testing.T) {
- Limit: 10,
- }, acc.Volumes())
- })
-+
-+ t.Run(""when targetLabels provided, it aggregates by those labels only"", func(t *testing.T) {
-+ t.Run(""all targetLabels are added to matchers"", func(t *testing.T) {
-+ matcher := labels.MustNewMatcher(labels.MatchEqual, """", """")
-+ acc := seriesvolume.NewAccumulator(10, 10)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, []string{""fizz""}, matcher)
-+ require.NoError(t, err)
-+ require.Equal(t, &logproto.VolumeResponse{
-+ Volumes: []logproto.Volume{
-+ {Name: `{fizz=""fizz""}`, Volume: (30 + 40) * 1024},
-+ {Name: `{fizz=""buzz""}`, Volume: (10 + 20) * 1024},
-+ },
-+ Limit: 10,
-+ }, acc.Volumes())
-+ })
-+
-+ t.Run(""with a specific equals matcher"", func(t *testing.T) {
-+ matcher := labels.MustNewMatcher(labels.MatchEqual, ""foo"", ""bar"")
-+ acc := seriesvolume.NewAccumulator(10, 10)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, []string{""fizz""}, matcher)
-+ require.NoError(t, err)
-+ require.Equal(t, &logproto.VolumeResponse{
-+ Volumes: []logproto.Volume{
-+ {Name: `{fizz=""fizz""}`, Volume: (30 + 40) * 1024},
-+ {Name: `{fizz=""buzz""}`, Volume: (10 + 20) * 1024},
-+ },
-+ Limit: 10,
-+ }, acc.Volumes())
-+ })
-+
-+ t.Run(""with a specific regexp matcher"", func(t *testing.T) {
-+ matcher := labels.MustNewMatcher(labels.MatchRegexp, ""fizz"", "".+"")
-+ acc := seriesvolume.NewAccumulator(10, 10)
-+ err := tsdbIndex.SeriesVolume(context.Background(), ""fake"", from, through, acc, nil, nil, []string{""foo""}, matcher)
-+ require.NoError(t, err)
-+ require.Equal(t, &logproto.VolumeResponse{
-+ Volumes: []logproto.Volume{
-+ {Name: `{foo=""bar""}`, Volume: (100) * 1024},
-+ },
-+ Limit: 10,
-+ }, acc.Volumes())
-+ })
-+ })
- }
-
- type filterAll struct{}
-diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
-index 83cf688f9c81a..a7ae4c1c281d9 100644
---- a/pkg/storage/util_test.go
-+++ b/pkg/storage/util_test.go
-@@ -259,7 +259,7 @@ func (m *mockChunkStore) Stats(_ context.Context, _ string, _, _ model.Time, _ .
- return nil, nil
- }
-
--func (m *mockChunkStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
-+func (m *mockChunkStore) SeriesVolume(_ context.Context, _ string, _, _ model.Time, _ int32, _ []string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
- return nil, nil
- }
-
-diff --git a/pkg/util/series_volume.go b/pkg/util/series_volume.go
-new file mode 100644
-index 0000000000000..206dfc18ac902
---- /dev/null
-+++ b/pkg/util/series_volume.go
-@@ -0,0 +1,73 @@
-+package util
-+
-+import ""github.com/prometheus/prometheus/model/labels""
-+
-+// PrepareLabelsAndMatchers is used by the ingester and index gateway to service series volume requests.
-+// It returns a map of labels to aggregate into, a list of matchers to match streams against,
-+// as well as a boolean to indicate whether a match-all selector was provided.
-+//
-+// The last argument, tenantLabel, is optional. If provided, a single string of the internal tenant label name is expected.
-+func PrepareLabelsAndMatchers(targetLabels []string, matchers []*labels.Matcher, tenantLabel ...string) (map[string]struct{}, []*labels.Matcher, bool) {
-+ if len(targetLabels) > 0 {
-+ return prepareLabelsAndMatchersWithTargets(targetLabels, matchers, tenantLabel...)
-+ }
-+
-+ var includeAll bool
-+ labelsToMatch := make(map[string]struct{})
-+
-+ for _, m := range matchers {
-+ if m.Name == """" {
-+ includeAll = true
-+ continue
-+ }
-+
-+ if len(tenantLabel) == 1 && m.Name == tenantLabel[0] {
-+ continue
-+ }
-+
-+ labelsToMatch[m.Name] = struct{}{}
-+ }
-+
-+ return labelsToMatch, matchers, includeAll
-+}
-+
-+func prepareLabelsAndMatchersWithTargets(targetLabels []string, matchers []*labels.Matcher, tenantLabel ...string) (map[string]struct{}, []*labels.Matcher, bool) {
-+ matchAllIndex := -1
-+ labelsToMatch := make(map[string]struct{})
-+ targetsFound := make(map[string]bool, len(targetLabels))
-+
-+ for _, target := range targetLabels {
-+ labelsToMatch[target] = struct{}{}
-+ targetsFound[target] = false
-+ }
-+
-+ for i, m := range matchers {
-+ if m.Name == """" {
-+ matchAllIndex = i
-+ continue
-+ }
-+
-+ if len(tenantLabel) == 1 && m.Name == tenantLabel[0] {
-+ continue
-+ }
-+
-+ if _, ok := targetsFound[m.Name]; ok {
-+ targetsFound[m.Name] = true
-+ }
-+ }
-+
-+ // Make sure all target labels are included in the matchers.
-+ for target, found := range targetsFound {
-+ if !found {
-+ matcher := labels.MustNewMatcher(labels.MatchRegexp, target, "".+"")
-+ matchers = append(matchers, matcher)
-+ }
-+ }
-+
-+	// If the target labels added a matcher, we can remove the match-all matcher
-+ if matchAllIndex > -1 && len(matchers) > 1 {
-+ matchers = append(matchers[:matchAllIndex], matchers[matchAllIndex+1:]...)
-+ }
-+
-+ return labelsToMatch, matchers, false
-+}",unknown,"Add targetLabels to SeriesVolume requests (#9878)
-
-Adds optional `targetLabels` parameter to `series_volume` and
-`series_volume_range` requests that controls how volumes are aggregated.
-When provided, volumes are aggregated into the intersections of the
-provided `targetLabels` only."
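For a rough sense of how the new `PrepareLabelsAndMatchers` helper (added in `pkg/util/series_volume.go` above) treats target labels, here is a minimal sketch; the `main` wrapper and the printed output are illustrative only, while the import paths follow the ones already used in this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/pkg/util"
)

func main() {
	// The commit message example: selector {fizz=~".+"} with targetLabels ["foo"].
	matchers := []*labels.Matcher{
		labels.MustNewMatcher(labels.MatchRegexp, "fizz", ".+"),
	}

	// labelsToMatch restricts aggregation to the target labels; because "foo"
	// is not already matched, a foo=~".+" matcher is appended to the list.
	labelsToMatch, matchers, includeAll := util.PrepareLabelsAndMatchers([]string{"foo"}, matchers)

	fmt.Println(labelsToMatch) // map[foo:{}]
	fmt.Println(matchers)      // roughly: [fizz=~".+" foo=~".+"]
	fmt.Println(includeAll)    // false
}
```

Because `foo` appears in the target labels but not in the matchers, a `foo=~".+"` matcher is appended so only streams carrying that label are counted, and aggregation is restricted to `foo`.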
-5ab9515020658860053590c9e38e0262be78a9b1,2023-09-12 13:19:33,Salva Corts,"Fix regression when parsing numbers in Push request (#10550)
-
-**What this PR does / why we need it**:
-Even though the [Loki HTTP API docs for the push endpoint][1] state that
-the stream label values should be strings, we previously didn't enforce
-this requirement. With https://github.com/grafana/loki/pull/9694, we
-started enforcing this requirement, and that broke some users.
-
-In this PR we are reverting this type of assertion and adding a bunch of
-tests to avoid the regression in the future.
-
-
-[1]:
-https://grafana.com/docs/loki/latest/reference/api/#push-log-entries-to-loki",False,"diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go
-index d5b35ec6b69f6..287fd6a312129 100644
---- a/pkg/loghttp/query.go
-+++ b/pkg/loghttp/query.go
-@@ -69,15 +69,8 @@ func (s *LogProtoStream) UnmarshalJSON(data []byte) error {
- err := jsonparser.ObjectEach(data, func(key, val []byte, ty jsonparser.ValueType, _ int) error {
- switch string(key) {
- case ""stream"":
-- labels := make(LabelSet)
-- err := jsonparser.ObjectEach(val, func(key, val []byte, dataType jsonparser.ValueType, _ int) error {
-- if dataType != jsonparser.String {
-- return jsonparser.MalformedStringError
-- }
-- labels[string(key)] = string(val)
-- return nil
-- })
-- if err != nil {
-+ var labels LabelSet
-+ if err := labels.UnmarshalJSON(val); err != nil {
- return err
- }
- s.Labels = labels.String()
-diff --git a/pkg/util/unmarshal/unmarshal_test.go b/pkg/util/unmarshal/unmarshal_test.go
-index 34e4c2dbf3503..9fdaf27512127 100644
---- a/pkg/util/unmarshal/unmarshal_test.go
-+++ b/pkg/util/unmarshal/unmarshal_test.go
-@@ -7,6 +7,7 @@ import (
- ""testing""
- ""time""
-
-+ ""github.com/prometheus/prometheus/model/labels""
- ""github.com/stretchr/testify/require""
-
- ""github.com/grafana/loki/pkg/loghttp""
-@@ -15,24 +16,28 @@ import (
- ""github.com/grafana/loki/pkg/util/marshal""
- )
-
--// covers requests to /loki/api/v1/push
--var pushTests = []struct {
-- expected []logproto.Stream
-- actual string
--}{
-- {
-- []logproto.Stream{
-- {
-- Entries: []logproto.Entry{
-- {
-- Timestamp: time.Unix(0, 123456789012345),
-- Line: ""super line"",
-+func Test_DecodePushRequest(t *testing.T) {
-+ // covers requests to /loki/api/v1/push
-+ for _, tc := range []struct {
-+ name string
-+ expected []logproto.Stream
-+ expectedErr bool
-+ actual string
-+ }{
-+ {
-+ name: ""basic"",
-+ expected: []logproto.Stream{
-+ {
-+ Entries: []logproto.Entry{
-+ {
-+ Timestamp: time.Unix(0, 123456789012345),
-+ Line: ""super line"",
-+ },
- },
-+ Labels: labels.FromStrings(""test"", ""test"").String(),
- },
-- Labels: `{test=""test""}`,
- },
-- },
-- `{
-+ actual: `{
- ""streams"": [
- {
- ""stream"": {
-@@ -44,24 +49,25 @@ var pushTests = []struct {
- }
- ]
- }`,
-- },
-- {
-- []logproto.Stream{
-- {
-- Entries: []logproto.Entry{
-- {
-- Timestamp: time.Unix(0, 123456789012345),
-- Line: ""super line"",
-- StructuredMetadata: []logproto.LabelAdapter{
-- {Name: ""a"", Value: ""1""},
-- {Name: ""b"", Value: ""2""},
-+ },
-+ {
-+ name: ""with structured metadata"",
-+ expected: []logproto.Stream{
-+ {
-+ Entries: []logproto.Entry{
-+ {
-+ Timestamp: time.Unix(0, 123456789012345),
-+ Line: ""super line"",
-+ StructuredMetadata: []logproto.LabelAdapter{
-+ {Name: ""a"", Value: ""1""},
-+ {Name: ""b"", Value: ""2""},
-+ },
- },
- },
-+ Labels: labels.FromStrings(""test"", ""test"").String(),
- },
-- Labels: `{test=""test""}`,
- },
-- },
-- `{
-+ actual: `{
- ""streams"": [
- {
- ""stream"": {
-@@ -73,18 +79,100 @@ var pushTests = []struct {
- }
- ]
- }`,
-- },
--}
-+ },
-
--func Test_DecodePushRequest(t *testing.T) {
-- for i, pushTest := range pushTests {
-- var actual logproto.PushRequest
-- closer := io.NopCloser(strings.NewReader(pushTest.actual))
-+ // The following test cases are added to cover a regression. Even though the Loki HTTP API
-+ // docs for the push endpoint state that the stream label values should be strings, we
-+ // previously didn't enforce this requirement.
-+ // With https://github.com/grafana/loki/pull/9694, we started enforcing this requirement
-+ // and that broke some users. We are adding these test cases to ensure that we don't
-+ // enforce this requirement in the future. Note that we may want to enforce this requirement
-+ // in a future major release, in which case we should modify these test cases.
-+ {
-+ name: ""number in stream label value"",
-+ expected: []logproto.Stream{
-+ {
-+ Entries: []logproto.Entry{
-+ {
-+ Timestamp: time.Unix(0, 123456789012345),
-+ Line: ""super line"",
-+ },
-+ },
-+ Labels: labels.FromStrings(""test"", ""test"", ""number"", ""123"").String(),
-+ },
-+ },
-+ actual: `{
-+ ""streams"": [
-+ {
-+ ""stream"": {
-+ ""test"": ""test"",
-+ ""number"": 123
-+ },
-+ ""values"":[
-+ [ ""123456789012345"", ""super line"" ]
-+ ]
-+ }
-+ ]
-+ }`,
-+ },
-+ {
-+ name: ""string without quotes in stream label value"",
-+ expectedErr: true,
-+ actual: `{
-+ ""streams"": [
-+ {
-+ ""stream"": {
-+ ""test"": ""test"",
-+ ""text"": None
-+ },
-+ ""values"":[
-+ [ ""123456789012345"", ""super line"" ]
-+ ]
-+ }
-+ ]
-+ }`,
-+ },
-+ {
-+ name: ""json object in stream label value"",
-+ expected: []logproto.Stream{
-+ {
-+ Entries: []logproto.Entry{
-+ {
-+ Timestamp: time.Unix(0, 123456789012345),
-+ Line: ""super line"",
-+ },
-+ },
-+ Labels: labels.FromStrings(""test"", ""test"", ""text"", ""{ \""a\"": \""b\"" }"").String(),
-+ },
-+ },
-+ actual: `{
-+ ""streams"": [
-+ {
-+ ""stream"": {
-+ ""test"": ""test"",
-+ ""text"": { ""a"": ""b"" }
-+ },
-+ ""values"":[
-+ [ ""123456789012345"", ""super line"" ]
-+ ]
-+ }
-+ ]
-+ }`,
-+ },
-+ } {
-+ t.Run(tc.name, func(t *testing.T) {
-+ var actual logproto.PushRequest
-+ closer := io.NopCloser(strings.NewReader(tc.actual))
-
-- err := DecodePushRequest(closer, &actual)
-- require.NoError(t, err)
-+ err := DecodePushRequest(closer, &actual)
-+ if tc.expectedErr {
-+ require.Error(t, err)
-+ return
-+ }
-+ require.NoError(t, err)
-
-- require.Equalf(t, pushTest.expected, actual.Streams, ""Push Test %d failed"", i)
-+ require.Equal(t, tc.expected, actual.Streams)
-+ })
- }
- }",unknown,"Fix regression when parsing numbers in Push request (#10550)
-
-**What this PR does / why we need it**:
-Even though the [Loki HTTP API docs for the push endpoint][1] state that
-the stream label values should be strings, we previously didn't enforce
-this requirement. With https://github.com/grafana/loki/pull/9694, we
-started enforcing this requirement, and that broke some users.
-
-In this PR we are reverting this type of assertion and adding a bunch of
-tests to avoid the regression in the future.
-
-
-[1]:
-https://grafana.com/docs/loki/latest/reference/api/#push-log-entries-to-loki"
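To see the relaxed parsing these tests protect, here is a small hedged sketch that feeds a push payload with a numeric stream label value through `DecodePushRequest` from `pkg/util/unmarshal` (the package under test above); the exact printed label string is indicative only:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/util/unmarshal"
)

func main() {
	// The "number" stream label is a bare JSON number; with this fix it is
	// accepted and ends up as the string "123" in the parsed labels.
	body := `{
		"streams": [
			{
				"stream": { "test": "test", "number": 123 },
				"values": [ [ "123456789012345", "super line" ] ]
			}
		]
	}`

	var req logproto.PushRequest
	if err := unmarshal.DecodePushRequest(io.NopCloser(strings.NewReader(body)), &req); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(req.Streams[0].Labels) // {number="123", test="test"}
}
```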
-4e4359e67c6c760e175d2b517dc87ec76a385e42,2023-02-16 14:48:02,Vladyslav Diachenko,"Fixed XSS on LogQL Analyzer page (#8521)
-
-Signed-off-by: Vladyslav Diachenko ",False,"diff --git a/docs/sources/logql/analyzer.md b/docs/sources/logql/analyzer.md
-index 079b27a05d8c4..0d4cbf77de10a 100644
---- a/docs/sources/logql/analyzer.md
-+++ b/docs/sources/logql/analyzer.md
-@@ -2,7 +2,6 @@
- title: LogQL Analyzer
- menuTitle: LoqQL Analyzer
- description: The LogQL Analyzer is an inline educational tool for experimenting with writing LogQL queries.
--draft: true
- weight: 60
- ---
-
-diff --git a/docs/sources/logql/analyzer/script.js b/docs/sources/logql/analyzer/script.js
-index eabfda4e21161..0c043ce721582 100644
---- a/docs/sources/logql/analyzer/script.js
-+++ b/docs/sources/logql/analyzer/script.js
-@@ -83,7 +83,8 @@ async function handleResponse(response) {
- }
-
- function handleError(error) {
-- document.getElementById(""query-error"").innerHTML = error
-+ const template = Handlebars.compile(""{{error_text}}"");
-+ document.getElementById(""query-error"").innerHTML = template({error_text:error})
- document.getElementById(""query-error"").classList.remove(""hide"");
- resultsElement.classList.add(""hide"");
- }",unknown,"Fixed XSS on LogQL Analyzer page (#8521)
-
-Signed-off-by: Vladyslav Diachenko "
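The fix renders the error text through a Handlebars template so it reaches the DOM as data rather than markup. Conceptually this is the same as HTML-escaping the value before assigning it to `innerHTML`; a tiny Go sketch of that idea (not the analyzer's JavaScript):

```go
package main

import (
	"fmt"
	"html"
)

func main() {
	errText := `<img src=x onerror=alert(1)>`
	// Escaping turns would-be markup into inert text before it is rendered.
	fmt.Println(html.EscapeString(errText))
	// &lt;img src=x onerror=alert(1)&gt;
}
```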
-d97724e8dd12214092932f7e82cd46a3bb6bfd74,2022-05-04 20:20:09,Travis Patterson,"Introduce coverage to PR pipelines (#5357)
-
-* Add code coverage diff to CI
-
-* remove test package from coverage diff
-
-* review feedback
-
-* lint
-
-* Env variable test
-
-* faster experiment
-
-* drone vars
-
-* drone vars
-
-* how does the shell work?
-
-* add pr comment
-
-* escape json
-
-* report diff in comment
-
-* properly format json
-
-* quote post body
-
-* review feedback
-
-* add querier/queryrange to coverage",False,"diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet
-index 32cd9bf017b93..4c8e145b764f1 100644
---- a/.drone/drone.jsonnet
-+++ b/.drone/drone.jsonnet
-@@ -46,15 +46,19 @@ local github_secret = secret('github_token', 'infra/data/ci/github/grafanabot',
- // Injected in a secret because this is a public repository and having the config here would leak our environment names
- local deploy_configuration = secret('deploy_config', 'secret/data/common/loki_ci_autodeploy', 'config.json');
-
--
--local run(name, commands) = {
-+local run(name, commands, env={}) = {
- name: name,
- image: 'grafana/loki-build-image:%s' % build_image_version,
- commands: commands,
-+ environment: env,
- };
-
--local make(target, container=true) = run(target, [
-- 'make ' + (if !container then 'BUILD_IN_CONTAINER=false ' else '') + target,
-+local make(target, container=true, args=[]) = run(target, [
-+ std.join(' ', [
-+ 'make',
-+ 'BUILD_IN_CONTAINER=' + container,
-+ target,
-+ ] + args),
- ]);
-
- local docker(arch, app) = {
-@@ -369,7 +373,23 @@ local manifest(apps) = pipeline('manifest') {
- steps: [
- make('check-drone-drift', container=false) { depends_on: ['clone'] },
- make('check-generated-files', container=false) { depends_on: ['clone'] },
-- make('test', container=false) { depends_on: ['clone', 'check-generated-files'] },
-+ make('test', container=false) { depends_on: ['clone'] },
-+ run('clone-main', commands=['cd ..', 'git clone $CI_REPO_REMOTE loki-main', 'cd -']),
-+ run('test-main', commands=['cd ../loki-main', 'BUILD_IN_CONTAINER=false make test']) { depends_on: ['clone-main'] },
-+ make('compare-coverage', container=false, args=[
-+ 'old=../loki-main/test_results.txt',
-+ 'new=test_results.txt',
-+ 'packages=ingester,distributor,querier,querier/queryrange,iter,storage,chunkenc,logql,loki',
-+ '> diff.txt',
-+ ]) { depends_on: ['test', 'test-main'] },
-+ run('report-coverage', commands=[
-+ ""pull=$(echo $CI_COMMIT_REF | awk -F '/' '{print $3}')"",
-+ ""body=$(jq -Rs '{body: . }' diff.txt)"",
-+ 'curl -X POST -u $USER:$TOKEN -H ""Accept: application/vnd.github.v3+json"" https://api.github.com/repos/grafana/loki/issues/$pull/comments -d ""$body"" > /dev/null',
-+ ], env={
-+ USER: 'grafanabot',
-+ TOKEN: { from_secret: github_secret.name },
-+ }) { depends_on: ['compare-coverage'] },
- make('lint', container=false) { depends_on: ['clone', 'check-generated-files'] },
- make('check-mod', container=false) { depends_on: ['clone', 'test', 'lint'] },
- {
-diff --git a/.drone/drone.yml b/.drone/drone.yml
-index 90a2691ee8388..a9a1b250eb5fc 100644
---- a/.drone/drone.yml
-+++ b/.drone/drone.yml
-@@ -40,26 +40,67 @@ steps:
- - make BUILD_IN_CONTAINER=false check-drone-drift
- depends_on:
- - clone
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: check-drone-drift
- - commands:
- - make BUILD_IN_CONTAINER=false check-generated-files
- depends_on:
- - clone
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: check-generated-files
- - commands:
- - make BUILD_IN_CONTAINER=false test
- depends_on:
- - clone
-- - check-generated-files
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: test
-+- commands:
-+ - cd ..
-+ - git clone $CI_REPO_REMOTE loki-main
-+ - cd -
-+ environment: {}
-+ image: grafana/loki-build-image:0.20.4
-+ name: clone-main
-+- commands:
-+ - cd ../loki-main
-+ - BUILD_IN_CONTAINER=false make test
-+ depends_on:
-+ - clone-main
-+ environment: {}
-+ image: grafana/loki-build-image:0.20.4
-+ name: test-main
-+- commands:
-+ - make BUILD_IN_CONTAINER=false compare-coverage old=../loki-main/test_results.txt
-+ new=test_results.txt packages=ingester,distributor,querier,querier/queryrange,iter,storage,chunkenc,logql,loki
-+ > diff.txt
-+ depends_on:
-+ - test
-+ - test-main
-+ environment: {}
-+ image: grafana/loki-build-image:0.20.4
-+ name: compare-coverage
-+- commands:
-+ - pull=$(echo $CI_COMMIT_REF | awk -F '/' '{print $3}')
-+ - 'body=$(jq -Rs ''{body: . }'' diff.txt)'
-+ - 'curl -X POST -u $USER:$TOKEN -H ""Accept: application/vnd.github.v3+json"" https://api.github.com/repos/grafana/loki/issues/$pull/comments
-+ -d ""$body"" > /dev/null'
-+ depends_on:
-+ - compare-coverage
-+ environment:
-+ TOKEN:
-+ from_secret: github_token
-+ USER: grafanabot
-+ image: grafana/loki-build-image:0.20.4
-+ name: report-coverage
- - commands:
- - make BUILD_IN_CONTAINER=false lint
- depends_on:
- - clone
- - check-generated-files
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: lint
- - commands:
-@@ -68,6 +109,7 @@ steps:
- - clone
- - test
- - lint
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: check-mod
- - commands:
-@@ -79,18 +121,21 @@ steps:
- depends_on:
- - clone
- - check-generated-files
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: loki
- - commands:
- - make BUILD_IN_CONTAINER=false validate-example-configs
- depends_on:
- - loki
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: validate-example-configs
- - commands:
- - make BUILD_IN_CONTAINER=false check-example-config-doc
- depends_on:
- - clone
-+ environment: {}
- image: grafana/loki-build-image:0.20.4
- name: check-example-config-doc
- trigger:
-@@ -109,6 +154,7 @@ steps:
- - make BUILD_IN_CONTAINER=false lint-jsonnet
- depends_on:
- - clone
-+ environment: {}
- image: grafana/jsonnet-build:c8b75df
- name: lint-jsonnet
- trigger:
-@@ -1118,6 +1164,6 @@ kind: secret
- name: deploy_config
- ---
- kind: signature
--hmac: 96966b3eec7f8976408f2c2f2c36a29b87a694c8a32afb2fdb16209e1f9e7521
-+hmac: 4596e741ac788d461b3bbb2429c1f61efabaf943aeec6b3cd59eeff8d769de5e
-
- ...
-diff --git a/.gitignore b/.gitignore
-index a017012c7ed4b..dc3b86a0317ce 100644
---- a/.gitignore
-+++ b/.gitignore
-@@ -27,6 +27,7 @@ dlv
- rootfs/
- dist
- coverage.txt
-+test_results.txt
- .DS_Store
- .aws-sam
- .idea
-@@ -40,4 +41,4 @@ coverage.txt
- *.tfvars
-
- # vscode
--.vscode
-\ No newline at end of file
-+.vscode
-diff --git a/Makefile b/Makefile
-index 278aec0cd3629..053ea578add7f 100644
---- a/Makefile
-+++ b/Makefile
-@@ -10,7 +10,7 @@
- .PHONY: validate-example-configs generate-example-config-doc check-example-config-doc
- .PHONY: clean clean-protos
-
--SHELL = /usr/bin/env bash
-+SHELL = /usr/bin/env bash -o pipefail
-
- GOTEST ?= go test
-
-@@ -260,7 +260,10 @@ lint:
- ########
-
- test: all
-- $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./...
-+ $(GOTEST) -covermode=atomic -coverprofile=coverage.txt -p=4 ./... | tee test_results.txt
-+
-+compare-coverage:
-+ ./tools/diff_coverage.sh $(old) $(new) $(packages)
-
- #########
- # Clean #
-diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile
-index 99a7d05ac4529..9579edb2e0e67 100644
---- a/loki-build-image/Dockerfile
-+++ b/loki-build-image/Dockerfile
-@@ -68,7 +68,7 @@ RUN apt-get update && \
- musl gnupg ragel \
- file zip unzip jq gettext\
- protobuf-compiler libprotobuf-dev \
-- libsystemd-dev && \
-+ libsystemd-dev jq && \
- rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
- COPY --from=docker /usr/bin/docker /usr/bin/docker
-diff --git a/tools/diff_coverage.sh b/tools/diff_coverage.sh
-new file mode 100755
-index 0000000000000..188821e91248b
---- /dev/null
-+++ b/tools/diff_coverage.sh
-@@ -0,0 +1,18 @@
-+#!/bin/bash
-+
-+if [[ ! -f ""$1"" ]] || [[ ! -f ""$2"" ]]; then
-+ echo ""unable to compare test coverage: both old and new files must exist""
-+ exit 0
-+fi
-+
-+echo '```diff'
-+for pkg in ${3//,/ }; do
-+ old=$(grep ""pkg/${pkg}\s"" ""$1"" | sed s/%// | awk '{print $5}')
-+ new=$(grep ""pkg/${pkg}\s"" ""$2"" | sed s/%// | awk '{print $5}')
-+ echo | awk -v pkg=""${pkg}"" -v old=""${old:-0}"" -v new=""${new:-0}"" \
-+ '{
-+ sign=new - old < 0 ? ""-"" : ""+""
-+ printf (""%s %11s\t%s\n"", sign, pkg, new - old)
-+ }'
-+done
-+echo '```'",unknown,"Introduce coverage to PR pipelines (#5357)
-
-* Add code coverage diff to CI
-
-* remove test package from coverage diff
-
-* review feedback
-
-* lint
-
-* Env variable test
-
-* faster experiment
-
-* drone vars
-
-* drone vars
-
-* how does the shell work?
-
-* add pr comment
-
-* escape json
-
-* report diff in comment
-
-* properly format json
-
-* quote post body
-
-* review feedback
-
-* add querier/queryrange to coverage"
-85f7baaeda326c1f2df228c871f28cde9a4386cc,2024-02-19 22:31:59,Owen Diehl,Blooms/integration fixes (#11979),False,"diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
-index bed0834a86b74..920bff1decc8f 100644
---- a/pkg/bloomcompactor/batch.go
-+++ b/pkg/bloomcompactor/batch.go
-@@ -286,11 +286,10 @@ func (i *blockLoadingIter) loadNext() bool {
- // check if there are more overlapping groups to load
- if !i.overlapping.Next() {
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
-- return false
-- }
-+ if i.overlapping.Err() != nil {
-+ i.err = i.overlapping.Err()
-+ }
-
-- if i.overlapping.Err() != nil {
-- i.err = i.overlapping.Err()
- return false
- }
-
-@@ -300,7 +299,7 @@ func (i *blockLoadingIter) loadNext() bool {
- filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
-
- iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
-- for filtered.Next() && filtered.Err() == nil {
-+ for filtered.Next() {
- bq := loader.At()
- if _, ok := i.loaded[bq]; !ok {
- i.loaded[bq] = struct{}{}
-@@ -309,8 +308,9 @@ func (i *blockLoadingIter) loadNext() bool {
- iters = append(iters, iter)
- }
-
-- if loader.Err() != nil {
-- i.err = loader.Err()
-+ if err := filtered.Err(); err != nil {
-+ i.err = err
-+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
- return false
- }
-
-diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
-index 3bb1c815e8295..cc96cc7219e8d 100644
---- a/pkg/bloomcompactor/bloomcompactor.go
-+++ b/pkg/bloomcompactor/bloomcompactor.go
-@@ -214,6 +214,7 @@ func (c *Compactor) ownsTenant(tenant string) (v1.FingerprintBounds, bool, error
-
- // runs a single round of compaction for all relevant tenants and tables
- func (c *Compactor) runOne(ctx context.Context) error {
-+ level.Info(c.logger).Log(""msg"", ""running bloom compaction"", ""workers"", c.cfg.WorkerParallelism)
- var workersErr error
- var wg sync.WaitGroup
- ch := make(chan tenantTable)
-@@ -226,7 +227,11 @@ func (c *Compactor) runOne(ctx context.Context) error {
- err := c.loadWork(ctx, ch)
-
- wg.Wait()
-- return multierror.New(workersErr, err, ctx.Err()).Err()
-+ err = multierror.New(workersErr, err, ctx.Err()).Err()
-+ if err != nil {
-+ level.Error(c.logger).Log(""msg"", ""compaction iteration failed"", ""err"", err)
-+ }
-+ return err
- }
-
- func (c *Compactor) tables(ts time.Time) *dayRangeIterator {
-@@ -241,6 +246,7 @@ func (c *Compactor) tables(ts time.Time) *dayRangeIterator {
-
- fromDay := config.NewDayTime(model.TimeFromUnixNano(from))
- throughDay := config.NewDayTime(model.TimeFromUnixNano(through))
-+ level.Debug(c.logger).Log(""msg"", ""loaded tables for compaction"", ""from"", fromDay, ""through"", throughDay)
- return newDayRangeIterator(fromDay, throughDay, c.schemaCfg)
- }
-
-@@ -250,6 +256,8 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
- for tables.Next() && tables.Err() == nil && ctx.Err() == nil {
- table := tables.At()
-
-+ level.Debug(c.logger).Log(""msg"", ""loading work for table"", ""table"", table)
-+
- tenants, err := c.tenants(ctx, table)
- if err != nil {
- return errors.Wrap(err, ""getting tenants"")
-@@ -262,6 +270,7 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
- if err != nil {
- return errors.Wrap(err, ""checking tenant ownership"")
- }
-+ level.Debug(c.logger).Log(""msg"", ""enqueueing work for tenant"", ""tenant"", tenant, ""table"", table, ""ownership"", ownershipRange.String(), ""owns"", owns)
- if !owns {
- c.metrics.tenantsSkipped.Inc()
- continue
-@@ -280,12 +289,14 @@ func (c *Compactor) loadWork(ctx context.Context, ch chan<- tenantTable) error {
- }
-
- if err := tenants.Err(); err != nil {
-+ level.Error(c.logger).Log(""msg"", ""error iterating tenants"", ""err"", err)
- return errors.Wrap(err, ""iterating tenants"")
- }
-
- }
-
- if err := tables.Err(); err != nil {
-+ level.Error(c.logger).Log(""msg"", ""error iterating tables"", ""err"", err)
- return errors.Wrap(err, ""iterating tables"")
- }
-
-@@ -330,7 +341,7 @@ func (c *Compactor) runWorkers(ctx context.Context, ch <-chan tenantTable) error
- }
-
- func (c *Compactor) compactTenantTable(ctx context.Context, tt tenantTable) error {
-- level.Info(c.logger).Log(""msg"", ""compacting"", ""org_id"", tt.tenant, ""table"", tt.table, ""ownership"", tt.ownershipRange)
-+ level.Info(c.logger).Log(""msg"", ""compacting"", ""org_id"", tt.tenant, ""table"", tt.table, ""ownership"", tt.ownershipRange.String())
- return c.controller.compactTenant(ctx, tt.table, tt.tenant, tt.ownershipRange)
- }
-
-diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
-index ef41ec2d8efbb..2a4ff6cd45242 100644
---- a/pkg/bloomcompactor/controller.go
-+++ b/pkg/bloomcompactor/controller.go
-@@ -70,7 +70,7 @@ func (s *SimpleBloomController) compactTenant(
- tenant string,
- ownershipRange v1.FingerprintBounds,
- ) error {
-- logger := log.With(s.logger, ""ownership"", ownershipRange, ""org_id"", tenant, ""table"", table.Addr())
-+ logger := log.With(s.logger, ""org_id"", tenant, ""table"", table.Addr(), ""ownership"", ownershipRange.String())
-
- client, err := s.bloomStore.Client(table.ModelTime())
- if err != nil {
-@@ -92,6 +92,15 @@ func (s *SimpleBloomController) compactTenant(
- return errors.Wrap(err, ""failed to get metas"")
- }
-
-+ level.Debug(logger).Log(""msg"", ""found relevant metas"", ""metas"", len(metas))
-+
-+ // fetch all metas overlapping our ownership range so we can safely
-+ // check which metas can be deleted even if they only partially overlap our ownership range
-+ superset, err := s.fetchSuperSet(ctx, tenant, table, ownershipRange, metas, logger)
-+ if err != nil {
-+ return errors.Wrap(err, ""failed to fetch superset"")
-+ }
-+
- // build compaction plans
- work, err := s.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger)
- if err != nil {
-@@ -104,6 +113,63 @@ func (s *SimpleBloomController) compactTenant(
- return errors.Wrap(err, ""failed to build gaps"")
- }
-
-+ // combine built and superset metas
-+ // in preparation for removing outdated ones
-+ combined := append(superset, built...)
-+
-+ outdated := outdatedMetas(combined)
-+ level.Debug(logger).Log(""msg"", ""found outdated metas"", ""outdated"", len(outdated))
-+
-+ var (
-+ deletedMetas int
-+ deletedBlocks int
-+ )
-+ defer func() {
-+ s.metrics.metasDeleted.Add(float64(deletedMetas))
-+ s.metrics.blocksDeleted.Add(float64(deletedBlocks))
-+ }()
-+
-+ for _, meta := range outdated {
-+ for _, block := range meta.Blocks {
-+ err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block})
-+ if err != nil {
-+ if client.IsObjectNotFoundErr(err) {
-+ level.Debug(logger).Log(""msg"", ""block not found while attempting delete, continuing"", ""block"", block.String())
-+ } else {
-+ level.Error(logger).Log(""msg"", ""failed to delete block"", ""err"", err, ""block"", block.String())
-+ return errors.Wrap(err, ""failed to delete block"")
-+ }
-+ }
-+ deletedBlocks++
-+ level.Debug(logger).Log(""msg"", ""removed outdated block"", ""block"", block.String())
-+ }
-+
-+ err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef})
-+ if err != nil {
-+ if client.IsObjectNotFoundErr(err) {
-+ level.Debug(logger).Log(""msg"", ""meta not found while attempting delete, continuing"", ""meta"", meta.MetaRef.String())
-+ } else {
-+ level.Error(logger).Log(""msg"", ""failed to delete meta"", ""err"", err, ""meta"", meta.MetaRef.String())
-+ return errors.Wrap(err, ""failed to delete meta"")
-+ }
-+ }
-+ deletedMetas++
-+ level.Debug(logger).Log(""msg"", ""removed outdated meta"", ""meta"", meta.MetaRef.String())
-+ }
-+
-+ level.Debug(logger).Log(""msg"", ""finished compaction"")
-+ return nil
-+}
-+
-+// fetchSuperSet fetches all metas which overlap the ownership range of the first set of metas we've resolved
-+func (s *SimpleBloomController) fetchSuperSet(
-+ ctx context.Context,
-+ tenant string,
-+ table config.DayTable,
-+ ownershipRange v1.FingerprintBounds,
-+ metas []bloomshipper.Meta,
-+ logger log.Logger,
-+) ([]bloomshipper.Meta, error) {
- // in order to delete outdated metas which only partially fall within the ownership range,
- // we need to fetch all metas in the entire bound range of the first set of metas we've resolved
- /*
-@@ -121,12 +187,28 @@ func (s *SimpleBloomController) compactTenant(
- union := superset.Union(meta.Bounds)
- if len(union) > 1 {
- level.Error(logger).Log(""msg"", ""meta bounds union is not a single range"", ""union"", union)
-- return errors.New(""meta bounds union is not a single range"")
-+ return nil, errors.New(""meta bounds union is not a single range"")
- }
- superset = union[0]
- }
-
-- metas, err = s.bloomStore.FetchMetas(
-+ within := superset.Within(ownershipRange)
-+ level.Debug(logger).Log(
-+ ""msg"", ""looking for superset metas"",
-+ ""superset"", superset.String(),
-+ ""superset_within"", within,
-+ )
-+
-+ if within {
-+ // we don't need to fetch any more metas
-+ // NB(owen-d): here we copy metas into the output. This is slightly inefficient, but
-+ // helps prevent mutability bugs by returning the same slice as the input.
-+ results := make([]bloomshipper.Meta, len(metas))
-+ copy(results, metas)
-+ return results, nil
-+ }
-+
-+ supersetMetas, err := s.bloomStore.FetchMetas(
- ctx,
- bloomshipper.MetaSearchParams{
- TenantID: tenant,
-@@ -134,42 +216,20 @@ func (s *SimpleBloomController) compactTenant(
- Keyspace: superset,
- },
- )
-+
- if err != nil {
- level.Error(logger).Log(""msg"", ""failed to get meta superset range"", ""err"", err, ""superset"", superset)
-- return errors.Wrap(err, ""failed to get meta superset range"")
-+ return nil, errors.Wrap(err, ""failed to get meta superset range"")
- }
-
-- // combine built and pre-existing metas
-- // in preparation for removing outdated metas
-- metas = append(metas, built...)
--
-- outdated := outdatedMetas(metas)
-- for _, meta := range outdated {
-- for _, block := range meta.Blocks {
-- if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil {
-- if client.IsObjectNotFoundErr(err) {
-- level.Debug(logger).Log(""msg"", ""block not found while attempting delete, continuing"", ""block"", block)
-- continue
-- }
--
-- level.Error(logger).Log(""msg"", ""failed to delete blocks"", ""err"", err)
-- return errors.Wrap(err, ""failed to delete blocks"")
-- }
-- }
--
-- if err := client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}); err != nil {
-- if client.IsObjectNotFoundErr(err) {
-- level.Debug(logger).Log(""msg"", ""meta not found while attempting delete, continuing"", ""meta"", meta.MetaRef)
-- } else {
-- level.Error(logger).Log(""msg"", ""failed to delete metas"", ""err"", err)
-- return errors.Wrap(err, ""failed to delete metas"")
-- }
-- }
-- }
--
-- level.Debug(logger).Log(""msg"", ""finished compaction"")
-- return nil
-+ level.Debug(logger).Log(
-+ ""msg"", ""found superset metas"",
-+ ""metas"", len(metas),
-+ ""fresh_metas"", len(supersetMetas),
-+ ""delta"", len(supersetMetas)-len(metas),
-+ )
-
-+ return supersetMetas, nil
- }
-
- func (s *SimpleBloomController) findOutdatedGaps(
-@@ -271,6 +331,7 @@ func (s *SimpleBloomController) buildGaps(
-
- for i := range plan.gaps {
- gap := plan.gaps[i]
-+ logger := log.With(logger, ""gap"", gap.bounds.String(), ""tsdb"", plan.tsdb.Name())
-
- meta := bloomshipper.Meta{
- MetaRef: bloomshipper.MetaRef{
-@@ -304,9 +365,11 @@ func (s *SimpleBloomController) buildGaps(
- blocksIter,
- s.rwFn,
- s.metrics,
-- log.With(logger, ""tsdb"", plan.tsdb.Name(), ""ownership"", gap),
-+ logger,
- )
-
-+ level.Debug(logger).Log(""msg"", ""generating blocks"", ""overlapping_blocks"", len(gap.blocks))
-+
- newBlocks := gen.Generate(ctx)
- if err != nil {
- level.Error(logger).Log(""msg"", ""failed to generate bloom"", ""err"", err)
-@@ -333,6 +396,16 @@ func (s *SimpleBloomController) buildGaps(
- blocksIter.Close()
- return nil, errors.Wrap(err, ""failed to write block"")
- }
-+ s.metrics.blocksCreated.Inc()
-+
-+ totalGapKeyspace := (gap.bounds.Max - gap.bounds.Min)
-+ progress := (built.Bounds.Max - gap.bounds.Min)
-+ pct := float64(progress) / float64(totalGapKeyspace) * 100
-+ level.Debug(logger).Log(
-+ ""msg"", ""uploaded block"",
-+ ""block"", built.BlockRef.String(),
-+ ""progress_pct"", fmt.Sprintf(""%.2f"", pct),
-+ )
-
- meta.Blocks = append(meta.Blocks, built.BlockRef)
- }
-@@ -346,6 +419,7 @@ func (s *SimpleBloomController) buildGaps(
- blocksIter.Close()
-
- // Write the new meta
-+ // TODO(owen-d): put total size in log, total time in metrics+log
- ref, err := bloomshipper.MetaRefFrom(tenant, table.Addr(), gap.bounds, meta.Sources, meta.Blocks)
- if err != nil {
- level.Error(logger).Log(""msg"", ""failed to checksum meta"", ""err"", err)
-@@ -357,8 +431,10 @@ func (s *SimpleBloomController) buildGaps(
- level.Error(logger).Log(""msg"", ""failed to write meta"", ""err"", err)
- return nil, errors.Wrap(err, ""failed to write meta"")
- }
-- created = append(created, meta)
-+ s.metrics.metasCreated.Inc()
-+ level.Debug(logger).Log(""msg"", ""uploaded meta"", ""meta"", meta.MetaRef.String())
-
-+ created = append(created, meta)
- totalSeries += uint64(seriesItrWithCounter.Count())
- }
- }
-diff --git a/pkg/bloomcompactor/metrics.go b/pkg/bloomcompactor/metrics.go
-index 350e3ed7e480e..74378cb786429 100644
---- a/pkg/bloomcompactor/metrics.go
-+++ b/pkg/bloomcompactor/metrics.go
-@@ -31,6 +31,11 @@ type Metrics struct {
- tenantsCompleted *prometheus.CounterVec
- tenantsCompletedTime *prometheus.HistogramVec
- tenantsSeries prometheus.Histogram
-+
-+ blocksCreated prometheus.Counter
-+ blocksDeleted prometheus.Counter
-+ metasCreated prometheus.Counter
-+ metasDeleted prometheus.Counter
- }
-
- func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
-@@ -53,13 +58,13 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
- compactionsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
-- Name: ""compactions_started"",
-+ Name: ""compactions_started_total"",
- Help: ""Total number of compactions started"",
- }),
- compactionCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
-- Name: ""compactions_completed"",
-+ Name: ""compactions_completed_total"",
- Help: ""Total number of compactions completed"",
- }, []string{""status""}),
- compactionTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-@@ -73,7 +78,7 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
- tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
-- Name: ""tenants_discovered"",
-+ Name: ""tenants_discovered_total"",
- Help: ""Number of tenants discovered during the current compaction run"",
- }),
- tenantsOwned: promauto.With(r).NewCounter(prometheus.CounterOpts{
-@@ -85,19 +90,19 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
- tenantsSkipped: promauto.With(r).NewCounter(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
-- Name: ""tenants_skipped"",
-+ Name: ""tenants_skipped_total"",
- Help: ""Number of tenants skipped since they are not owned by this instance"",
- }),
- tenantsStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
-- Name: ""tenants_started"",
-+ Name: ""tenants_started_total"",
- Help: ""Number of tenants started to process during the current compaction run"",
- }),
- tenantsCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
- Namespace: metricsNamespace,
- Subsystem: metricsSubsystem,
-- Name: ""tenants_completed"",
-+ Name: ""tenants_completed_total"",
- Help: ""Number of tenants successfully processed during the current compaction run"",
- }, []string{""status""}),
- tenantsCompletedTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
-@@ -115,6 +120,30 @@ func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
- // Up to 10M series per tenant, way more than what we expect given our max_global_streams_per_user limits
- Buckets: prometheus.ExponentialBucketsRange(1, 10000000, 10),
- }),
-+ blocksCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
-+ Namespace: metricsNamespace,
-+ Subsystem: metricsSubsystem,
-+ Name: ""blocks_created_total"",
-+ Help: ""Number of blocks created"",
-+ }),
-+ blocksDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{
-+ Namespace: metricsNamespace,
-+ Subsystem: metricsSubsystem,
-+ Name: ""blocks_deleted_total"",
-+ Help: ""Number of blocks deleted"",
-+ }),
-+ metasCreated: promauto.With(r).NewCounter(prometheus.CounterOpts{
-+ Namespace: metricsNamespace,
-+ Subsystem: metricsSubsystem,
-+ Name: ""metas_created_total"",
-+ Help: ""Number of metas created"",
-+ }),
-+ metasDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{
-+ Namespace: metricsNamespace,
-+ Subsystem: metricsSubsystem,
-+ Name: ""metas_deleted_total"",
-+ Help: ""Number of metas deleted"",
-+ }),
- }
-
- return &m
-diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
-index 67d41b650e375..cb030dfb59131 100644
---- a/pkg/bloomcompactor/spec.go
-+++ b/pkg/bloomcompactor/spec.go
-@@ -138,7 +138,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo
- )
- }
-
-- return NewLazyBlockBuilderIterator(ctx, s.opts, s.populator(ctx), s.readWriterFn, series, s.blocksIter)
-+ return NewLazyBlockBuilderIterator(ctx, s.opts, s.metrics, s.populator(ctx), s.readWriterFn, series, s.blocksIter)
- }
-
- // LazyBlockBuilderIterator is a lazy iterator over blocks that builds
-@@ -146,6 +146,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) v1.Iterator[*v1.Blo
- type LazyBlockBuilderIterator struct {
- ctx context.Context
- opts v1.BlockOptions
-+ metrics *Metrics
- populate func(*v1.Series, *v1.Bloom) error
- readWriterFn func() (v1.BlockWriter, v1.BlockReader)
- series v1.PeekingIterator[*v1.Series]
-@@ -158,6 +159,7 @@ type LazyBlockBuilderIterator struct {
- func NewLazyBlockBuilderIterator(
- ctx context.Context,
- opts v1.BlockOptions,
-+ metrics *Metrics,
- populate func(*v1.Series, *v1.Bloom) error,
- readWriterFn func() (v1.BlockWriter, v1.BlockReader),
- series v1.PeekingIterator[*v1.Series],
-@@ -166,6 +168,7 @@ func NewLazyBlockBuilderIterator(
- return &LazyBlockBuilderIterator{
- ctx: ctx,
- opts: opts,
-+ metrics: metrics,
- populate: populate,
- readWriterFn: readWriterFn,
- series: series,
-@@ -189,7 +192,7 @@ func (b *LazyBlockBuilderIterator) Next() bool {
- return false
- }
-
-- mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate)
-+ mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics.bloomMetrics)
- writer, reader := b.readWriterFn()
- blockBuilder, err := v1.NewBlockBuilder(b.opts, writer)
- if err != nil {
-diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go
-index 6159ce02a804a..7f5ec5eab81a3 100644
---- a/pkg/bloomcompactor/tsdb.go
-+++ b/pkg/bloomcompactor/tsdb.go
-@@ -236,8 +236,7 @@ func NewTSDBStores(
- if err != nil {
- return nil, errors.Wrap(err, ""failed to create object client"")
- }
-- prefix := path.Join(cfg.IndexTables.PathPrefix, cfg.IndexTables.Prefix)
-- res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, prefix))
-+ res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, cfg.IndexTables.PathPrefix))
- }
- }
-
-diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
-index 6bc43cf794342..e9776dfef78f5 100644
---- a/pkg/bloomgateway/util_test.go
-+++ b/pkg/bloomgateway/util_test.go
-@@ -323,8 +323,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
- MetaRef: bloomshipper.MetaRef{
- Ref: ref,
- },
-- BlockTombstones: []bloomshipper.BlockRef{},
-- Blocks: []bloomshipper.BlockRef{blockRef},
-+ Blocks: []bloomshipper.BlockRef{blockRef},
- }
- block, data, _ := v1.MakeBlock(t, n, fromFp, throughFp, from, through)
- // Printing fingerprints and the log lines of its chunks comes handy for debugging...
-diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
-index d2d51b557e5d3..b094b847f2ef5 100644
---- a/pkg/storage/bloom/v1/builder.go
-+++ b/pkg/storage/bloom/v1/builder.go
-@@ -526,6 +526,7 @@ type MergeBuilder struct {
- store Iterator[*Series]
- // Add chunks to a bloom
- populate func(*Series, *Bloom) error
-+ metrics *Metrics
- }
-
- // NewMergeBuilder is a specific builder which does the following:
-@@ -536,11 +537,13 @@ func NewMergeBuilder(
- blocks Iterator[*SeriesWithBloom],
- store Iterator[*Series],
- populate func(*Series, *Bloom) error,
-+ metrics *Metrics,
- ) *MergeBuilder {
- return &MergeBuilder{
- blocks: blocks,
- store: store,
- populate: populate,
-+ metrics: metrics,
- }
- }
-
-@@ -568,6 +571,8 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
- nextInBlocks = deduped.At()
- }
-
-+ var chunksIndexed, chunksCopied int
-+
- cur := nextInBlocks
- chunksToAdd := nextInStore.Chunks
- // The next series from the store doesn't exist in the blocks, so we add it
-@@ -583,8 +588,11 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
- } else {
- // if the series already exists in the block, we only need to add the new chunks
- chunksToAdd = nextInStore.Chunks.Unless(nextInBlocks.Series.Chunks)
-+ chunksCopied = len(nextInStore.Chunks) - len(chunksToAdd)
- }
-
-+ chunksIndexed = len(chunksToAdd)
-+
- if len(chunksToAdd) > 0 {
- if err := mb.populate(
- &Series{
-@@ -597,6 +605,9 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
- }
- }
-
-+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeIterated).Add(float64(chunksIndexed))
-+ mb.metrics.chunksIndexed.WithLabelValues(chunkIndexedTypeCopied).Add(float64(chunksCopied))
-+
- blockFull, err := builder.AddSeries(*cur)
- if err != nil {
- return 0, errors.Wrap(err, ""adding series to block"")
-@@ -606,6 +617,10 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (uint32, error) {
- }
- }
-
-+ if err := mb.store.Err(); err != nil {
-+ return 0, errors.Wrap(err, ""iterating store"")
-+ }
-+
- checksum, err := builder.Close()
- if err != nil {
- return 0, errors.Wrap(err, ""closing block"")
-diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go
-index 0122a35f7751c..0013ad8744579 100644
---- a/pkg/storage/bloom/v1/builder_test.go
-+++ b/pkg/storage/bloom/v1/builder_test.go
-@@ -226,7 +226,7 @@ func TestMergeBuilder(t *testing.T) {
- )
-
- // Ensure that the merge builder combines all the blocks correctly
-- mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop)
-+ mergeBuilder := NewMergeBuilder(dedupedBlocks(blocks), storeItr, pop, NewMetrics(nil))
- indexBuf := bytes.NewBuffer(nil)
- bloomsBuf := bytes.NewBuffer(nil)
- writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
-@@ -400,6 +400,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
- // We're not actually indexing new data in this test
- return nil
- },
-+ NewMetrics(nil),
- )
- builder, err := NewBlockBuilder(DefaultBlockOptions, writer)
- require.Nil(t, err)
-diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go
-index e3a14dc5453ea..58d43b8cd0aca 100644
---- a/pkg/storage/bloom/v1/index.go
-+++ b/pkg/storage/bloom/v1/index.go
-@@ -234,8 +234,8 @@ func aggregateHeaders(xs []SeriesHeader) SeriesHeader {
- Bounds: NewBounds(fromFp, throughFP),
- }
-
-- for _, x := range xs {
-- if x.FromTs < res.FromTs {
-+ for i, x := range xs {
-+ if i == 0 || x.FromTs < res.FromTs {
- res.FromTs = x.FromTs
- }
- if x.ThroughTs > res.ThroughTs {
-diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go
-index aa604c29f1573..f5568a9d76596 100644
---- a/pkg/storage/bloom/v1/metrics.go
-+++ b/pkg/storage/bloom/v1/metrics.go
-@@ -10,12 +10,16 @@ type Metrics struct {
- bloomSize prometheus.Histogram // size of the bloom filter in bytes
- hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter
- estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter
-+ chunksIndexed *prometheus.CounterVec
- }
-
-+const chunkIndexedTypeIterated = ""iterated""
-+const chunkIndexedTypeCopied = ""copied""
-+
- func NewMetrics(r prometheus.Registerer) *Metrics {
- return &Metrics{
- sbfCreationTime: promauto.With(r).NewCounter(prometheus.CounterOpts{
-- Name: ""bloom_creation_time"",
-+ Name: ""bloom_creation_time_total"",
- Help: ""Time spent creating scalable bloom filters"",
- }),
- bloomSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-@@ -33,5 +37,9 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
- Help: ""Estimated number of elements in the bloom filter"",
- Buckets: prometheus.ExponentialBucketsRange(1, 33554432, 10),
- }),
-+ chunksIndexed: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-+ Name: ""bloom_chunks_indexed_total"",
-+ Help: ""Number of chunks indexed in bloom filters, partitioned by type. Type can be iterated or copied, where iterated indicates the chunk data was fetched and ngrams for it's contents generated whereas copied indicates the chunk already existed in another source block and was copied to the new block"",
-+ }, []string{""type""}),
- }
- }
-diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
-index 882b0eab41c24..240f2b5166588 100644
---- a/pkg/storage/stores/shipper/bloomshipper/client.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
-@@ -88,10 +88,6 @@ type Meta struct {
- // The specific TSDB files used to generate the block.
- Sources []tsdb.SingleTenantTSDBIdentifier
-
-- // TODO(owen-d): remove, unused
-- // Old blocks which can be deleted in the future. These should be from previous compaction rounds.
-- BlockTombstones []BlockRef
--
- // A list of blocks that were generated
- Blocks []BlockRef
- }
-diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go
-index 897ed519946a7..e5bbe3b5b1bf5 100644
---- a/pkg/storage/stores/shipper/bloomshipper/client_test.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go
-@@ -63,8 +63,7 @@ func putMeta(c *BloomClient, tenant string, start model.Time, minFp, maxFp model
- // EndTimestamp: start.Add(12 * time.Hour),
- },
- },
-- Blocks: []BlockRef{},
-- BlockTombstones: []BlockRef{},
-+ Blocks: []BlockRef{},
- }
- raw, _ := json.Marshal(meta)
- return meta, c.client.PutObject(context.Background(), c.Meta(meta.MetaRef).Addr(), bytes.NewReader(raw))
-@@ -129,8 +128,7 @@ func TestBloomClient_PutMeta(t *testing.T) {
- // EndTimestamp: start.Add(12 * time.Hour),
- },
- },
-- Blocks: []BlockRef{},
-- BlockTombstones: []BlockRef{},
-+ Blocks: []BlockRef{},
- }
-
- err := c.PutMeta(ctx, meta)
-diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
-index 40a695e0b8e6c..962bebb9956fd 100644
---- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
-@@ -34,8 +34,7 @@ func makeMetas(t *testing.T, schemaCfg config.SchemaConfig, ts model.Time, keysp
- EndTimestamp: ts,
- },
- },
-- BlockTombstones: []BlockRef{},
-- Blocks: []BlockRef{},
-+ Blocks: []BlockRef{},
- }
- }
- return metas
-diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver.go b/pkg/storage/stores/shipper/bloomshipper/resolver.go
-index 40a59cee42dbc..7d224b9f01392 100644
---- a/pkg/storage/stores/shipper/bloomshipper/resolver.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/resolver.go
-@@ -14,6 +14,9 @@ const (
- BloomPrefix = ""bloom""
- MetasPrefix = ""metas""
- BlocksPrefix = ""blocks""
-+
-+ extTarGz = "".tar.gz""
-+ extJSON = "".json""
- )
-
- // KeyResolver is an interface for resolving keys to locations.
-@@ -36,7 +39,7 @@ func (defaultKeyResolver) Meta(ref MetaRef) Location {
- fmt.Sprintf(""%v"", ref.TableName),
- ref.TenantID,
- MetasPrefix,
-- fmt.Sprintf(""%v-%v"", ref.Bounds, ref.Checksum),
-+ fmt.Sprintf(""%v-%x%s"", ref.Bounds, ref.Checksum, extJSON),
- }
- }
-
-@@ -50,7 +53,8 @@ func (defaultKeyResolver) ParseMetaKey(loc Location) (MetaRef, error) {
- if err != nil {
- return MetaRef{}, fmt.Errorf(""failed to parse bounds of meta key %s : %w"", loc, err)
- }
-- checksum, err := strconv.ParseUint(fnParts[2], 16, 64)
-+ withoutExt := strings.TrimSuffix(fnParts[2], extJSON)
-+ checksum, err := strconv.ParseUint(withoutExt, 16, 64)
- if err != nil {
- return MetaRef{}, fmt.Errorf(""failed to parse checksum of meta key %s : %w"", loc, err)
- }
-@@ -77,7 +81,7 @@ func (defaultKeyResolver) Block(ref BlockRef) Location {
- ref.TenantID,
- BlocksPrefix,
- ref.Bounds.String(),
-- fmt.Sprintf(""%d-%d-%x"", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum),
-+ fmt.Sprintf(""%d-%d-%x%s"", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum, extTarGz),
- }
- }
-
-diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go
-index fd755b0a204a7..3267886ac063e 100644
---- a/pkg/storage/stores/shipper/bloomshipper/shipper.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go
-@@ -55,30 +55,15 @@ func (s *Shipper) Stop() {
- }
-
- // BlocksForMetas returns all the blocks from all the metas listed that are within the requested bounds
--// and not tombstoned in any of the metas
--func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintBounds) []BlockRef {
-- blocks := make(map[BlockRef]bool) // block -> isTombstoned
--
-+func BlocksForMetas(metas []Meta, interval Interval, keyspaces []v1.FingerprintBounds) (refs []BlockRef) {
- for _, meta := range metas {
-- for _, tombstone := range meta.BlockTombstones {
-- blocks[tombstone] = true
-- }
- for _, block := range meta.Blocks {
-- tombstoned, ok := blocks[block]
-- if ok && tombstoned {
-- // skip tombstoned blocks
-- continue
-+ if !isOutsideRange(block, interval, keyspaces) {
-+ refs = append(refs, block)
- }
-- blocks[block] = false
- }
- }
-
-- refs := make([]BlockRef, 0, len(blocks))
-- for ref, tombstoned := range blocks {
-- if !tombstoned && !isOutsideRange(ref, interval, keyspaces) {
-- refs = append(refs, ref)
-- }
-- }
- sort.Slice(refs, func(i, j int) bool {
- return refs[i].Bounds.Less(refs[j].Bounds)
- })
-diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
-index c9e47f91fea28..e03d72c26ba37 100644
---- a/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/shipper_test.go
-@@ -14,49 +14,6 @@ import (
- )
-
- func TestBloomShipper_findBlocks(t *testing.T) {
-- t.Run(""expected block that are specified in tombstones to be filtered out"", func(t *testing.T) {
-- metas := []Meta{
-- {
-- Blocks: []BlockRef{
-- //this blockRef is marked as deleted in the next meta
-- createMatchingBlockRef(1),
-- createMatchingBlockRef(2),
-- },
-- },
-- {
-- Blocks: []BlockRef{
-- //this blockRef is marked as deleted in the next meta
-- createMatchingBlockRef(3),
-- createMatchingBlockRef(4),
-- },
-- },
-- {
-- BlockTombstones: []BlockRef{
-- createMatchingBlockRef(1),
-- createMatchingBlockRef(3),
-- },
-- Blocks: []BlockRef{
-- createMatchingBlockRef(5),
-- },
-- },
-- }
--
-- ts := model.Now()
--
-- interval := NewInterval(
-- ts.Add(-2*time.Hour),
-- ts.Add(-1*time.Hour),
-- )
-- blocks := BlocksForMetas(metas, interval, []v1.FingerprintBounds{{Min: 100, Max: 200}})
--
-- expectedBlockRefs := []BlockRef{
-- createMatchingBlockRef(2),
-- createMatchingBlockRef(4),
-- createMatchingBlockRef(5),
-- }
-- require.ElementsMatch(t, expectedBlockRefs, blocks)
-- })
--
- tests := map[string]struct {
- minFingerprint uint64
- maxFingerprint uint64
-diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
-index ca86cb94fa963..c99aa46df4bf3 100644
---- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
-+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
-@@ -83,8 +83,7 @@ func createMetaInStorage(store *BloomStore, tenant string, start model.Time, min
- // EndTimestamp: start.Add(12 * time.Hour),
- },
- },
-- Blocks: []BlockRef{},
-- BlockTombstones: []BlockRef{},
-+ Blocks: []BlockRef{},
- }
- err := store.storeDo(start, func(s *bloomStoreEntry) error {
- raw, _ := json.Marshal(meta)
-diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
-index 9627718aa8ec7..00ee2e152144a 100644
---- a/pkg/validation/limits.go
-+++ b/pkg/validation/limits.go
-@@ -339,7 +339,12 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
- f.IntVar(&l.BloomGatewayBlocksDownloadingParallelism, ""bloom-gateway.blocks-downloading-parallelism"", 50, ""Maximum number of blocks will be downloaded in parallel by the Bloom Gateway."")
- f.DurationVar(&l.BloomGatewayCacheKeyInterval, ""bloom-gateway.cache-key-interval"", 15*time.Minute, ""Interval for computing the cache key in the Bloom Gateway."")
- _ = l.BloomCompactorMaxBlockSize.Set(defaultBloomCompactorMaxBlockSize)
-- f.Var(&l.BloomCompactorMaxBlockSize, ""bloom-compactor.max-block-size"", ""The maximum bloom block size. A value of 0 sets an unlimited size. Default is 200MB. The actual block size might exceed this limit since blooms will be added to blocks until the block exceeds the maximum block size."")
-+ f.Var(&l.BloomCompactorMaxBlockSize, ""bloom-compactor.max-block-size"",
-+ fmt.Sprintf(
-+ ""The maximum bloom block size. A value of 0 sets an unlimited size. Default is %s. The actual block size might exceed this limit since blooms will be added to blocks until the block exceeds the maximum block size."",
-+ defaultBloomCompactorMaxBlockSize,
-+ ),
-+ )
-
- l.ShardStreams = &shardstreams.Config{}
- l.ShardStreams.RegisterFlagsWithPrefix(""shard-streams"", f)",unknown,Blooms/integration fixes (#11979)
-8041bd29b90a79066f7c6393fef1db5ba29440b0,2024-08-13 23:26:30,renovate[bot],"fix(deps): update module github.com/azure/go-autorest/autorest/adal to v0.9.24 (#13862)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
-Co-authored-by: Paul Rogers <129207811+paul1r@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod
-index 2b5058d6ba31c..1f21f95ab16fc 100644
---- a/go.mod
-+++ b/go.mod
-@@ -10,7 +10,7 @@ require (
- cloud.google.com/go/storage v1.41.0
- github.com/Azure/azure-pipeline-go v0.2.3
- github.com/Azure/azure-storage-blob-go v0.14.0
-- github.com/Azure/go-autorest/autorest/adal v0.9.23
-+ github.com/Azure/go-autorest/autorest/adal v0.9.24
- github.com/Azure/go-autorest/autorest/azure/auth v0.5.13
- github.com/Masterminds/sprig/v3 v3.2.3
- github.com/NYTimes/gziphandler v1.1.1
-diff --git a/go.sum b/go.sum
-index e85019be11f65..97892d0431229 100644
---- a/go.sum
-+++ b/go.sum
-@@ -198,8 +198,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW
- github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
- github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
- github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
--github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
--github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
-+github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4=
-+github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8=
- github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM=
- github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU=
- github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
-diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
-index b11eb07884b05..97434ea7f7709 100644
---- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
-+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
-@@ -160,7 +160,7 @@ if (err == nil) {
- ```Go
- certificatePath := ""./example-app.pfx""
-
--certData, err := ioutil.ReadFile(certificatePath)
-+certData, err := os.ReadFile(certificatePath)
- if err != nil {
- return nil, fmt.Errorf(""failed to read the certificate file (%s): %v"", certificatePath, err)
- }
-diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
-index 9daa4b58b881e..f040e2ac6b45f 100644
---- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
-+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
-@@ -27,7 +27,7 @@ import (
- ""context""
- ""encoding/json""
- ""fmt""
-- ""io/ioutil""
-+ ""io""
- ""net/http""
- ""net/url""
- ""strings""
-@@ -116,7 +116,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf
- }
-
- s := v.Encode()
-- body := ioutil.NopCloser(strings.NewReader(s))
-+ body := io.NopCloser(strings.NewReader(s))
-
- req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
- if err != nil {
-@@ -131,7 +131,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf
- }
- defer resp.Body.Close()
-
-- rb, err := ioutil.ReadAll(resp.Body)
-+ rb, err := io.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf(""%s %s: %s"", logPrefix, errCodeHandlingFails, err.Error())
- }
-@@ -175,7 +175,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code
- }
-
- s := v.Encode()
-- body := ioutil.NopCloser(strings.NewReader(s))
-+ body := io.NopCloser(strings.NewReader(s))
-
- req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
- if err != nil {
-@@ -190,7 +190,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code
- }
- defer resp.Body.Close()
-
-- rb, err := ioutil.ReadAll(resp.Body)
-+ rb, err := io.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf(""%s %s: %s"", logPrefix, errTokenHandlingFails, err.Error())
- }
-diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
-index 2a974a39b3cd4..fb54a43235baf 100644
---- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
-+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
-@@ -20,7 +20,6 @@ import (
- ""encoding/json""
- ""errors""
- ""fmt""
-- ""io/ioutil""
- ""os""
- ""path/filepath""
-
-@@ -62,7 +61,7 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
- return fmt.Errorf(""failed to create directory (%s) to store token in: %v"", dir, err)
- }
-
-- newFile, err := ioutil.TempFile(dir, ""token"")
-+ newFile, err := os.CreateTemp(dir, ""token"")
- if err != nil {
- return fmt.Errorf(""failed to create the temp file to write the token: %v"", err)
- }
-diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
-index 2a24ab80cf16c..67baecd83ffe5 100644
---- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
-+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
-@@ -25,7 +25,6 @@ import (
- ""errors""
- ""fmt""
- ""io""
-- ""io/ioutil""
- ""math""
- ""net/http""
- ""net/url""
-@@ -1061,7 +1060,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
- } else if msiSecret.clientResourceID != """" {
- data.Set(""msi_res_id"", msiSecret.clientResourceID)
- }
-- req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
-+ req.Body = io.NopCloser(strings.NewReader(data.Encode()))
- req.Header.Set(""Content-Type"", ""application/x-www-form-urlencoded"")
- break
- case msiTypeIMDS:
-@@ -1096,7 +1095,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
- }
-
- s := v.Encode()
-- body := ioutil.NopCloser(strings.NewReader(s))
-+ body := io.NopCloser(strings.NewReader(s))
- req.ContentLength = int64(len(s))
- req.Header.Set(contentType, mimeTypeFormPost)
- req.Body = body
-@@ -1113,7 +1112,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
-
- logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
- defer resp.Body.Close()
-- rb, err := ioutil.ReadAll(resp.Body)
-+ rb, err := io.ReadAll(resp.Body)
-
- if resp.StatusCode != http.StatusOK {
- if err != nil {
-@@ -1235,7 +1234,7 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
-
- for attempt < maxAttempts {
- if resp != nil && resp.Body != nil {
-- io.Copy(ioutil.Discard, resp.Body)
-+ io.Copy(io.Discard, resp.Body)
- resp.Body.Close()
- }
- resp, err = sender.Do(req)
-diff --git a/vendor/modules.txt b/vendor/modules.txt
-index 63e213615c8b5..294f67708db38 100644
---- a/vendor/modules.txt
-+++ b/vendor/modules.txt
-@@ -137,7 +137,7 @@ github.com/Azure/go-autorest
- ## explicit; go 1.15
- github.com/Azure/go-autorest/autorest
- github.com/Azure/go-autorest/autorest/azure
--# github.com/Azure/go-autorest/autorest/adal v0.9.23
-+# github.com/Azure/go-autorest/autorest/adal v0.9.24
- ## explicit; go 1.15
- github.com/Azure/go-autorest/autorest/adal
- # github.com/Azure/go-autorest/autorest/azure/auth v0.5.13",fix,"update module github.com/azure/go-autorest/autorest/adal to v0.9.24 (#13862)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
-Co-authored-by: Paul Rogers <129207811+paul1r@users.noreply.github.com>"
-9ffee5148b7d369243149da236bb9befa4bcc637,2024-09-23 21:08:28,nicolevanderhoeven,"docs: Added new Query best practices page (#14057)
-
-Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/query/bp-query.md b/docs/sources/query/bp-query.md
-new file mode 100644
-index 0000000000000..819fdc0a76b06
---- /dev/null
-+++ b/docs/sources/query/bp-query.md
-@@ -0,0 +1,80 @@
-+---
-+title: Query best practices
-+menuTitle: Query best practices
-+description: Describes best practices for querying in Grafana Loki.
-+aliases:
-+- ../bp-query
-+weight: 700
-+---
-+# Query best practices
-+
-+The way you write queries in Loki affects how quickly you get results returned from those queries. Understanding the way Loki parses queries can help you write queries that are efficient and performant.
-+
-+{{< admonition type=""tip"" >}}
-+Before you start optimizing queries, read the [labels best practices](https://grafana.com/docs/loki//get-started/labels/bp-labels/) page to understand what makes a good label. Choosing the right labels is the first step towards writing efficient queries.
-+{{< /admonition >}}
-+
-+Loki evaluates a LogQL query from left to right, in the order that it is written. To get the best possible query performance, eliminate as many potential results as you can earlier in the query and then continue to progressively narrow your search as you continue writing the query. This page describes the recommended order for writing queries that efficiently filter out unwanted results.
-+
-+## Narrow down your time range first
-+
-+Reduce the number of logs Loki needs to look through by specifying a period of time that you'd like to search through. Loki creates one index file per day, so queries that span multiple days fetch multiple index files. The fewer files Loki has to search, the faster the query results are returned.
-+
-+Time ranges are typically not part of the query, but you can set a time range through your visualization tool or through [the Loki API](https://grafana.com/docs/loki//reference/loki-http-api/).
-+
-+
-+### In Grafana
-+
-+If you're using Loki with Grafana, you can use the dropdown menu on the upper right hand corner of a dashboard to select a time range, either relative (last X hours) or absolute (a specific date and time).
-+
-+
-+
-+### Through Loki API
-+
-+If you're querying Loki through [the Loki API](https://grafana.com/docs/loki//reference/loki-http-api/), you can use the [`query_range` endpoint](https://grafana.com/docs/loki//reference/loki-http-api/#query-logs-within-a-range-of-time) to add `start` and `end` timestamps for your query as parameters to the HTTP call rather than as part of the query itself.
-+
-+```bash
-+http:///loki/api/v1/query_range?query={job=""app""}&start=1633017600000000000&end=1633104000000000000
-+
-+```
-+
-+
-+## Use precise label selectors
-+
-+Next, write your label selectors. Identify the most specific label you can use within the log line and search based on that first. For example, if the logs contain the labels `namespace` and `app_name` and the latter is a smaller subset of data, start your query by selecting based on `app_name`:
-+
-+```bash
-+{app_name=""carnivorousgreenhouse""}
-+```
-+
-+Using the most specific label selector has the added benefit of reducing the length of your query. Since `app_name` is more specific than `namespace`, you don't need to add a selector for `namespace`. Adding more general label selectors has no further effect on the query.
-+
-+
-+## Use simple line filter expressions over regular expressions
-+
-+When using [line filter expressions](https://grafana.com/docs/loki//query/log_queries/#line-filter-expression), prefer the simpler filter operators such as:
-+- `|=` (contains string) and
-+- `!=` (does not contain string)
-+over the regular expression filter operators:
-+- `|~` (matches the regular expression)
-+- `!~` (does not match the regular expression)
-+
-+Loki evaluates the first two filter expressions faster than it can evaluate regular expressions, so always try to rewrite your query in terms of whether a log line contains or does not contain a certain string. Use regular expressions only as a last resort.
-+
-+Line filter expressions are more efficient than parser expressions.
-+
-+## Avoid using complex text parsers
-+
-+Use [parser expressions](https://grafana.com/docs/loki//query/log_queries/#parser-expression) only after line filter expressions. Parser expressions are ways to look through the log line and extract labels in different formats, which can be useful but are also more intensive for Loki to evaluate than line filter expressions. Using them after line filter expressions means that Loki only needs to evaluate parser expressions for log lines that match the line filter expression, reducing the number of log lines that Loki needs to search through.
-+
-+Parser expressions include [JSON](https://grafana.com/docs/loki//query/log_queries/#json), [logfmt](https://grafana.com/docs/loki//query/log_queries/#logfmt), [pattern](https://grafana.com/docs/loki//query/log_queries/#pattern), [regexp](https://grafana.com/docs/loki//query/log_queries/#regular-expression), and [unpack](https://grafana.com/docs/loki//query/log_queries/#unpack) parsers.
-+
-+## Use recording rules
-+
-+Some queries are sufficiently complex, or some datasets sufficiently large, that there is a limit as to how much query performance can be optimized. If you're following the tips on this page and are still experiencing slow query times, consider creating a [recording rule](https://grafana.com/docs/loki//operations/recording-rules/) for them. A recording rule runs a query at a predetermined time and also precomputes the results of that query, saving those results for faster retrieval later.
-+
-+## Further resources
-+
-+- [Watch: 5 tips for improving Grafana Loki query performance](https://grafana.com/blog/2023/01/10/watch-5-tips-for-improving-grafana-loki-query-performance/)
-+- [Grafana Loki Design Basics with Ed Welch (Grafana Office Hours #27)](https://www.youtube.com/live/3uFMJLufgSo?feature=shared&t=3385)
-+- [Labels best practices](https://grafana.com/docs/loki//get-started/labels/bp-labels/)
-\ No newline at end of file
-diff --git a/docs/sources/query/grafana-time-range-picker.png b/docs/sources/query/grafana-time-range-picker.png
-new file mode 100644
-index 0000000000000..f207e5c0ccec7
-Binary files /dev/null and b/docs/sources/query/grafana-time-range-picker.png differ",docs,"Added new Query best practices page (#14057)
-
-Co-authored-by: J Stickler "
-6dce98870d8c5c7054b3444d2fe4e66dad262a53,2024-04-18 20:01:26,Michel Hollands,"fix: Fix the lokitool imports (#12673)
-
-Signed-off-by: Michel Hollands ",False,"diff --git a/cmd/lokitool/main.go b/cmd/lokitool/main.go
-index 155705b07afa7..6b52fb0a3d657 100644
---- a/cmd/lokitool/main.go
-+++ b/cmd/lokitool/main.go
-@@ -8,7 +8,7 @@ import (
-
- ""github.com/prometheus/common/version""
-
-- ""github.com/grafana/loki/pkg/tool/commands""
-+ ""github.com/grafana/loki/v3/pkg/tool/commands""
- )
-
- var (
-diff --git a/pkg/tool/client/rules.go b/pkg/tool/client/rules.go
-index 40dd0e1a292be..d662794d81254 100644
---- a/pkg/tool/client/rules.go
-+++ b/pkg/tool/client/rules.go
-@@ -10,7 +10,7 @@ import (
- log ""github.com/sirupsen/logrus""
- ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- // CreateRuleGroup creates a new rule group
-diff --git a/pkg/tool/commands/rules.go b/pkg/tool/commands/rules.go
-index d1e16c026b2a6..4abc14162eddd 100644
---- a/pkg/tool/commands/rules.go
-+++ b/pkg/tool/commands/rules.go
-@@ -15,10 +15,10 @@ import (
- ""gopkg.in/alecthomas/kingpin.v2""
- yamlv3 ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/tool/client""
-- ""github.com/grafana/loki/pkg/tool/printer""
-- ""github.com/grafana/loki/pkg/tool/rules""
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/client""
-+ ""github.com/grafana/loki/v3/pkg/tool/printer""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- const (
-diff --git a/pkg/tool/commands/rules_test.go b/pkg/tool/commands/rules_test.go
-index d1878f856cf5c..fe27da35f9d37 100644
---- a/pkg/tool/commands/rules_test.go
-+++ b/pkg/tool/commands/rules_test.go
-@@ -7,7 +7,7 @@ import (
- ""github.com/stretchr/testify/assert""
- ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- func TestCheckDuplicates(t *testing.T) {
-diff --git a/pkg/tool/printer/printer.go b/pkg/tool/printer/printer.go
-index f85bd835a85de..084d483a07a45 100644
---- a/pkg/tool/printer/printer.go
-+++ b/pkg/tool/printer/printer.go
-@@ -13,8 +13,8 @@ import (
- ""github.com/mitchellh/colorstring""
- ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/tool/rules""
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- // Printer is used for printing formatted output from the cortextool
-diff --git a/pkg/tool/printer/printer_test.go b/pkg/tool/printer/printer_test.go
-index 5c9a84899cd35..c8650d9bd6101 100644
---- a/pkg/tool/printer/printer_test.go
-+++ b/pkg/tool/printer/printer_test.go
-@@ -9,7 +9,7 @@ import (
- ""github.com/stretchr/testify/assert""
- ""github.com/stretchr/testify/require""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- func TestPrintRuleSet(t *testing.T) {
-diff --git a/pkg/tool/rules/compare.go b/pkg/tool/rules/compare.go
-index 728726037acbd..2d64c534e88d1 100644
---- a/pkg/tool/rules/compare.go
-+++ b/pkg/tool/rules/compare.go
-@@ -10,7 +10,7 @@ import (
- ""github.com/prometheus/prometheus/model/rulefmt""
- yaml ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- var (
-diff --git a/pkg/tool/rules/compare_test.go b/pkg/tool/rules/compare_test.go
-index 0dfda624489b8..4df1aa2ee67af 100644
---- a/pkg/tool/rules/compare_test.go
-+++ b/pkg/tool/rules/compare_test.go
-@@ -6,7 +6,7 @@ import (
- ""github.com/prometheus/prometheus/model/rulefmt""
- yaml ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- func Test_rulesEqual(t *testing.T) {
-diff --git a/pkg/tool/rules/parser.go b/pkg/tool/rules/parser.go
-index f4017c049f6ef..aa8f833630091 100644
---- a/pkg/tool/rules/parser.go
-+++ b/pkg/tool/rules/parser.go
-@@ -12,7 +12,7 @@ import (
- log ""github.com/sirupsen/logrus""
- yaml ""gopkg.in/yaml.v3""
-
-- ""github.com/grafana/loki/pkg/ruler""
-+ ""github.com/grafana/loki/v3/pkg/ruler""
- )
-
- const (
-diff --git a/pkg/tool/rules/parser_test.go b/pkg/tool/rules/parser_test.go
-index 68f9ff6d70f80..35db097486a81 100644
---- a/pkg/tool/rules/parser_test.go
-+++ b/pkg/tool/rules/parser_test.go
-@@ -6,7 +6,7 @@ import (
-
- ""github.com/prometheus/prometheus/model/rulefmt""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- func TestParseFiles(t *testing.T) {
-diff --git a/pkg/tool/rules/rules.go b/pkg/tool/rules/rules.go
-index e2c216317c546..eccfbdabe45a4 100644
---- a/pkg/tool/rules/rules.go
-+++ b/pkg/tool/rules/rules.go
-@@ -8,9 +8,9 @@ import (
- ""github.com/prometheus/prometheus/promql/parser""
- log ""github.com/sirupsen/logrus""
-
-- logql ""github.com/grafana/loki/pkg/logql/syntax""
-+ logql ""github.com/grafana/loki/v3/pkg/logql/syntax""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- // RuleNamespace is used to parse a slightly modified prometheus
-diff --git a/pkg/tool/rules/rules_test.go b/pkg/tool/rules/rules_test.go
-index 690316db2d182..fba13040d49b8 100644
---- a/pkg/tool/rules/rules_test.go
-+++ b/pkg/tool/rules/rules_test.go
-@@ -8,7 +8,7 @@ import (
- ""gopkg.in/yaml.v3""
- ""gotest.tools/assert""
-
-- ""github.com/grafana/loki/pkg/tool/rules/rwrulefmt""
-+ ""github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt""
- )
-
- func TestAggregateBy(t *testing.T) {",fix,"Fix the lokitool imports (#12673)
-
-Signed-off-by: Michel Hollands "
-3d2282745bf121377ade603432347e6ca23b9235,2022-05-25 02:15:14,Callum Styan,"Runtime reloadable config; ring migration setup (#6214)
-
-* Clean up setting of memberlist and multikv config for Loki services that
-use the ring. Also sets the multi client runtime config function for all
-services that use the ring.
-
-Signed-off-by: Callum Styan
-
-* Add a test for the multi kv setup
-
-Signed-off-by: Callum Styan
-
-* Fix lint issues.
-
-Signed-off-by: Callum Styan ",False,"diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
-index 5def8a0cbcb3c..7654a1385e714 100644
---- a/pkg/loki/modules.go
-+++ b/pkg/loki/modules.go
-@@ -135,8 +135,6 @@ func (t *Loki) initServer() (services.Service, error) {
- }
-
- func (t *Loki) initRing() (_ services.Service, err error) {
-- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
- t.ring, err = ring.New(t.Cfg.Ingester.LifecyclerConfig.RingConfig, ""ingester"", ingester.RingKey, util_log.Logger, prometheus.WrapRegistererWithPrefix(""cortex_"", prometheus.DefaultRegisterer))
- if err != nil {
- return
-@@ -164,6 +162,19 @@ func (t *Loki) initRuntimeConfig() (services.Service, error) {
- var err error
- t.runtimeConfig, err = runtimeconfig.New(t.Cfg.RuntimeConfig, prometheus.WrapRegistererWithPrefix(""loki_"", prometheus.DefaultRegisterer), util_log.Logger)
- t.TenantLimits = newtenantLimitsFromRuntimeConfig(t.runtimeConfig)
-+
-+ // Update config fields using runtime config. Only if multiKV is used for given ring these returned functions will be
-+ // called and register the listener.
-+ //
-+ // By doing the initialization here instead of per-module init function, we avoid the problem
-+ // of projects based on Loki forgetting the wiring if they override module's init method (they also don't have access to private symbols).
-+ t.Cfg.CompactorConfig.CompactorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-+ t.Cfg.Distributor.DistributorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-+ t.Cfg.IndexGateway.Ring.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-+ t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-+ t.Cfg.QueryScheduler.SchedulerRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-+ t.Cfg.Ruler.Ring.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-+
- return t.runtimeConfig, err
- }
-
-@@ -194,8 +205,6 @@ func (t *Loki) initTenantConfigs() (_ services.Service, err error) {
- }
-
- func (t *Loki) initDistributor() (services.Service, error) {
-- t.Cfg.Distributor.DistributorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-- t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
- var err error
- t.distributor, err = distributor.New(t.Cfg.Distributor, t.Cfg.IngesterClient, t.tenantConfigs, t.ring, t.overrides, prometheus.DefaultRegisterer)
- if err != nil {
-@@ -315,8 +324,6 @@ func (t *Loki) initQuerier() (services.Service, error) {
- }
-
- func (t *Loki) initIngester() (_ services.Service, err error) {
-- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
-- t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
- t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort
-
- t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.overrides, t.tenantConfigs, prometheus.DefaultRegisterer)
-@@ -735,7 +742,6 @@ func (t *Loki) initRuler() (_ services.Service, err error) {
- }
-
- t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort
-- t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-
- deleteStore, err := t.deleteRequestsStore()
- if err != nil {
-@@ -814,13 +820,20 @@ func (t *Loki) initMemberlistKV() (services.Service, error) {
- dnsProvider := dns.NewProvider(util_log.Logger, dnsProviderReg, dns.GolangResolverType)
-
- t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV, util_log.Logger, dnsProvider, reg)
-+
-+ t.Cfg.CompactorConfig.CompactorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-+ t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-+ t.Cfg.IndexGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-+ t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-+ t.Cfg.QueryScheduler.SchedulerRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-+ t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-+
- return t.MemberlistKV, nil
- }
-
- func (t *Loki) initCompactor() (services.Service, error) {
- // Set some config sections from other config sections in the config struct
- t.Cfg.CompactorConfig.CompactorRing.ListenPort = t.Cfg.Server.GRPCListenPort
-- t.Cfg.CompactorConfig.CompactorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-
- if !config.UsingBoltdbShipper(t.Cfg.SchemaConfig.Configs) {
- level.Info(util_log.Logger).Log(""msg"", ""Not using boltdb-shipper index, not starting compactor"")
-@@ -853,7 +866,6 @@ func (t *Loki) initCompactor() (services.Service, error) {
- }
-
- func (t *Loki) initIndexGateway() (services.Service, error) {
-- t.Cfg.IndexGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
- t.Cfg.IndexGateway.Ring.ListenPort = t.Cfg.Server.GRPCListenPort
-
- indexClient, err := storage.NewIndexClient(config.BoltDBShipperType, t.Cfg.StorageConfig, t.Cfg.SchemaConfig, t.overrides, t.clientMetrics, t.indexGatewayRingManager.IndexGatewayOwnsTenant, prometheus.DefaultRegisterer)
-@@ -875,7 +887,6 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) {
- }
-
- t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = shipper.ModeReadOnly
-- t.Cfg.IndexGateway.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
- t.Cfg.IndexGateway.Ring.ListenPort = t.Cfg.Server.GRPCListenPort
-
- managerMode := indexgateway.ClientMode
-@@ -897,7 +908,6 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) {
- func (t *Loki) initQueryScheduler() (services.Service, error) {
- // Set some config sections from other config sections in the config struct
- t.Cfg.QueryScheduler.SchedulerRing.ListenPort = t.Cfg.Server.GRPCListenPort
-- t.Cfg.QueryScheduler.SchedulerRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
-
- s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.overrides, util_log.Logger, prometheus.DefaultRegisterer)
- if err != nil {
-diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go
-index b29ab27ff4ac1..c0c905cd71ab3 100644
---- a/pkg/loki/modules_test.go
-+++ b/pkg/loki/modules_test.go
-@@ -1,10 +1,20 @@
- package loki
-
- import (
-+ ""path/filepath""
- ""testing""
- ""time""
-
-+ ""github.com/grafana/dskit/flagext""
-+ ""github.com/prometheus/common/model""
-+ ""github.com/stretchr/testify/require""
-+
-+ ""github.com/prometheus/client_golang/prometheus""
-+
-+ ""github.com/grafana/loki/pkg/storage""
-+ ""github.com/grafana/loki/pkg/storage/chunk/client/local""
- ""github.com/grafana/loki/pkg/storage/config""
-+ ""github.com/grafana/loki/pkg/storage/stores/shipper""
- )
-
- func Test_calculateMaxLookBack(t *testing.T) {
-@@ -81,3 +91,101 @@ func Test_calculateMaxLookBack(t *testing.T) {
- })
- }
- }
-+
-+func prepareGlobalMetricsRegistry(t *testing.T) {
-+ oldReg, oldGat := prometheus.DefaultRegisterer, prometheus.DefaultGatherer
-+
-+ reg := prometheus.NewRegistry()
-+ prometheus.DefaultRegisterer, prometheus.DefaultGatherer = reg, reg
-+
-+ t.Cleanup(func() {
-+ prometheus.DefaultRegisterer, prometheus.DefaultGatherer = oldReg, oldGat
-+ })
-+}
-+
-+func TestMultiKVSetup(t *testing.T) {
-+ dir := t.TempDir()
-+
-+ for target, checkFn := range map[string]func(t *testing.T, c Config){
-+ All: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.CompactorConfig.CompactorRing.KVStore.Multi.ConfigProvider)
-+ require.NotNil(t, c.Distributor.DistributorRing.KVStore.Multi.ConfigProvider)
-+ require.NotNil(t, c.IndexGateway.Ring.KVStore.Multi.ConfigProvider)
-+ require.NotNil(t, c.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider)
-+ require.NotNil(t, c.QueryScheduler.SchedulerRing.KVStore.Multi.ConfigProvider)
-+ require.NotNil(t, c.Ruler.Ring.KVStore.Multi.ConfigProvider)
-+ },
-+
-+ Compactor: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.CompactorConfig.CompactorRing.KVStore.Multi.ConfigProvider)
-+ },
-+
-+ Distributor: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider)
-+ },
-+
-+ IndexGateway: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.IndexGateway.Ring.KVStore.Multi.ConfigProvider)
-+ },
-+
-+ Ingester: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider)
-+ },
-+
-+ QueryScheduler: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.QueryScheduler.SchedulerRing.KVStore.Multi.ConfigProvider)
-+ },
-+
-+ Ruler: func(t *testing.T, c Config) {
-+ require.NotNil(t, c.Ruler.Ring.KVStore.Multi.ConfigProvider)
-+ },
-+ } {
-+ t.Run(target, func(t *testing.T) {
-+ prepareGlobalMetricsRegistry(t)
-+
-+ cfg := Config{}
-+ cfg.SchemaConfig = config.SchemaConfig{
-+ Configs: []config.PeriodConfig{
-+ {
-+ IndexType: config.StorageTypeInMemory,
-+ ObjectType: config.StorageTypeFileSystem,
-+ RowShards: 16,
-+ Schema: ""v11"",
-+ From: config.DayTime{
-+ Time: model.Now(),
-+ },
-+ },
-+ },
-+ }
-+ flagext.DefaultValues(&cfg)
-+ // Set to 0 to find any free port.
-+ cfg.Server.HTTPListenPort = 0
-+ cfg.Server.GRPCListenPort = 0
-+ cfg.Target = []string{target}
-+
-+ // Must be set, otherwise MultiKV config provider will not be set.
-+ cfg.RuntimeConfig.LoadPath = filepath.Join(dir, ""config.yaml"")
-+
-+ // This would be overwritten by the default values setting.
-+ cfg.StorageConfig = storage.Config{
-+ FSConfig: local.FSConfig{Directory: dir},
-+ BoltDBShipperConfig: shipper.Config{
-+ SharedStoreType: config.StorageTypeFileSystem,
-+ ActiveIndexDirectory: dir,
-+ CacheLocation: dir,
-+ Mode: shipper.ModeWriteOnly},
-+ }
-+ cfg.Ruler.Config.StoreConfig.Type = config.StorageTypeLocal
-+ cfg.Ruler.Config.StoreConfig.Local.Directory = dir
-+
-+ c, err := New(cfg)
-+ require.NoError(t, err)
-+
-+ _, err = c.ModuleManager.InitModuleServices(cfg.Target...)
-+ require.NoError(t, err)
-+ defer c.Server.Stop()
-+
-+ checkFn(t, c.Cfg)
-+ })
-+ }
-+}
-diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
-index d7752e9fd0569..2e652e4928478 100644
---- a/pkg/storage/config/schema_config.go
-+++ b/pkg/storage/config/schema_config.go
-@@ -36,6 +36,7 @@ const (
- StorageTypeGCPColumnKey = ""gcp-columnkey""
- StorageTypeGCS = ""gcs""
- StorageTypeGrpc = ""grpc-store""
-+ StorageTypeLocal = ""local""
- StorageTypeS3 = ""s3""
- StorageTypeSwift = ""swift""
- // BoltDBShipperType holds the index type for using boltdb with shipper which keeps flushing them to a shared storage",unknown,"Runtime reloadable config; ring migration setup (#6214)
-
-* Clean up setting of memberlist and multikv config for Loki services that
-use the ring. Also sets the multi client runtime config function for all
-services that use the ring.
-
-Signed-off-by: Callum Styan
-
-* Add a test for the multi kv setup
-
-Signed-off-by: Callum Styan
-
-* Fix lint issues.
-
-Signed-off-by: Callum Styan "
-6f8bfe0c79fda038819426d989bb262a492f692c,2020-09-22 19:04:46,Owen Diehl,"Ruler docs + single binary inclusion (#2637)
-
-* starts alerting docs
-
-* ruler in single binary
-
-* make docs interactive
-
-* alerting docs
-
-* ruler prom alerts endpoint
-
-* Apply suggestions from code review
-
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-
-* doc fixes
-
-* capitalize ruler
-
-* removes double spaces
-
-* Update docs/sources/alerting/_index.md
-
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-
-* Apply suggestions from code review
-
-Co-authored-by: Ed Welch
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-Co-authored-by: Ed Welch ",False,"diff --git a/docs/Makefile b/docs/Makefile
-index 2ad3330081875..6c01c0e765bc7 100644
---- a/docs/Makefile
-+++ b/docs/Makefile
-@@ -3,9 +3,9 @@ IMAGE = grafana/docs-base:latest
- .PHONY: docs
- docs:
- docker pull ${IMAGE}
-- docker run -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 --rm $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make server'
-+ docker run --rm -it -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make server'
-
- .PHONY: docs-test
- docs-test:
- docker pull ${IMAGE}
-- docker run -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 --rm $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod'
-\ No newline at end of file
-+ docker run --rm -it -v ${PWD}/sources:/hugo/content/docs/loki/latest -p 3002:3002 $(IMAGE) /bin/bash -c 'mkdir -p content/docs/grafana/latest/ && touch content/docs/grafana/latest/menu.yaml && make prod'
-diff --git a/docs/sources/alerting/_index.md b/docs/sources/alerting/_index.md
-new file mode 100644
-index 0000000000000..9f1f47eeb400f
---- /dev/null
-+++ b/docs/sources/alerting/_index.md
-@@ -0,0 +1,259 @@
-+---
-+title: Alerting
-+weight: 700
-+---
-+
-+# Alerting
-+
-+Loki includes a component called the Ruler, adapted from our upstream project, Cortex. The Ruler is responsible for continually evaluating a set of configurable queries and then alerting when certain conditions happen, e.g. a high percentage of error logs.
-+
-+## Prometheus Compatible
-+
-+When running the Ruler (which runs by default in the single binary), Loki accepts rules files and then schedules them for continual evaluation. These are _Prometheus compatible_! This means the rules file has the same structure as in [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/), with the exception that the rules specified are in LogQL.
-+
-+Let's see what that looks like:
-+
-+The syntax of a rule file is:
-+
-+```yaml
-+groups:
-+ [ - ]
-+```
-+
-+A simple example file could be:
-+
-+```yaml
-+groups:
-+ - name: example
-+ rules:
-+ - alert: HighThroughputLogStreams
-+ expr: sum by(container) (rate({job=~""loki-dev/.*""}[1m])) > 1000
-+ for: 2m
-+```
-+
-+### ``
-+
-+```yaml
-+# The name of the group. Must be unique within a file.
-+name:
-+
-+# How often rules in the group are evaluated.
-+[ interval: | default = Ruler.evaluation_interval || 1m ]
-+
-+rules:
-+ [ - ... ]
-+```
-+
-+### ``
-+
-+The syntax for alerting rules is (see the LogQL [docs](https://grafana.com/docs/loki/latest/logql/#metric-queries) for more details):
-+
-+```yaml
-+# The name of the alert. Must be a valid label value.
-+alert:
-+
-+# The LogQL expression to evaluate (must be an instant vector). Every evaluation cycle this is
-+# evaluated at the current time, and all resultant time series become
-+# pending/firing alerts.
-+expr:
-+
-+# Alerts are considered firing once they have been returned for this long.
-+# Alerts which have not yet fired for long enough are considered pending.
-+[ for: | default = 0s ]
-+
-+# Labels to add or overwrite for each alert.
-+labels:
-+ [ : ]
-+
-+# Annotations to add to each alert.
-+annotations:
-+ [ : ]
-+```
-+
-+### Example
-+
-+A full-fledged example of a rules file might look like:
-+
-+```yaml
-+groups:
-+ - name: should_fire
-+ rules:
-+ - alert: HighPercentageError
-+ expr: |
-+ sum(rate({app=""foo"", env=""production""} |= ""error"" [5m])) by (job)
-+ /
-+ sum(rate({app=""foo"", env=""production""}[5m])) by (job)
-+ > 0.05
-+ for: 10m
-+ labels:
-+ severity: page
-+ annotations:
-+ summary: High request latency
-+ - name: credentials_leak
-+ rules:
-+ - alert: http-credentials-leaked
-+ annotations:
-+ message: ""{{ $labels.job }} is leaking http basic auth credentials.""
-+ expr: 'sum by (cluster, job, pod) (count_over_time({namespace=""prod""} |~ ""http(s?)://(\\w+):(\\w+)@"" [5m]) > 0)'
-+ for: 10m
-+ labels:
-+ severity: critical
-+```
-+
-+## Use cases
-+
-+The Ruler's Prometheus compatibility further accentuates the marriage between metrics and logs. For those looking to get started alerting based on logs, or wondering why this might be useful, here are a few use cases we think fit very well.
-+
-+### We aren't using metrics yet
-+
-+Many nascent projects, apps, or even companies may not have a metrics backend yet. We tend to add logging support before metric support, so if you're in this stage, alerting based on logs can help bridge the gap. It's easy to start building Loki alerts for things like _the percentage of error logs_ such as the example from earlier:
-+```yaml
-+- alert: HighPercentageError
-+ expr: |
-+ sum(rate({app=""foo"", env=""production""} |= ""error"" [5m])) by (job)
-+ /
-+ sum(rate({app=""foo"", env=""production""}[5m])) by (job)
-+ > 0.05
-+```
-+
-+### Black box monitoring
-+
-+We don't always control the source code of applications we run. Think load balancers and the myriad components (both open source and closed third-party) that support our applications; it's a common problem that these don't expose a metric you want (or any metrics at all). How then, can we bring them into our observability stack in order to monitor them effectively? Alerting based on logs is a great answer for these problems.
-+
-+For a sneak peek of how to combine this with the upcoming LogQL v2 functionality, take a look at Ward Bekker's [video](https://www.youtube.com/watch?v=RwQlR3D4Km4) which builds a robust nginx monitoring dashboard entirely from nginx logs.
-+
-+### Event alerting
-+
-+Sometimes you want to know whether _any_ instance of something has occurred. Alerting based on logs can be a great way to handle this, such as finding examples of leaked authentication credentials:
-+```yaml
-+- name: credentials_leak
-+ rules:
-+ - alert: http-credentials-leaked
-+ annotations:
-+ message: ""{{ $labels.job }} is leaking http basic auth credentials.""
-+ expr: 'sum by (cluster, job, pod) (count_over_time({namespace=""prod""} |~ ""http(s?)://(\\w+):(\\w+)@"" [5m]) > 0)'
-+ for: 10m
-+ labels:
-+ severity: critical
-+```
-+
-+### Alerting on high-cardinality sources
-+
-+Another great use case is alerting on high-cardinality sources. These are things which are difficult/expensive to record as metrics because the potential label set is huge. A great example of this is per-tenant alerting in multi-tenanted systems like Loki. It's a common balancing act between the desire to have per-tenant metrics and the cardinality explosion that ensues (adding a single _tenant_ label to an existing Prometheus metric would increase its cardinality by the number of tenants).
-+
-+Creating these alerts in LogQL is attractive because these metrics can be extracted at _query time_, meaning we don't suffer the cardinality explosion in our metrics store.
-+
-+> **Note:** To really take advantage of this, we'll need some features from the upcoming LogQL v2 language. Stay tuned.
-+
-+## Interacting with the Ruler
-+
-+Because the rule files are identical to Prometheus rule files, we can interact with the Loki Ruler via [`cortex-tool`](https://github.com/grafana/cortex-tools#rules). The CLI is in early development, but works alongside both Loki and cortex. Make sure to pass the `--backend=loki` argument to commands when using it with Loki.
-+
-+> **Note:** Not all commands in cortextool currently support Loki.
-+
-+An example workflow is included below:
-+
-+```sh
-+# diff rules against the currently managed ruleset in Loki
-+cortextool rules diff --rule-dirs=./output --backend=loki
-+
-+# ensure the remote ruleset matches your local ruleset, creating/updating/deleting remote rules which differ from your local specification.
-+cortextool rules sync --rule-dirs=./output --backend=loki
-+
-+# print the remote ruleset
-+cortextool rules print --backend=loki
-+```
-+
-+There is also a [github action](https://github.com/grafana/cortex-rules-action) available for `cortex-tool`, so you can add it into your CI/CD pipelines!
-+
-+For instance, you can sync rules on master builds via
-+```yaml
-+name: sync-cortex-rules-and-alerts
-+on:
-+ push:
-+ branches:
-+ - master
-+env:
-+ CORTEX_ADDRESS: ''
-+ CORTEX_TENANT_ID: ''
-+ CORTEX_API_KEY: ${{ secrets.API_KEY }}
-+ RULES_DIR: 'output/'
-+jobs:
-+ sync-loki-alerts:
-+ runs-on: ubuntu-18.04
-+ steps:
-+ - name: Diff rules
-+ id: diff-rules
-+ uses: grafana/cortex-rules-action@v0.3.0
-+ env:
-+ ACTION: 'diff'
-+ with:
-+ args: --backend=loki
-+ - name: Sync rules
-+ if: ${{ !contains(steps.diff-rules.outputs.detailed, 'no changes detected') }}
-+ uses: grafana/cortex-rules-action@v0.3.0
-+ env:
-+ ACTION: 'sync'
-+ with:
-+ args: --backend=loki
-+ - name: Print rules
-+ uses: grafana/cortex-rules-action@v0.3.0
-+ env:
-+ ACTION: 'print'
-+```
-+
-+## Scheduling and best practices
-+
-+One option to scale the Ruler is by scaling it horizontally. However, with multiple Ruler instances running they will need to coordinate to determine which instance will evaluate which rule. Similar to the ingesters, the Rulers establish a hash ring to divide up the responsibilities of evaluating rules.
-+
-+The possible configurations are listed fully in the configuration [docs](https://grafana.com/docs/loki/latest/configuration/), but in order to shard rules across multiple Rulers, the rules API must be enabled via flag (`-experimental.Ruler.enable-api`) or config file parameter. Secondly, the Ruler requires its own ring to be configured. From there the Rulers will shard and handle the division of rules automatically. Unlike ingesters, Rulers do not hand over responsibility: all rules are re-sharded randomly every time a Ruler is added to or removed from the ring.
-+
-+A full Ruler config example is:
-+
-+```yaml
-+Ruler:
-+ alertmanager_url:
-+ enable_alertmanager_v2: true
-+ enable_api: true
-+ enable_sharding: true
-+ ring:
-+ kvstore:
-+ consul:
-+ host: consul.loki-dev.svc.cluster.local:8500
-+ store: consul
-+ rule_path: /tmp/rules
-+ storage:
-+ gcs:
-+ bucket_name:
-+```
-+
-+## Ruler storage
-+
-+The Ruler supports six kinds of storage: configdb, azure, gcs, s3, swift, and local. Most kinds of storage work with the sharded Ruler configuration in an obvious way, i.e. configure all Rulers to use the same backend.
-+
-+The local implementation reads the rule files off of the local filesystem. This is a read-only backend that does not support the creation and deletion of rules through [the API](https://grafana.com/docs/loki/latest/api/#Ruler). Despite the fact that it reads the local filesystem, this method can still be used in a sharded Ruler configuration if the operator takes care to load the same rules to every Ruler. For instance, this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every Ruler pod.
-+
-+A typical local configuration might look something like:
-+```
-+ -Ruler.storage.type=local
-+ -Ruler.storage.local.directory=/tmp/loki/rules
-+```
-+
-+With the above configuration, the Ruler would expect the following layout:
-+```
-+/tmp/loki/rules//rules1.yaml
-+ /rules2.yaml
-+```
-+Yaml files are expected to be in the [Prometheus format](#Prometheus_Compatible) but include LogQL expressions as specified in the beginning of this doc.
-+
-+## Future improvements
-+
-+There are a few things coming to increase the robustness of this service. In no particular order:
-+
-+- Recording rules.
-+- Backend metric stores adapters for generated alert and recording rule data. The first will likely be Cortex, as Loki is built atop it.
-+- Introduce LogQL v2.
-+
-+## Misc Details: Metrics backends vs in-memory
-+
-+Currently the Loki Ruler is decoupled from a backing Prometheus store. Generally, the result of evaluating rules as well as the history of the alert's state are stored as a time series. Loki is unable to store/retrieve these, in order to allow it to run independently of, for example, Prometheus. As a workaround, Loki keeps a small in-memory store whose purpose is to lazily load past evaluations when rescheduling or resharding Rulers. In the future, Loki will support optional metrics backends, allowing storage of these metrics for auditing & performance benefits.
-diff --git a/docs/sources/api/_index.md b/docs/sources/api/_index.md
-index 3c14e964fce90..3c083d89e82d3 100644
---- a/docs/sources/api/_index.md
-+++ b/docs/sources/api/_index.md
-@@ -41,8 +41,22 @@ The HTTP API includes the following endpoints:
- - [Series](#series)
- - [Examples](#examples-9)
- - [Statistics](#statistics)
--
--## Microservices Mode
-+ - [`GET /ruler/ring`](#ruler-ring-status)
-+ - [`GET /loki/api/v1/rules`](#list-rule-groups)
-+ - [`GET /loki/api/v1/rules/{namespace}`](#get-rule-groups-by-namespace)
-+ - [`GET /loki/api/v1/rules/{namespace}/{groupName}`](#get-rule-group)
-+ - [`POST /loki/api/v1/rules/{namespace}`](#set-rule-group)
-+ - [`DELETE /loki/api/v1/rules/{namespace}/{groupName}`](#delete-rule-group)
-+ - [`DELETE /loki/api/v1/rules/{namespace}`](#delete-namespace)
-+ - [`GET /api/prom/rules`](#list-rule-groups)
-+ - [`GET /api/prom/rules/{namespace}`](#get-rule-groups-by-namespace)
-+ - [`GET /api/prom/rules/{namespace}/{groupName}`](#get-rule-group)
-+ - [`POST /api/prom/rules/{namespace}`](#set-rule-group)
-+ - [`DELETE /api/prom/rules/{namespace}/{groupName}`](#delete-rule-group)
-+ - [`DELETE /api/prom/rules/{namespace}`](#delete-namespace)
-+ - [`GET /prometheus/api/v1/alerts`](#list-alerts)
-+
-+## Microservices mode
-
- When deploying Loki in microservices mode, the set of endpoints exposed by each
- component is different.
-@@ -95,9 +109,28 @@ And these endpoints are exposed by just the ingester:
-
- The API endpoints starting with `/loki/` are [Prometheus API-compatible](https://prometheus.io/docs/prometheus/latest/querying/api/) and the result formats can be used interchangeably.
-
-+These endpoints are exposed by the ruler:
-+
-+- [`GET /ruler/ring`](#ruler-ring-status)
-+- [`GET /api/v1/rules`](#list-rules)
-+- [`GET /api/v1/rules`](#list-rule-groups)
-+- [`GET /api/v1/rules/{namespace}`](#get-rule-groups-by-namespace)
-+- [`GET /api/v1/rules/{namespace}/{groupName}`](#get-rule-group)
-+- [`POST /api/v1/rules/{namespace}`](#set-rule-group)
-+- [`DELETE /api/v1/rules/{namespace}/{groupName}`](#delete-rule-group)
-+- [`DELETE /api/v1/rules/{namespace}`](#delete-namespace)
-+- [`GET /api/prom/rules`](#list-rules)
-+- [`GET /api/prom/rules`](#list-rule-groups)
-+- [`GET /api/prom/rules/{namespace}`](#get-rule-groups-by-namespace)
-+- [`GET /api/prom/rules/{namespace}/{groupName}`](#get-rule-group)
-+- [`POST /api/prom/rules/{namespace}`](#set-rule-group)
-+- [`DELETE /api/prom/rules/{namespace}/{groupName}`](#delete-rule-group)
-+- [`DELETE /api/prom/rules/{namespace}`](#delete-namespace)
-+- [`GET /prometheus/api/v1/alerts`](#list-alerts)
-+
- A [list of clients](../clients) can be found in the clients documentation.
-
--## Matrix, Vector, And Streams
-+## Matrix, vector, and streams
-
- Some Loki API endpoints return a result of a matrix, a vector, or a stream:
-
-@@ -936,3 +969,162 @@ The example belows show all possible statistics returned with their respective d
- }
- }
- ```
-+
-+## Ruler
-+
-+The ruler API endpoints require a backend object store to be configured to store the recording rules and alerts. The ruler API uses the concept of a ""namespace"" when creating rule groups. This is a stand-in for the name of the rule file in Prometheus. Rule groups must be named uniquely within a namespace.
-+
-+### Ruler ring status
-+
-+```
-+GET /ruler/ring
-+```
-+
-+Displays a web page with the ruler hash ring status, including the state, healthy and last heartbeat time of each ruler.
-+
-+### List rule groups
-+
-+```
-+GET /loki/api/v1/rules
-+```
-+
-+List all rules configured for the authenticated tenant. This endpoint returns a YAML dictionary with all the rule groups for each namespace and `200` status code on success.
-+
-+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._
-+
-+#### Example response
-+
-+```yaml
-+---
-+:
-+- name:
-+ interval:
-+ rules:
-+ - alert:
-+ expr:
-+ for:
-+ annotations:
-+ :
-+ labels:
-+ :
-+- name:
-+ interval:
-+ rules:
-+ - alert:
-+ expr:
-+ for:
-+ annotations:
-+ :
-+ labels:
-+ :
-+:
-+- name:
-+ interval:
-+ rules:
-+ - alert:
-+ expr:
-+ for:
-+ annotations:
-+ :
-+ labels:
-+ :
-+```
-+
-+### Get rule groups by namespace
-+
-+```
-+GET /loki/api/v1/rules/{namespace}
-+```
-+
-+Returns the rule groups defined for a given namespace.
-+
-+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._
-+
-+#### Example response
-+
-+```yaml
-+name:
-+interval:
-+rules:
-+ - alert:
-+ expr:
-+ for:
-+ annotations:
-+ :
-+ labels:
-+ :
-+```
-+
-+### Get rule group
-+
-+```
-+GET /loki/api/v1/rules/{namespace}/{groupName}
-+```
-+
-+Returns the rule group matching the request namespace and group name.
-+
-+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._
-+
-+### Set rule group
-+
-+```
-+POST /loki/api/v1/rules/{namespace}
-+```
-+
-+Creates or updates a rule group. This endpoint expects a request with `Content-Type: application/yaml` header and the rules **YAML** definition in the request body, and returns `202` on success.
-+
-+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._
-+
-+#### Example request
-+
-+Request headers:
-+- `Content-Type: application/yaml`
-+
-+Request body:
-+
-+```yaml
-+name:
-+interval:
-+rules:
-+ - alert:
-+ expr:
-+ for:
-+ annotations:
-+ :
-+ labels:
-+ :
-+```
-+
-+### Delete rule group
-+
-+```
-+DELETE /loki/api/v1/rules/{namespace}/{groupName}
-+
-+```
-+
-+Deletes a rule group by namespace and group name. This endpoint returns `202` on success.
-+
-+### Delete namespace
-+
-+```
-+DELETE /loki/api/v1/rules/{namespace}
-+```
-+
-+Deletes all the rule groups in a namespace (including the namespace itself). This endpoint returns `202` on success.
-+
-+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._
-+
-+_Requires [authentication](#authentication)._
-+
-+
-+### List alerts
-+
-+```
-+GET /prometheus/api/v1/alerts
-+```
-+
-+Prometheus-compatible rules endpoint to list all active alerts.
-+
-+_For more information, please check out the Prometheus [alerts](https://prometheus.io/docs/prometheus/latest/querying/api/#alerts) documentation._
-+
-+_This experimental endpoint is disabled by default and can be enabled via the `-experimental.ruler.enable-api` CLI flag (or its respective YAML config option)._
-diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
-index 1d333b5f4dbd0..796525ea38ce8 100644
---- a/docs/sources/configuration/_index.md
-+++ b/docs/sources/configuration/_index.md
-@@ -18,7 +18,8 @@ Configuration examples can be found in the [Configuration Examples](examples/) d
- - [querier_config](#querier_config)
- - [query_frontend_config](#query_frontend_config)
- - [queryrange_config](#queryrange_config)
-- - [`frontend_worker_config`](#frontend_worker_config)
-+ - [ruler_config](#ruler_config)
-+ - [frontend_worker_config](#frontend_worker_config)
- - [ingester_client_config](#ingester_client_config)
- - [ingester_config](#ingester_config)
- - [consul_config](#consul_config)
-@@ -103,6 +104,9 @@ Supported contents and default values of `loki.yaml`:
- # query-frontend.
- [query_range: ]
-
-+# The ruler_config configures the Loki ruler.
-+[ruler: ]
-+
- # Configures how the distributor will connect to ingesters. Only appropriate
- # when running all modules, the distributor, or the querier.
- [ingester_client: ]
-@@ -332,7 +336,339 @@ results_cache:
- [parallelise_shardable_queries: | default = false]
- ```
-
--## `frontend_worker_config`
-+## `ruler_config`
-+
-+The `ruler_config` configures the Loki ruler.
-+
-+```yaml
-+# URL of alerts return path.
-+# CLI flag: -ruler.external.url
-+[external_url: | default = ]
-+
-+ruler_client:
-+ # Path to the client certificate file, which will be used for authenticating
-+ # with the server. Also requires the key path to be configured.
-+ # CLI flag: -ruler.client.tls-cert-path
-+ [tls_cert_path: | default = """"]
-+
-+ # Path to the key file for the client certificate. Also requires the client
-+ # certificate to be configured.
-+ # CLI flag: -ruler.client.tls-key-path
-+ [tls_key_path: | default = """"]
-+
-+ # Path to the CA certificates file to validate server certificate against. If
-+ # not set, the host's root CA certificates are used.
-+ # CLI flag: -ruler.client.tls-ca-path
-+ [tls_ca_path: | default = """"]
-+
-+ # Skip validating server certificate.
-+ # CLI flag: -ruler.client.tls-insecure-skip-verify
-+ [tls_insecure_skip_verify: | default = false]
-+
-+# How frequently to evaluate rules
-+# CLI flag: -ruler.evaluation-interval
-+[evaluation_interval: | default = 1m]
-+
-+# How frequently to poll for rule changes
-+# CLI flag: -ruler.poll-interval
-+[poll_interval: | default = 1m]
-+
-+storage:
-+ # Method to use for backend rule storage (azure, gcs, s3, swift, local)
-+ # CLI flag: -ruler.storage.type
-+ [type: ]
-+
-+ azure:
-+ # Azure Cloud environment. Supported values are: AzureGlobal,
-+ # AzureChinaCloud, AzureGermanCloud, AzureUSGovernment.
-+ # CLI flag: -ruler.storage.azure.environment
-+ [environment: | default = ""AzureGlobal""]
-+
-+ # Name of the blob container used to store chunks. This container must be
-+ # created before running cortex.
-+ # CLI flag: -ruler.storage.azure.container-name
-+ [container_name: | default = ""cortex""]
-+
-+ # The Microsoft Azure account name to be used
-+ # CLI flag: -ruler.storage.azure.account-name
-+ [account_name: | default = """"]
-+
-+ # The Microsoft Azure account key to use.
-+ # CLI flag: -ruler.storage.azure.account-key
-+ [account_key: | default = """"]
-+
-+ # Preallocated buffer size for downloads.
-+ # CLI flag: -ruler.storage.azure.download-buffer-size
-+ [download_buffer_size: | default = 512000]
-+
-+ # Preallocated buffer size for uploads.
-+ # CLI flag: -ruler.storage.azure.upload-buffer-size
-+ [upload_buffer_size: | default = 256000]
-+
-+ # Number of buffers used to upload a chunk.
-+ # CLI flag: -ruler.storage.azure.download-buffer-count
-+ [upload_buffer_count: | default = 1]
-+
-+ # Timeout for requests made against azure blob storage.
-+ # CLI flag: -ruler.storage.azure.request-timeout
-+ [request_timeout: | default = 30s]
-+
-+ # Number of retries for a request which times out.
-+ # CLI flag: -ruler.storage.azure.max-retries
-+ [max_retries: | default = 5]
-+
-+ # Minimum time to wait before retrying a request.
-+ # CLI flag: -ruler.storage.azure.min-retry-delay
-+ [min_retry_delay: | default = 10ms]
-+
-+ # Maximum time to wait before retrying a request.
-+ # CLI flag: -ruler.storage.azure.max-retry-delay
-+ [max_retry_delay: | default = 500ms]
-+
-+ gcs:
-+ # Name of GCS bucket to put chunks in.
-+ # CLI flag: -ruler.storage.gcs.bucketname
-+ [bucket_name: | default = """"]
-+
-+ # The size of the buffer that the GCS client uses for each PUT request. 0 to disable
-+ # buffering.
-+ # CLI flag: -ruler.storage.gcs.chunk-buffer-size
-+ [chunk_buffer_size: | default = 0]
-+
-+ # The duration after which the requests to GCS should be timed out.
-+ # CLI flag: -ruler.storage.gcs.request-timeout
-+ [request_timeout: | default = 0s]
-+
-+ s3:
-+ # S3 endpoint URL with escaped Key and Secret encoded. If only region is
-+ # specified as a host, proper endpoint will be deduced. Use
-+ # inmemory:/// to use a mock in-memory implementation.
-+ # CLI flag: -ruler.storage.s3.url
-+ [s3: | default = ]
-+
-+ # Set this to `true` to force the request to use path-style addressing.
-+ # CLI flag: -ruler.storage.s3.force-path-style
-+ [s3forcepathstyle: | default = false]
-+
-+ # Comma separated list of bucket names to evenly distribute chunks over.
-+ # Overrides any buckets specified in s3.url flag
-+ # CLI flag: -ruler.storage.s3.buckets
-+ [bucketnames: | default = """"]
-+
-+ # S3 Endpoint to connect to.
-+ # CLI flag: -ruler.storage.s3.endpoint
-+ [endpoint: | default = """"]
-+
-+ # AWS region to use.
-+ # CLI flag: -ruler.storage.s3.region
-+ [region: | default = """"]
-+
-+ # AWS Access Key ID
-+ # CLI flag: -ruler.storage.s3.access-key-id
-+ [access_key_id: | default = """"]
-+
-+ # AWS Secret Access Key
-+ # CLI flag: -ruler.storage.s3.secret-access-key
-+ [secret_access_key: | default = """"]
-+
-+ # Disable https on S3 connection.
-+ # CLI flag: -ruler.storage.s3.insecure
-+ [insecure: | default = false]
-+
-+ # Enable AES256 AWS server-side encryption
-+ # CLI flag: -ruler.storage.s3.sse-encryption
-+ [sse_encryption: | default = false]
-+
-+ http_config:
-+ # The maximum amount of time an idle connection will be held open.
-+ # CLI flag: -ruler.storage.s3.http.idle-conn-timeout
-+ [idle_conn_timeout: | default = 1m30s]
-+
-+ # If non-zero, specifies the amount of time to wait for a server's
-+ # response headers after fully writing the request.
-+ # CLI flag: -ruler.storage.s3.http.response-header-timeout
-+ [response_header_timeout: | default = 0s]
-+
-+ # Set to false to skip verifying the certificate chain and hostname.
-+ # CLI flag: -ruler.storage.s3.http.insecure-skip-verify
-+ [insecure_skip_verify: | default = false]
-+
-+ swift:
-+ # Openstack authentication URL.
-+ # CLI flag: -ruler.storage.swift.auth-url
-+ [auth_url: | default = """"]
-+
-+ # Openstack username for the api.
-+ # CLI flag: -ruler.storage.swift.username
-+ [username: | default = """"]
-+
-+ # Openstack user's domain name.
-+ # CLI flag: -ruler.storage.swift.user-domain-name
-+ [user_domain_name: | default = """"]
-+
-+ # Openstack user's domain ID.
-+ # CLI flag: -ruler.storage.swift.user-domain-id
-+ [user_domain_id: | default = """"]
-+
-+ # Openstack user ID for the API.
-+ # CLI flag: -ruler.storage.swift.user-id
-+ [user_id: | default = """"]
-+
-+ # Openstack API key.
-+ # CLI flag: -ruler.storage.swift.password
-+ [password: | default = """"]
-+
-+ # Openstack user's domain ID.
-+ # CLI flag: -ruler.storage.swift.domain-id
-+ [domain_id: | default = """"]
-+
-+ # Openstack user's domain name.
-+ # CLI flag: -ruler.storage.swift.domain-name
-+ [domain_name: | default = """"]
-+
-+ # Openstack project ID (v2,v3 auth only).
-+ # CLI flag: -ruler.storage.swift.project-id
-+ [project_id: | default = """"]
-+
-+ # Openstack project name (v2,v3 auth only).
-+ # CLI flag: -ruler.storage.swift.project-name
-+ [project_name: | default = """"]
-+
-+ # ID of the project's domain (v3 auth only), only needed if it differs
-+ # from the user domain.
-+ # CLI flag: -ruler.storage.swift.project-domain-id
-+ [project_domain_id: | default = """"]
-+
-+ # Name of the project's domain (v3 auth only), only needed if it differs
-+ # from the user domain.
-+ # CLI flag: -ruler.storage.swift.project-domain-name
-+ [project_domain_name: | default = """"]
-+
-+ # Openstack Region to use, e.g. LON, ORD - default is to use the first region (v2,v3
-+ # auth only)
-+ # CLI flag: -ruler.storage.swift.region-name
-+ [region_name: | default = """"]
-+
-+ # Name of the Swift container to put chunks in.
-+ # CLI flag: -ruler.storage.swift.container-name
-+ [container_name: | default = ""cortex""]
-+
-+ local:
-+ # Directory to scan for rules
-+ # CLI flag: -ruler.storage.local.directory
-+ [directory: | default = """"]
-+
-+# File path to store temporary rule files
-+# CLI flag: -ruler.rule-path
-+[rule_path: | default = ""/rules""]
-+
-+# Comma-separated list of Alertmanager URLs to send notifications to.
-+# Each Alertmanager URL is treated as a separate group in the configuration.
-+# Multiple Alertmanagers in HA per group can be supported by using DNS
-+# resolution via -ruler.alertmanager-discovery.
-+# CLI flag: -ruler.alertmanager-url
-+[alertmanager_url: | default = """"]
-+
-+# Use DNS SRV records to discover Alertmanager hosts.
-+# CLI flag: -ruler.alertmanager-discovery
-+[enable_alertmanager_discovery: | default = false]
-+
-+# How long to wait between refreshing DNS resolutions of Alertmanager hosts.
-+# CLI flag: -ruler.alertmanager-refresh-interval
-+[alertmanager_refresh_interval: | default = 1m]
-+
-+# If enabled, then requests to Alertmanager use the v2 API.
-+# CLI flag: -ruler.alertmanager-use-v2
-+[enable_alertmanager_v2: | default = false]
-+
-+# Capacity of the queue for notifications to be sent to the Alertmanager.
-+# CLI flag: -ruler.notification-queue-capacity
-+[notification_queue_capacity: | default = 10000]
-+
-+# HTTP timeout duration when sending notifications to the Alertmanager.
-+# CLI flag: -ruler.notification-timeout
-+[notification_timeout: | default = 10s]
-+
-+# Max time to tolerate outage for restoring ""for"" state of alert.
-+# CLI flag: -ruler.for-outage-tolerance
-+[for_outage_tolerance: | default = 1h]
-+
-+# Minimum duration between alert and restored ""for"" state. This is maintained
-+# only for alerts with configured ""for"" time greater than the grace period.
-+# CLI flag: -ruler.for-grace-period
-+[for_grace_period: | default = 10m]
-+
-+# Minimum amount of time to wait before resending an alert to Alertmanager.
-+# CLI flag: -ruler.resend-delay
-+[resend_delay: | default = 1m]
-+
-+# Distribute rule evaluation using ring backend.
-+# CLI flag: -ruler.enable-sharding
-+[enable_sharding: | default = false]
-+
-+# Time to spend searching for a pending ruler when shutting down.
-+# CLI flag: -ruler.search-pending-for
-+[search_pending_for: | default = 5m]
-+
-+ring:
-+ kvstore:
-+ # Backend storage to use for the ring. Supported values are: consul, etcd,
-+ # inmemory, memberlist, multi.
-+ # CLI flag: -ruler.ring.store
-+ [store: | default = ""consul""]
-+
-+ # The prefix for the keys in the store. Should end with a /.
-+ # CLI flag: -ruler.ring.prefix
-+ [prefix: | default = ""rulers/""]
-+
-+ # The consul_config configures the consul client.
-+ # The CLI flags prefix for this block config is: ruler.ring
-+ [consul: ]
-+
-+ # The etcd_config configures the etcd client.
-+ # The CLI flags prefix for this block config is: ruler.ring
-+ [etcd: ]
-+
-+ multi:
-+ # Primary backend storage used by multi-client.
-+ # CLI flag: -ruler.ring.multi.primary
-+ [primary: | default = """"]
-+
-+ # Secondary backend storage used by multi-client.
-+ # CLI flag: -ruler.ring.multi.secondary
-+ [secondary: | default = """"]
-+
-+ # Mirror writes to secondary store.
-+ # CLI flag: -ruler.ring.multi.mirror-enabled
-+ [mirror_enabled: | default = false]
-+
-+ # Timeout for storing value to secondary store.
-+ # CLI flag: -ruler.ring.multi.mirror-timeout
-+ [mirror_timeout: | default = 2s]
-+
-+ # Period at which to heartbeat to the ring.
-+ # CLI flag: -ruler.ring.heartbeat-period
-+ [heartbeat_period: | default = 5s]
-+
-+ # The heartbeat timeout after which rulers are considered unhealthy within the
-+ # ring.
-+ # CLI flag: -ruler.ring.heartbeat-timeout
-+ [heartbeat_timeout: | default = 1m]
-+
-+ # Number of tokens for each ingester.
-+ # CLI flag: -ruler.ring.num-tokens
-+ [num_tokens: | default = 128]
-+
-+# Period with which to attempt to flush rule groups.
-+# CLI flag: -ruler.flush-period
-+[flush_period: | default = 1m]
-+
-+# Enable the Ruler API.
-+# CLI flag: -experimental.ruler.enable-api
-+[enable_api: | default = false]
-+```
-+
-+## frontend_worker_config
-
- The `frontend_worker_config` configures the worker - running within the Loki querier - picking up and executing queries enqueued by the query-frontend.
-
-diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
-index 8f5b24e28b26a..78d6b9f075879 100644
---- a/pkg/loki/loki.go
-+++ b/pkg/loki/loki.go
-@@ -349,7 +349,7 @@ func (t *Loki) setupModuleManager() error {
- TableManager: {Server},
- Compactor: {Server},
- IngesterQuerier: {Ring},
-- All: {Querier, Ingester, Distributor, TableManager},
-+ All: {Querier, Ingester, Distributor, TableManager, Ruler},
- }
-
- // Add IngesterQuerier as a dependency for store when target is either ingester or querier.",unknown,"Ruler docs + single binary inclusion (#2637)
-
-* starts alerting docs
-
-* ruler in single binary
-
-* make docs interactive
-
-* alerting docs
-
-* ruler prom alerts endpoint
-
-* Apply suggestions from code review
-
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-
-* doc fixes
-
-* capitalize ruler
-
-* removes double spaces
-
-* Update docs/sources/alerting/_index.md
-
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-
-* Apply suggestions from code review
-
-Co-authored-by: Ed Welch
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-
-Co-authored-by: Diana Payton <52059945+oddlittlebird@users.noreply.github.com>
-Co-authored-by: Ed Welch "
-03d4238700bafa548c1b9d8d6a4b3707a3d8b754,2022-04-15 22:34:58,Karen Miller,"Fix SSD Docker installation (#5916)
-
-* Fix SSD Docker installation
-
-* Clarify endpoints and ports",False,"diff --git a/docs/sources/installation/simple-scalable-docker.md b/docs/sources/installation/simple-scalable-docker.md
-index 60faf08bb0f9d..6a5d100fe3c83 100644
---- a/docs/sources/installation/simple-scalable-docker.md
-+++ b/docs/sources/installation/simple-scalable-docker.md
-@@ -45,8 +45,12 @@ docker-compose up
-
- The running Docker containers use the directory's configuration files.
-
--Navigate to http://localhost:3100/ready to check for cluster readiness.
--Navigate to http://localhost:3100/metrics to view the cluster metrics.
-+Navigate to http://localhost:3101/ready to check for read container readiness.
-+Navigate to http://localhost:3101/metrics to view read container metrics.
-+
-+Navigate to http://localhost:3102/ready to check for write container readiness.
-+Navigate to http://localhost:3102/metrics to view write container metrics.
-+
- Navigate to http://localhost:3000 for the Grafana instance that has Loki configured as a datasource.
-
- By default, the image runs processes as user loki with UID `10001` and GID `10001`.
-diff --git a/production/simple-scalable/docker-compose.yaml b/production/simple-scalable/docker-compose.yaml
-index cc6caf9d36beb..c34aeebe58019 100644
---- a/production/simple-scalable/docker-compose.yaml
-+++ b/production/simple-scalable/docker-compose.yaml
-@@ -9,7 +9,7 @@ services:
- image: grafana/loki:2.5.0
- command: ""-config.file=/etc/loki/config.yaml -target=read""
- ports:
-- - 3100
-+ - 3101:3100
- - 7946
- - 9095
- volumes:
-@@ -25,7 +25,7 @@ services:
- image: grafana/loki:2.5.0
- command: ""-config.file=/etc/loki/config.yaml -target=write""
- ports:
-- - 3100
-+ - 3102:3100
- - 7946
- - 9095
- volumes:",unknown,"Fix SSD Docker installation (#5916)
-
-* Fix SSD Docker installation
-
-* Clarify endpoints and ports"
-011692c1471984a956f86f58e446aefcfa5eee2f,2023-05-01 13:36:10,Dmitry Misharov,"/loki/api/v1/delete is routed to backend url (#9336)
-
-**What this PR does / why we need it**:
-This PR adds a routing rule for `/loki/api/v1/delete` endpoint to
-`gateway` component.
-
-**Which issue(s) this PR fixes**:
-Fixes #9325
-
-**Special notes for your reviewer**:
-
-**Checklist**
-- [x] Reviewed the
-[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
-guide (**required**)
-- [ ] Documentation added
-- [ ] Tests updated
-- [ ] `CHANGELOG.md` updated
-- [ ] Changes that require user attention or interaction to upgrade are
-documented in `docs/sources/upgrading/_index.md`",False,"diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
-index bec9c665269a6..b5a5c63a5ced1 100644
---- a/production/helm/loki/templates/_helpers.tpl
-+++ b/production/helm/loki/templates/_helpers.tpl
-@@ -634,6 +634,10 @@ http {
- proxy_pass {{ $backendUrl }}$request_uri;
- }
-
-+ location ~ /loki/api/v1/delete.* {
-+ proxy_pass {{ $backendUrl }}$request_uri;
-+ }
-+
- location ~ /distributor/.* {
- proxy_pass {{ $writeUrl }}$request_uri;
- }",unknown,"/loki/api/v1/delete is routed to backend url (#9336)
-
-**What this PR does / why we need it**:
-This PR adds a routing rule for `/loki/api/v1/delete` endpoint to
-`gateway` component.
-
-**Which issue(s) this PR fixes**:
-Fixes #9325
-
-**Special notes for your reviewer**:
-
-**Checklist**
-- [x] Reviewed the
-[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
-guide (**required**)
-- [ ] Documentation added
-- [ ] Tests updated
-- [ ] `CHANGELOG.md` updated
-- [ ] Changes that require user attention or interaction to upgrade are
-documented in `docs/sources/upgrading/_index.md`"
-c3d3f2ba777a9f57a95e5b2ec67b641af9d69922,2020-06-15 23:45:16,Fredrik Enestad,docs: BoltDB typo (#2217),False,"diff --git a/docs/operations/storage/README.md b/docs/operations/storage/README.md
-index effa70216166f..c3d37917bc6ac 100644
---- a/docs/operations/storage/README.md
-+++ b/docs/operations/storage/README.md
-@@ -26,7 +26,7 @@ The following are supported for the index:
- * [Google Bigtable](https://cloud.google.com/bigtable)
- * [Apache Cassandra](https://cassandra.apache.org)
- * [BoltDB](https://github.com/boltdb/bolt) (doesn't work when clustering Loki)
--* [Boltb-Shipper](boltdb-shipper.md) EXPERIMENTAL index store which stores boltdb index files in the object store
-+* [BoltDB Shipper](boltdb-shipper.md) EXPERIMENTAL index store which stores boltdb index files in the object store
-
- The following are supported for the chunks:",docs,BoltDB typo (#2217)
-6f491233cae226d54d190521d2b935249d88ad05,2024-09-03 17:59:06,renovate[bot],"fix(deps): update aws-sdk-go-v2 monorepo (#13986)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
-index fb35609574b72..9679448043513 100644
---- a/tools/lambda-promtail/go.mod
-+++ b/tools/lambda-promtail/go.mod
-@@ -5,8 +5,8 @@ go 1.22
- require (
- github.com/aws/aws-lambda-go v1.47.0
- github.com/aws/aws-sdk-go-v2 v1.30.4
-- github.com/aws/aws-sdk-go-v2/config v1.27.28
-- github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0
-+ github.com/aws/aws-sdk-go-v2/config v1.27.31
-+ github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0
- github.com/go-kit/log v0.2.1
- github.com/gogo/protobuf v1.3.2
- github.com/golang/snappy v0.0.4
-@@ -24,7 +24,7 @@ require (
- github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
- github.com/armon/go-metrics v0.4.1 // indirect
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
-- github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect
-+ github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
-@@ -36,7 +36,7 @@ require (
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect
-- github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect
-+ github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect
- github.com/aws/smithy-go v1.20.4 // indirect
- github.com/beorn7/perks v1.0.1 // indirect
- github.com/buger/jsonparser v1.1.1 // indirect
-diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
-index 2627682cc9454..17803d1c55389 100644
---- a/tools/lambda-promtail/go.sum
-+++ b/tools/lambda-promtail/go.sum
-@@ -52,10 +52,10 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDag
- github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
--github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg=
--github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs=
--github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM=
--github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c=
-+github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI=
-+github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM=
-+github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q=
-+github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g=
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE=
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI=
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
-@@ -74,14 +74,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c=
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I=
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY=
--github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c=
--github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI=
-+github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4=
-+github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI=
- github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c=
- github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM=
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI=
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac=
--github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8=
--github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0=
-+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8=
-+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0=
- github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
- github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
- github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=",fix,"update aws-sdk-go-v2 monorepo (#13986)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>"
-4cd0aedc76679e4b46ae7d591404debce5f9585b,2023-04-24 16:15:22,Danny Kopping,Fix output of limited response body when remote rule evaluation fails (#9253),False,"diff --git a/pkg/ruler/evaluator_remote.go b/pkg/ruler/evaluator_remote.go
-index 777022ba84351..4e26e0138733d 100644
---- a/pkg/ruler/evaluator_remote.go
-+++ b/pkg/ruler/evaluator_remote.go
-@@ -253,14 +253,15 @@ func (r *RemoteEvaluator) query(ctx context.Context, orgID, query string, ts tim
-
- fullBody := resp.Body
- // created a limited reader to avoid logging the entire response body should it be very large
-- limitedBody := io.LimitReader(bytes.NewReader(fullBody), 1024)
-+ limitedBody := io.LimitReader(bytes.NewReader(fullBody), 128)
-
- // TODO(dannyk): consider retrying if the rule has a very high interval, or the rule is very sensitive to missing samples
- // i.e. critical alerts or recording rules producing crucial RemoteEvaluatorMetrics series
- if resp.Code/100 != 2 {
- r.metrics.failedEvals.WithLabelValues(""upstream_error"", orgID).Inc()
-
-- level.Warn(log).Log(""msg"", ""rule evaluation failed with non-2xx response"", ""response_code"", resp.Code, ""response_body"", limitedBody)
-+ respBod, _ := io.ReadAll(limitedBody)
-+ level.Warn(log).Log(""msg"", ""rule evaluation failed with non-2xx response"", ""response_code"", resp.Code, ""response_body"", respBod)
- return nil, fmt.Errorf(""unsuccessful/unexpected response - status code %d"", resp.Code)
- }",unknown,Fix output of limited response body when remote rule evaluation fails (#9253)
-f0542c04e11aa714d43351ed4d86cda4f4bf40b6,2021-07-28 19:08:43,Danny Kopping,"Updating drone signature (#4072)
-
-Signed-off-by: Danny Kopping ",False,"diff --git a/.drone/drone.yml b/.drone/drone.yml
-index 89766b9f2516c..8108b45df82fe 100644
---- a/.drone/drone.yml
-+++ b/.drone/drone.yml
-@@ -973,6 +973,6 @@ get:
-
- ---
- kind: signature
--hmac: b9ca51f266b7895bd1ea53ca40721d65915472fde3dc25fb662968282bc8acd5
-+hmac: b70be41d1a7f91c11af945a34bdbdc7a4f7613cf830c13f5438dba0bf33a1ec5
-
- ...",unknown,"Updating drone signature (#4072)
-
-Signed-off-by: Danny Kopping "
-b27f7b946feefa23de028289182bf2211f1cd36a,2019-09-03 20:55:58,Robert Fratto,"Change label used to keep issues from being marked as stale to keepalive (#965)
-
-The label used previously, important, implied precedence rather than
-just a desire to keep an issue alive.",False,"diff --git a/.github/stale.yml b/.github/stale.yml
-index 6dd83d219f4e9..db13ca4fad5f7 100644
---- a/.github/stale.yml
-+++ b/.github/stale.yml
-@@ -6,7 +6,7 @@ daysUntilClose: 7
-
- # Labels that prevent issues from being marked as stale
- exemptLabels:
-- - important
-+ - keepalive
-
- # Label to use to identify a stale issue
- staleLabel: stale",unknown,"Change label used to keep issues from being marked as stale to keepalive (#965)
-
-The label used previously, important, implied precedence rather than
-just a desire to keep an issue alive."
-e382cfe95ddd8cb84b9d554d86799f9d14182f72,2024-12-10 03:04:01,renovate[bot],"fix(deps): update module github.com/axiomhq/hyperloglog to v0.2.1 (#15322)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/go.mod b/go.mod
-index 4e010d25c205b..bac9587705b5e 100644
---- a/go.mod
-+++ b/go.mod
-@@ -116,7 +116,7 @@ require (
- github.com/DmitriyVTitov/size v1.5.0
- github.com/IBM/go-sdk-core/v5 v5.18.1
- github.com/IBM/ibm-cos-sdk-go v1.12.0
-- github.com/axiomhq/hyperloglog v0.2.0
-+ github.com/axiomhq/hyperloglog v0.2.1
- github.com/buger/jsonparser v1.1.1
- github.com/d4l3k/messagediff v1.2.1
- github.com/dolthub/swiss v0.2.1
-@@ -171,6 +171,7 @@ require (
- github.com/gorilla/handlers v1.5.2 // indirect
- github.com/hashicorp/golang-lru v0.6.0 // indirect
- github.com/imdario/mergo v0.3.16 // indirect
-+ github.com/kamstrup/intmap v0.5.0 // indirect
- github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
- github.com/moby/docker-image-spec v1.3.1 // indirect
- github.com/moby/sys/userns v0.1.0 // indirect
-diff --git a/go.sum b/go.sum
-index 6cb4957429f32..f366e1c40596b 100644
---- a/go.sum
-+++ b/go.sum
-@@ -1006,8 +1006,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1Lce
- github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w=
- github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
- github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
--github.com/axiomhq/hyperloglog v0.2.0 h1:u1XT3yyY1rjzlWuP6NQIrV4bRYHOaqZaovqjcBEvZJo=
--github.com/axiomhq/hyperloglog v0.2.0/go.mod h1:GcgMjz9gaDKZ3G0UMS6Fq/VkZ4l7uGgcJyxA7M+omIM=
-+github.com/axiomhq/hyperloglog v0.2.1 h1:z+rouIlYdpZ+DVfnQigBimhQL6OKHIL3e8+hMiud5/c=
-+github.com/axiomhq/hyperloglog v0.2.1/go.mod h1:WCdOZ8PNJKNcBw3xFZ7iHlnUn1nDVHK/XToLjjmySh4=
- github.com/baidubce/bce-sdk-go v0.9.205 h1:9cx93gC4FSu3W3G4NkDfFl0XMUycCpvQN+nB3doNmvg=
- github.com/baidubce/bce-sdk-go v0.9.205/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
- github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
-@@ -2030,6 +2030,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
- github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
- github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
- github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-+github.com/kamstrup/intmap v0.5.0 h1:WY7OJQeG7Ujc9zpPTO6PraDGSveG9js9wCPoI2q8wJQ=
-+github.com/kamstrup/intmap v0.5.0/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
- github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
- github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
- github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-diff --git a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
-index 638b291cd23a9..24b39e43562aa 100644
---- a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
-+++ b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
-@@ -18,7 +18,7 @@ type Sketch struct {
- p uint8
- m uint32
- alpha float64
-- tmpSet set
-+ tmpSet *set
- sparseList *compressedList
- regs []uint8
- }
-@@ -45,7 +45,7 @@ func NewSketch(precision uint8, sparse bool) (*Sketch, error) {
- alpha: alpha(float64(m)),
- }
- if sparse {
-- s.tmpSet = set{}
-+ s.tmpSet = newSet(0)
- s.sparseList = newCompressedList(0)
- } else {
- s.regs = make([]uint8, m)
-@@ -65,7 +65,7 @@ func (sk *Sketch) Clone() *Sketch {
- }
-
- func (sk *Sketch) maybeToNormal() {
-- if uint32(len(sk.tmpSet))*100 > sk.m {
-+ if uint32(sk.tmpSet.Len())*100 > sk.m {
- sk.mergeSparse()
- if uint32(sk.sparseList.Len()) > sk.m {
- sk.toNormal()
-@@ -90,9 +90,7 @@ func (sk *Sketch) Merge(other *Sketch) error {
- }
-
- func (sk *Sketch) mergeSparseSketch(other *Sketch) {
-- for k := range other.tmpSet {
-- sk.tmpSet.add(k)
-- }
-+ sk.tmpSet.Merge(other.tmpSet)
- for iter := other.sparseList.Iter(); iter.HasNext(); {
- sk.tmpSet.add(iter.Next())
- }
-@@ -105,10 +103,10 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) {
- }
-
- if other.sparse() {
-- for k := range other.tmpSet {
-+ other.tmpSet.ForEach(func(k uint32) {
- i, r := decodeHash(k, other.p, pp)
- sk.insert(i, r)
-- }
-+ })
- for iter := other.sparseList.Iter(); iter.HasNext(); {
- i, r := decodeHash(iter.Next(), other.p, pp)
- sk.insert(i, r)
-@@ -123,7 +121,7 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) {
- }
-
- func (sk *Sketch) toNormal() {
-- if len(sk.tmpSet) > 0 {
-+ if sk.tmpSet.Len() > 0 {
- sk.mergeSparse()
- }
-
-@@ -165,17 +163,17 @@ func (sk *Sketch) Estimate() uint64 {
- }
-
- func (sk *Sketch) mergeSparse() {
-- if len(sk.tmpSet) == 0 {
-+ if sk.tmpSet.Len() == 0 {
- return
- }
-
-- keys := make(uint64Slice, 0, len(sk.tmpSet))
-- for k := range sk.tmpSet {
-+ keys := make(uint64Slice, 0, sk.tmpSet.Len())
-+ sk.tmpSet.ForEach(func(k uint32) {
- keys = append(keys, k)
-- }
-+ })
- sort.Sort(keys)
-
-- newList := newCompressedList(4*len(sk.tmpSet) + len(sk.sparseList.b))
-+ newList := newCompressedList(4*sk.tmpSet.Len() + sk.sparseList.Len())
- for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {
- if !iter.HasNext() {
- newList.Append(keys[i])
-@@ -201,7 +199,7 @@ func (sk *Sketch) mergeSparse() {
- }
-
- sk.sparseList = newList
-- sk.tmpSet = set{}
-+ sk.tmpSet = newSet(0)
- }
-
- // MarshalBinary implements the encoding.BinaryMarshaler interface.
-@@ -277,7 +275,7 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error {
- sparse := data[3] == byte(1)
-
- // Make a newSketch Sketch if the precision doesn't match or if the Sketch was used
-- if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
-+ if sk.p != p || sk.regs != nil || sk.tmpSet.Len() > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
- newh, err := NewSketch(p, sparse)
- if err != nil {
- return err
-@@ -292,14 +290,14 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error {
-
- // Unmarshal the tmp_set.
- tssz := binary.BigEndian.Uint32(data[4:8])
-- sk.tmpSet = make(map[uint32]struct{}, tssz)
-+ sk.tmpSet = newSet(int(tssz))
-
- // We need to unmarshal tssz values in total, and each value requires us
- // to read 4 bytes.
- tsLastByte := int((tssz * 4) + 8)
- for i := 8; i < tsLastByte; i += 4 {
- k := binary.BigEndian.Uint32(data[i : i+4])
-- sk.tmpSet[k] = struct{}{}
-+ sk.tmpSet.add(k)
- }
-
- // Unmarshal the sparse Sketch.
-diff --git a/vendor/github.com/axiomhq/hyperloglog/sparse.go b/vendor/github.com/axiomhq/hyperloglog/sparse.go
-index 8c457d3278224..0151740df9859 100644
---- a/vendor/github.com/axiomhq/hyperloglog/sparse.go
-+++ b/vendor/github.com/axiomhq/hyperloglog/sparse.go
-@@ -2,6 +2,8 @@ package hyperloglog
-
- import (
- ""math/bits""
-+
-+ ""github.com/kamstrup/intmap""
- )
-
- func getIndex(k uint32, p, pp uint8) uint32 {
-@@ -34,37 +36,61 @@ func decodeHash(k uint32, p, pp uint8) (uint32, uint8) {
- return getIndex(k, p, pp), r
- }
-
--type set map[uint32]struct{}
-+type set struct {
-+ m *intmap.Set[uint32]
-+}
-+
-+func newSet(size int) *set {
-+ return &set{m: intmap.NewSet[uint32](size)}
-+}
-+
-+func (s *set) ForEach(fn func(v uint32)) {
-+ s.m.ForEach(func(v uint32) bool {
-+ fn(v)
-+ return true
-+ })
-+}
-+
-+func (s *set) Merge(other *set) {
-+ other.m.ForEach(func(v uint32) bool {
-+ s.m.Add(v)
-+ return true
-+ })
-+}
-+
-+func (s *set) Len() int {
-+ return s.m.Len()
-+}
-
--func (s set) add(v uint32) bool {
-- _, ok := s[v]
-- if ok {
-+func (s *set) add(v uint32) bool {
-+ if s.m.Has(v) {
- return false
- }
-- s[v] = struct{}{}
-+ s.m.Add(v)
- return true
- }
-
--func (s set) Clone() set {
-+func (s *set) Clone() *set {
- if s == nil {
- return nil
- }
-
-- newS := make(map[uint32]struct{}, len(s))
-- for k, v := range s {
-- newS[k] = v
-- }
-- return newS
-+ newS := intmap.NewSet[uint32](s.m.Len())
-+ s.m.ForEach(func(v uint32) bool {
-+ newS.Add(v)
-+ return true
-+ })
-+ return &set{m: newS}
- }
-
--func (s set) MarshalBinary() (data []byte, err error) {
-+func (s *set) MarshalBinary() (data []byte, err error) {
- // 4 bytes for the size of the set, and 4 bytes for each key.
- // list.
-- data = make([]byte, 0, 4+(4*len(s)))
-+ data = make([]byte, 0, 4+(4*s.m.Len()))
-
- // Length of the set. We only need 32 bits because the size of the set
- // couldn't exceed that on 32 bit architectures.
-- sl := len(s)
-+ sl := s.m.Len()
- data = append(data, []byte{
- byte(sl >> 24),
- byte(sl >> 16),
-@@ -73,14 +99,15 @@ func (s set) MarshalBinary() (data []byte, err error) {
- }...)
-
- // Marshal each element in the set.
-- for k := range s {
-+ s.m.ForEach(func(k uint32) bool {
- data = append(data, []byte{
- byte(k >> 24),
- byte(k >> 16),
- byte(k >> 8),
- byte(k),
- }...)
-- }
-+ return true
-+ })
-
- return data, nil
- }
-diff --git a/vendor/github.com/kamstrup/intmap/.gitignore b/vendor/github.com/kamstrup/intmap/.gitignore
-new file mode 100644
-index 0000000000000..1377554ebea6f
---- /dev/null
-+++ b/vendor/github.com/kamstrup/intmap/.gitignore
-@@ -0,0 +1 @@
-+*.swp
-diff --git a/vendor/github.com/kamstrup/intmap/LICENSE b/vendor/github.com/kamstrup/intmap/LICENSE
-new file mode 100644
-index 0000000000000..1eac633b0cd30
---- /dev/null
-+++ b/vendor/github.com/kamstrup/intmap/LICENSE
-@@ -0,0 +1,23 @@
-+Copyright (c) 2016, Brent Pedersen - Bioinformatics
-+All rights reserved.
-+
-+Redistribution and use in source and binary forms, with or without
-+modification, are permitted provided that the following conditions are met:
-+
-+* Redistributions of source code must retain the above copyright notice, this
-+ list of conditions and the following disclaimer.
-+
-+* Redistributions in binary form must reproduce the above copyright notice,
-+ this list of conditions and the following disclaimer in the documentation
-+ and/or other materials provided with the distribution.
-+
-+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
-+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/vendor/github.com/kamstrup/intmap/README.md b/vendor/github.com/kamstrup/intmap/README.md
-new file mode 100644
-index 0000000000000..e1a1e7003aff8
---- /dev/null
-+++ b/vendor/github.com/kamstrup/intmap/README.md
-@@ -0,0 +1,52 @@
-+Fast hashmap with integer keys for Golang
-+
-+[](https://godoc.org/github.com/kamstrup/intmap)
-+[](https://goreportcard.com/report/github.com/kamstrup/intmap)
-+
-+# intmap
-+
-+ import ""github.com/kamstrup/intmap""
-+
-+Package intmap is a fast hashmap implementation for Golang, specialized for maps with integer type keys.
-+The values can be of any type.
-+
-+It is a full port of https://github.com/brentp/intintmap to use type parameters (aka generics).
-+
-+It interleaves keys and values in the same underlying array to improve locality.
-+This is also known as open addressing with linear probing.
-+
-+It is up to 3X faster than the builtin map:
-+```
-+name time/op
-+Map64Fill-8 201ms ± 5%
-+IntIntMapFill-8 207ms ±31%
-+StdMapFill-8 371ms ±11%
-+Map64Get10PercentHitRate-8 148µs ±40%
-+IntIntMapGet10PercentHitRate-8 171µs ±50%
-+StdMapGet10PercentHitRate-8 171µs ±33%
-+Map64Get100PercentHitRate-8 4.50ms ± 5%
-+IntIntMapGet100PercentHitRate-8 4.82ms ± 6%
-+StdMapGet100PercentHitRate-8 15.5ms ±32%
-+```
-+
-+## Usage
-+
-+```go
-+m := intmap.New[int64,int64](32768)
-+m.Put(int64(1234), int64(-222))
-+m.Put(int64(123), int64(33))
-+
-+v, ok := m.Get(int64(222))
-+v, ok := m.Get(int64(333))
-+
-+m.Del(int64(222))
-+m.Del(int64(333))
-+
-+fmt.Println(m.Len())
-+
-+m.ForEach(func(k int64, v int64) {
-+ fmt.Printf(""key: %d, value: %d\n"", k, v)
-+})
-+
-+m.Clear() // all gone, but buffers kept
-+```
-diff --git a/vendor/github.com/kamstrup/intmap/map64.go b/vendor/github.com/kamstrup/intmap/map64.go
-new file mode 100644
-index 0000000000000..ec8084db9f776
---- /dev/null
-+++ b/vendor/github.com/kamstrup/intmap/map64.go
-@@ -0,0 +1,442 @@
-+// Package intmap contains a fast hashmap implementation for maps with keys of any integer type
-+package intmap
-+
-+import (
-+ ""iter""
-+ ""math""
-+)
-+
-+// IntKey is a type constraint for values that can be used as keys in Map
-+type IntKey interface {
-+ ~int | ~uint | ~int64 | ~uint64 | ~int32 | ~uint32 | ~int16 | ~uint16 | ~int8 | ~uint8 | ~uintptr
-+}
-+
-+type pair[K IntKey, V any] struct {
-+ K K
-+ V V
-+}
-+
-+const fillFactor64 = 0.7
-+
-+func phiMix64(x int) int {
-+ h := x * 0x9E3779B9
-+ return h ^ (h >> 16)
-+}
-+
-+// Map is a hashmap where the keys are some any integer type.
-+// It is valid to call methods that read a nil map, similar to a standard Go map.
-+// Methods valid on a nil map are Has, Get, Len, and ForEach.
-+type Map[K IntKey, V any] struct {
-+ data []pair[K, V] // key-value pairs
-+ size int
-+
-+ zeroVal V // value of 'zero' key
-+ hasZeroKey bool // do we have 'zero' key in the map?
-+}
-+
-+// New creates a new map with keys being any integer subtype.
-+// The map can store up to the given capacity before reallocation and rehashing occurs.
-+func New[K IntKey, V any](capacity int) *Map[K, V] {
-+ return &Map[K, V]{
-+ data: make([]pair[K, V], arraySize(capacity, fillFactor64)),
-+ }
-+}
-+
-+// Has checks if the given key exists in the map.
-+// Calling this method on a nil map will return false.
-+func (m *Map[K, V]) Has(key K) bool {
-+ if m == nil {
-+ return false
-+ }
-+
-+ if key == K(0) {
-+ return m.hasZeroKey
-+ }
-+
-+ idx := m.startIndex(key)
-+ p := m.data[idx]
-+
-+ if p.K == K(0) { // end of chain already
-+ return false
-+ }
-+ if p.K == key { // we check zero prior to this call
-+ return true
-+ }
-+
-+ // hash collision, seek next hash match, bailing on first empty
-+ for {
-+ idx = m.nextIndex(idx)
-+ p = m.data[idx]
-+ if p.K == K(0) {
-+ return false
-+ }
-+ if p.K == key {
-+ return true
-+ }
-+ }
-+}
-+
-+// Get returns the value if the key is found.
-+// If you just need to check for existence it is easier to use Has.
-+// Calling this method on a nil map will return the zero value for V and false.
-+func (m *Map[K, V]) Get(key K) (V, bool) {
-+ if m == nil {
-+ var zero V
-+ return zero, false
-+ }
-+
-+ if key == K(0) {
-+ if m.hasZeroKey {
-+ return m.zeroVal, true
-+ }
-+ var zero V
-+ return zero, false
-+ }
-+
-+ idx := m.startIndex(key)
-+ p := m.data[idx]
-+
-+ if p.K == K(0) { // end of chain already
-+ var zero V
-+ return zero, false
-+ }
-+ if p.K == key { // we check zero prior to this call
-+ return p.V, true
-+ }
-+
-+ // hash collision, seek next hash match, bailing on first empty
-+ for {
-+ idx = m.nextIndex(idx)
-+ p = m.data[idx]
-+ if p.K == K(0) {
-+ var zero V
-+ return zero, false
-+ }
-+ if p.K == key {
-+ return p.V, true
-+ }
-+ }
-+}
-+
-+// Put adds or updates key with value val.
-+func (m *Map[K, V]) Put(key K, val V) {
-+ if key == K(0) {
-+ if !m.hasZeroKey {
-+ m.size++
-+ }
-+ m.zeroVal = val
-+ m.hasZeroKey = true
-+ return
-+ }
-+
-+ idx := m.startIndex(key)
-+ p := &m.data[idx]
-+
-+ if p.K == K(0) { // end of chain already
-+ p.K = key
-+ p.V = val
-+ if m.size >= m.sizeThreshold() {
-+ m.rehash()
-+ } else {
-+ m.size++
-+ }
-+ return
-+ } else if p.K == key { // overwrite existing value
-+ p.V = val
-+ return
-+ }
-+
-+ // hash collision, seek next empty or key match
-+ for {
-+ idx = m.nextIndex(idx)
-+ p = &m.data[idx]
-+
-+ if p.K == K(0) {
-+ p.K = key
-+ p.V = val
-+ if m.size >= m.sizeThreshold() {
-+ m.rehash()
-+ } else {
-+ m.size++
-+ }
-+ return
-+ } else if p.K == key {
-+ p.V = val
-+ return
-+ }
-+ }
-+}
-+
-+// PutIfNotExists adds the key-value pair only if the key does not already exist
-+// in the map, and returns the current value associated with the key and a boolean
-+// indicating whether the value was newly added or not.
-+func (m *Map[K, V]) PutIfNotExists(key K, val V) (V, bool) {
-+ if key == K(0) {
-+ if m.hasZeroKey {
-+ return m.zeroVal, false
-+ }
-+ m.zeroVal = val
-+ m.hasZeroKey = true
-+ m.size++
-+ return val, true
-+ }
-+
-+ idx := m.startIndex(key)
-+ p := &m.data[idx]
-+
-+ if p.K == K(0) { // end of chain already
-+ p.K = key
-+ p.V = val
-+ m.size++
-+ if m.size >= m.sizeThreshold() {
-+ m.rehash()
-+ }
-+ return val, true
-+ } else if p.K == key {
-+ return p.V, false
-+ }
-+
-+ // hash collision, seek next hash match, bailing on first empty
-+ for {
-+ idx = m.nextIndex(idx)
-+ p = &m.data[idx]
-+
-+ if p.K == K(0) {
-+ p.K = key
-+ p.V = val
-+ m.size++
-+ if m.size >= m.sizeThreshold() {
-+ m.rehash()
-+ }
-+ return val, true
-+ } else if p.K == key {
-+ return p.V, false
-+ }
-+ }
-+}
-+
-+// ForEach iterates through key-value pairs in the map while the function f returns true.
-+// This method returns immediately if invoked on a nil map.
-+//
-+// The iteration order of a Map is not defined, so please avoid relying on it.
-+func (m *Map[K, V]) ForEach(f func(K, V) bool) {
-+ if m == nil {
-+ return
-+ }
-+
-+ if m.hasZeroKey && !f(K(0), m.zeroVal) {
-+ return
-+ }
-+ forEach64(m.data, f)
-+}
-+
-+// All returns an iterator over key-value pairs from m.
-+// The iterator returns immediately if invoked on a nil map.
-+//
-+// The iteration order of a Map is not defined, so please avoid relying on it.
-+func (m *Map[K, V]) All() iter.Seq2[K, V] {
-+ return m.ForEach
-+}
-+
-+// Keys returns an iterator over keys in m.
-+// The iterator returns immediately if invoked on a nil map.
-+//
-+// The iteration order of a Map is not defined, so please avoid relying on it.
-+func (m *Map[K, V]) Keys() iter.Seq[K] {
-+ return func(yield func(k K) bool) {
-+ if m == nil {
-+ return
-+ }
-+
-+ if m.hasZeroKey && !yield(K(0)) {
-+ return
-+ }
-+
-+ for _, p := range m.data {
-+ if p.K != K(0) && !yield(p.K) {
-+ return
-+ }
-+ }
-+ }
-+}
-+
-+// Values returns an iterator over values in m.
-+// The iterator returns immediately if invoked on a nil map.
-+//
-+// The iteration order of a Map is not defined, so please avoid relying on it.
-+func (m *Map[K, V]) Values() iter.Seq[V] {
-+ return func(yield func(v V) bool) {
-+ if m == nil {
-+ return
-+ }
-+
-+ if m.hasZeroKey && !yield(m.zeroVal) {
-+ return
-+ }
-+
-+ for _, p := range m.data {
-+ if p.K != K(0) && !yield(p.V) {
-+ return
-+ }
-+ }
-+ }
-+}
-+
-+// Clear removes all items from the map, but keeps the internal buffers for reuse.
-+func (m *Map[K, V]) Clear() {
-+ var zero V
-+ m.hasZeroKey = false
-+ m.zeroVal = zero
-+
-+ // compiles down to runtime.memclr()
-+ for i := range m.data {
-+ m.data[i] = pair[K, V]{}
-+ }
-+
-+ m.size = 0
-+}
-+
-+func (m *Map[K, V]) rehash() {
-+ oldData := m.data
-+ m.data = make([]pair[K, V], 2*len(m.data))
-+
-+ // reset size
-+ if m.hasZeroKey {
-+ m.size = 1
-+ } else {
-+ m.size = 0
-+ }
-+
-+ forEach64(oldData, func(k K, v V) bool {
-+ m.Put(k, v)
-+ return true
-+ })
-+}
-+
-+// Len returns the number of elements in the map.
-+// The length of a nil map is defined to be zero.
-+func (m *Map[K, V]) Len() int {
-+ if m == nil {
-+ return 0
-+ }
-+
-+ return m.size
-+}
-+
-+func (m *Map[K, V]) sizeThreshold() int {
-+ return int(math.Floor(float64(len(m.data)) * fillFactor64))
-+}
-+
-+func (m *Map[K, V]) startIndex(key K) int {
-+ return phiMix64(int(key)) & (len(m.data) - 1)
-+}
-+
-+func (m *Map[K, V]) nextIndex(idx int) int {
-+ return (idx + 1) & (len(m.data) - 1)
-+}
-+
-+func forEach64[K IntKey, V any](pairs []pair[K, V], f func(k K, v V) bool) {
-+ for _, p := range pairs {
-+ if p.K != K(0) && !f(p.K, p.V) {
-+ return
-+ }
-+ }
-+}
-+
-+// Del deletes a key and its value, returning true iff the key was found
-+func (m *Map[K, V]) Del(key K) bool {
-+ if key == K(0) {
-+ if m.hasZeroKey {
-+ m.hasZeroKey = false
-+ m.size--
-+ return true
-+ }
-+ return false
-+ }
-+
-+ idx := m.startIndex(key)
-+ p := m.data[idx]
-+
-+ if p.K == key {
-+ // any keys that were pushed back needs to be shifted nack into the empty slot
-+ // to avoid breaking the chain
-+ m.shiftKeys(idx)
-+ m.size--
-+ return true
-+ } else if p.K == K(0) { // end of chain already
-+ return false
-+ }
-+
-+ for {
-+ idx = m.nextIndex(idx)
-+ p = m.data[idx]
-+
-+ if p.K == key {
-+ // any keys that were pushed back needs to be shifted nack into the empty slot
-+ // to avoid breaking the chain
-+ m.shiftKeys(idx)
-+ m.size--
-+ return true
-+ } else if p.K == K(0) {
-+ return false
-+ }
-+
-+ }
-+}
-+
-+func (m *Map[K, V]) shiftKeys(idx int) int {
-+ // Shift entries with the same hash.
-+ // We need to do this on deletion to ensure we don't have zeroes in the hash chain
-+ for {
-+ var p pair[K, V]
-+ lastIdx := idx
-+ idx = m.nextIndex(idx)
-+ for {
-+ p = m.data[idx]
-+ if p.K == K(0) {
-+ m.data[lastIdx] = pair[K, V]{}
-+ return lastIdx
-+ }
-+
-+ slot := m.startIndex(p.K)
-+ if lastIdx <= idx {
-+ if lastIdx >= slot || slot > idx {
-+ break
-+ }
-+ } else {
-+ if lastIdx >= slot && slot > idx {
-+ break
-+ }
-+ }
-+ idx = m.nextIndex(idx)
-+ }
-+ m.data[lastIdx] = p
-+ }
-+}
-+
-+func nextPowerOf2(x uint32) uint32 {
-+ if x == math.MaxUint32 {
-+ return x
-+ }
-+
-+ if x == 0 {
-+ return 1
-+ }
-+
-+ x--
-+ x |= x >> 1
-+ x |= x >> 2
-+ x |= x >> 4
-+ x |= x >> 8
-+ x |= x >> 16
-+
-+ return x + 1
-+}
-+
-+func arraySize(exp int, fill float64) int {
-+ s := nextPowerOf2(uint32(math.Ceil(float64(exp) / fill)))
-+ if s < 2 {
-+ s = 2
-+ }
-+ return int(s)
-+}
-diff --git a/vendor/github.com/kamstrup/intmap/set.go b/vendor/github.com/kamstrup/intmap/set.go
-new file mode 100644
-index 0000000000000..b81ce224b6036
---- /dev/null
-+++ b/vendor/github.com/kamstrup/intmap/set.go
-@@ -0,0 +1,59 @@
-+package intmap
-+
-+import ""iter""
-+
-+// Set is a specialization of Map modelling a set of integers.
-+// Like Map, methods that read from the set are valid on the nil Set.
-+// This include Has, Len, and ForEach.
-+type Set[K IntKey] Map[K, struct{}]
-+
-+// NewSet creates a new Set with a given initial capacity.
-+func NewSet[K IntKey](capacity int) *Set[K] {
-+ return (*Set[K])(New[K, struct{}](capacity))
-+}
-+
-+// Add an element to the set. Returns true if the element was not already present.
-+func (s *Set[K]) Add(k K) bool {
-+ _, found := (*Map[K, struct{}])(s).PutIfNotExists(k, struct{}{})
-+ return found
-+}
-+
-+// Del deletes a key, returning true iff the key was found
-+func (s *Set[K]) Del(k K) bool {
-+ return (*Map[K, struct{}])(s).Del(k)
-+}
-+
-+// Clear removes all items from the Set, but keeps the internal buffers for reuse.
-+func (s *Set[K]) Clear() {
-+ (*Map[K, struct{}])(s).Clear()
-+}
-+
-+// Has returns true if the key is in the set.
-+// If the set is nil this method always return false.
-+func (s *Set[K]) Has(k K) bool {
-+ return (*Map[K, struct{}])(s).Has(k)
-+}
-+
-+// Len returns the number of elements in the set.
-+// If the set is nil this method return 0.
-+func (s *Set[K]) Len() int {
-+ return (*Map[K, struct{}])(s).Len()
-+}
-+
-+// ForEach iterates over the elements in the set while the visit function returns true.
-+// This method returns immediately if the set is nil.
-+//
-+// The iteration order of a Set is not defined, so please avoid relying on it.
-+func (s *Set[K]) ForEach(visit func(k K) bool) {
-+ (*Map[K, struct{}])(s).ForEach(func(k K, _ struct{}) bool {
-+ return visit(k)
-+ })
-+}
-+
-+// All returns an iterator over keys from the set.
-+// The iterator returns immediately if the set is nil.
-+//
-+// The iteration order of a Set is not defined, so please avoid relying on it.
-+func (s *Set[K]) All() iter.Seq[K] {
-+ return s.ForEach
-+}
-diff --git a/vendor/modules.txt b/vendor/modules.txt
-index 3168c6735b76c..7810d1ce504bf 100644
---- a/vendor/modules.txt
-+++ b/vendor/modules.txt
-@@ -462,8 +462,8 @@ github.com/aws/smithy-go/rand
- github.com/aws/smithy-go/time
- github.com/aws/smithy-go/transport/http
- github.com/aws/smithy-go/transport/http/internal/io
--# github.com/axiomhq/hyperloglog v0.2.0
--## explicit; go 1.21
-+# github.com/axiomhq/hyperloglog v0.2.1
-+## explicit; go 1.23
- github.com/axiomhq/hyperloglog
- # github.com/baidubce/bce-sdk-go v0.9.205
- ## explicit; go 1.11
-@@ -1169,6 +1169,9 @@ github.com/json-iterator/go
- # github.com/julienschmidt/httprouter v1.3.0
- ## explicit; go 1.7
- github.com/julienschmidt/httprouter
-+# github.com/kamstrup/intmap v0.5.0
-+## explicit; go 1.23
-+github.com/kamstrup/intmap
- # github.com/klauspost/compress v1.17.11
- ## explicit; go 1.21
- github.com/klauspost/compress",fix,"update module github.com/axiomhq/hyperloglog to v0.2.1 (#15322)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>"
-5f9fe83becf2d802b29468a41e7ae08b8d29edac,2024-11-04 20:53:43,Grot (@grafanabot),"chore( operator): community release 0.7.0 (#14109)
-
-Co-authored-by: loki-gh-app[bot] <160051081+loki-gh-app[bot]@users.noreply.github.com>",False,"diff --git a/.release-please-manifest.json b/.release-please-manifest.json
-index 96501106b6f6e..102cfe6531863 100644
---- a/.release-please-manifest.json
-+++ b/.release-please-manifest.json
-@@ -1,4 +1,4 @@
- {
- ""."": ""3.1.1"",
-- ""operator"": ""0.6.2""
-+ ""operator"": ""0.7.0""
- }
-diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
-index 7727779251bcc..17bc50ddc3194 100644
---- a/operator/CHANGELOG.md
-+++ b/operator/CHANGELOG.md
-@@ -1,5 +1,42 @@
- ## Main
-
-+## [0.7.0](https://github.com/grafana/loki/compare/operator/v0.6.2...operator/v0.7.0) (2024-11-01)
-+
-+
-+### ⚠ BREAKING CHANGES
-+
-+* **operator:** Provide default OTLP attribute configuration ([#14410](https://github.com/grafana/loki/issues/14410))
-+* **operator:** Rename loki api go module ([#14568](https://github.com/grafana/loki/issues/14568))
-+* **operator:** Migrate project layout to kubebuilder go/v4 ([#14447](https://github.com/grafana/loki/issues/14447))
-+
-+### Features
-+
-+* **operator:** Declare feature FIPS support for OpenShift only ([#14308](https://github.com/grafana/loki/issues/14308)) ([720c303](https://github.com/grafana/loki/commit/720c3037923c174e71a02d99d4bee6271428fbdb))
-+* **operator:** introduce 1x.pico size ([#14407](https://github.com/grafana/loki/issues/14407)) ([57de81d](https://github.com/grafana/loki/commit/57de81d8c27e221832790443cebaf141353c3e3f))
-+* **operator:** Provide default OTLP attribute configuration ([#14410](https://github.com/grafana/loki/issues/14410)) ([1b52387](https://github.com/grafana/loki/commit/1b5238721994c00764b6a7e7d63269c5b56d2480))
-+* **operator:** Update Loki operand to v3.2.1 ([#14526](https://github.com/grafana/loki/issues/14526)) ([5e970e5](https://github.com/grafana/loki/commit/5e970e50b166e73f5563e21c23db3ea99b24642e))
-+* **operator:** User-guide for OTLP configuration ([#14620](https://github.com/grafana/loki/issues/14620)) ([27b4071](https://github.com/grafana/loki/commit/27b40713540bd60918780cdd4cb645e6761427cb))
-+
-+
-+### Bug Fixes
-+
-+* **deps:** update module github.com/prometheus/client_golang to v1.20.5 ([#14655](https://github.com/grafana/loki/issues/14655)) ([e12f843](https://github.com/grafana/loki/commit/e12f8436b4080db54c6d31c6af38416c6fdd7eb4))
-+* **operator:** add 1x.pico OpenShift UI dropdown menu ([#14660](https://github.com/grafana/loki/issues/14660)) ([4687f37](https://github.com/grafana/loki/commit/4687f377db0a7ae07ffdea354582c882c10b72c4))
-+* **operator:** Add missing groupBy label for all rules on OpenShift ([#14279](https://github.com/grafana/loki/issues/14279)) ([ce7b2e8](https://github.com/grafana/loki/commit/ce7b2e89d9470e4e6a61a94f2b51ff8b938b5a5e))
-+* **operator:** correctly ignore again BlotDB dashboards ([#14587](https://github.com/grafana/loki/issues/14587)) ([4879d10](https://github.com/grafana/loki/commit/4879d106bbeea29e331ddb7c9a49274600190032))
-+* **operator:** Disable automatic discovery of service name ([#14506](https://github.com/grafana/loki/issues/14506)) ([3834c74](https://github.com/grafana/loki/commit/3834c74966b307411732cd3cbaf66305008b10eb))
-+* **operator:** Disable log level discovery for OpenShift tenancy modes ([#14613](https://github.com/grafana/loki/issues/14613)) ([5034d34](https://github.com/grafana/loki/commit/5034d34ad23451954ea2459c341456da8d93d020))
-+* **operator:** Fix building the size-calculator image ([#14573](https://github.com/grafana/loki/issues/14573)) ([a79b8fe](https://github.com/grafana/loki/commit/a79b8fe7802964cbb96bde75a7502a8b1e8a23ab))
-+* **operator:** Fix make build target for size-calculator ([#14551](https://github.com/grafana/loki/issues/14551)) ([e727187](https://github.com/grafana/loki/commit/e727187ec3be2f10c80e984d00c40dad0308b036))
-+* **operator:** Move OTLP attribute for statefulset name to stream labels ([#14630](https://github.com/grafana/loki/issues/14630)) ([5df3594](https://github.com/grafana/loki/commit/5df3594f791d77031c53d7b0f5b01191de8a23f2))
-+* **operator:** Use empty initiliazed pod status map when no pods ([#14314](https://github.com/grafana/loki/issues/14314)) ([6f533ed](https://github.com/grafana/loki/commit/6f533ed4386ee2db61680a9021934bfe9a9ba749))
-+
-+
-+### Code Refactoring
-+
-+* **operator:** Migrate project layout to kubebuilder go/v4 ([#14447](https://github.com/grafana/loki/issues/14447)) ([dbb3b6e](https://github.com/grafana/loki/commit/dbb3b6edc96f3545a946319c0324518800d286cf))
-+* **operator:** Rename loki api go module ([#14568](https://github.com/grafana/loki/issues/14568)) ([976d8ab](https://github.com/grafana/loki/commit/976d8ab81c1a79f35d7cec96f6a9c35a9947fa48))
-+
- ## [0.6.2](https://github.com/grafana/loki/compare/operator/v0.6.1...operator/v0.6.2) (2024-09-11)",chore,"community release 0.7.0 (#14109)
-
-Co-authored-by: loki-gh-app[bot] <160051081+loki-gh-app[bot]@users.noreply.github.com>"
-14a5e22c31f8cdd7b86ea3848cbc971f7a50a3c7,2020-11-23 13:07:53,Sandeep Sukhani,add missing ingester query routes in loki reads and operational dashboard (#2961),False,"diff --git a/production/loki-mixin/dashboards/dashboard-loki-operational.json b/production/loki-mixin/dashboards/dashboard-loki-operational.json
-index 401d51c1f1c05..f0e75caf63c0a 100644
---- a/production/loki-mixin/dashboards/dashboard-loki-operational.json
-+++ b/production/loki-mixin/dashboards/dashboard-loki-operational.json
-@@ -1534,17 +1534,17 @@
- ""steppedLine"": false,
- ""targets"": [
- {
-- ""expr"": ""histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\"", cluster=\""$cluster\""})) * 1e3"",
-+ ""expr"": ""histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"", cluster=\""$cluster\""})) * 1e3"",
- ""legendFormat"": "".99-{{route}}"",
- ""refId"": ""A""
- },
- {
-- ""expr"": ""histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\"", cluster=\""$cluster\""})) * 1e3"",
-+ ""expr"": ""histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"", cluster=\""$cluster\""})) * 1e3"",
- ""legendFormat"": "".9-{{route}}"",
- ""refId"": ""B""
- },
- {
-- ""expr"": ""histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\"", cluster=\""$cluster\""})) * 1e3"",
-+ ""expr"": ""histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"", cluster=\""$cluster\""})) * 1e3"",
- ""legendFormat"": "".5-{{route}}"",
- ""refId"": ""C""
- }
-@@ -1639,7 +1639,7 @@
- ""steppedLine"": false,
- ""targets"": [
- {
-- ""expr"": ""sum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", status_code!~\""5[0-9]{2}\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\""}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label\""}[5m])) by (route)"",
-+ ""expr"": ""sum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", status_code!~\""5[0-9]{2}\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\""}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\""$cluster\"", namespace=\""$namespace\"", job=\""$namespace/ingester\"", route=~\""/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\""}[5m])) by (route)"",
- ""interval"": """",
- ""legendFormat"": ""{{route}}"",
- ""refId"": ""A""
-diff --git a/production/loki-mixin/dashboards/loki-reads.libsonnet b/production/loki-mixin/dashboards/loki-reads.libsonnet
-index 1c5d45a826344..883c46e04b112 100644
---- a/production/loki-mixin/dashboards/loki-reads.libsonnet
-+++ b/production/loki-mixin/dashboards/loki-reads.libsonnet
-@@ -6,7 +6,7 @@ local utils = import 'mixin-utils/utils.libsonnet';
- local dashboards = self,
-
- local http_routes = 'loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values',
-- local grpc_routes = '/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series',
-+ local grpc_routes = '/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs',
-
- 'loki-reads.json': {
- local cfg = self,",unknown,add missing ingester query routes in loki reads and operational dashboard (#2961)
-1f6f4337cee64f1ef212bcc46cd6c7f429762ae7,2019-10-15 18:03:26,Cyril Tovena,"Add logql filter to match stages and drop capability (#1112)
-
-* Add logql filter to match stages and drop capability
-
-* use const string instead and remove unused value
-
-* Uses action property instead of drop_entries",False,"diff --git a/docs/clients/promtail/pipelines.md b/docs/clients/promtail/pipelines.md
-index b72a5a0fee2f0..8e05c056dd4af 100644
---- a/docs/clients/promtail/pipelines.md
-+++ b/docs/clients/promtail/pipelines.md
-@@ -18,7 +18,7 @@ stages:
- 2. Change the timestamp of the log line
- 3. Change the content of the log line
- 4. Create a metric based on the extracted data
--4. **Filtering stages** optionally apply a subset of stages based on some
-+4. **Filtering stages** optionally apply a subset of stages or drop entries based on some
- condition.
-
- Typical pipelines will start with a parsing stage (such as a
-@@ -28,7 +28,7 @@ something with that extracted data. The most common action stage will be a
- [labels](./stages/labels.md) stage to turn extracted data into a label.
-
- A common stage will also be the [match](./stages/match.md) stage to selectively
--apply stages based on the current labels.
-+apply stages or drop entries based on a [LogQL stream selector and filter expressions](../../logql.md).
-
- Note that pipelines can not currently be used to deduplicate logs; Loki will
- receive the same log line multiple times if, for example:
-@@ -76,9 +76,9 @@ scrape_configs:
- source: timestamp
-
- # This stage is only going to run if the scraped target has a label of
-- # ""name"" with a value of ""nginx"".
-+ # ""name"" with a value of ""nginx"" and if the log line contains the word ""GET""
- - match:
-- selector: '{name=""nginx""}'
-+ selector: '{name=""nginx""} |= ""GET""'
- stages:
- # This regex stage extracts a new output by matching against some
- # values and capturing the rest.
-@@ -126,10 +126,10 @@ scrape_configs:
- level:
- component:
-
-- # This stage will only run if the scraped target has a label of ""app""
-- # and a value of ""some-app"".
-+ # This stage will only run if the scraped target has a label ""app""
-+ # with a value of ""some-app"" and the log line doesn't contains the word ""info""
- - match:
-- selector: '{app=""some-app""}'
-+ selector: '{app=""some-app""} != ""info""'
- stages:
- # The regex stage tries to extract a Go panic by looking for panic:
- # in the log message.
-@@ -215,4 +215,3 @@ Action stages:
- Filtering stages:
-
- * [match](./stages/match.md): Conditionally run stages based on the label set.
--
-diff --git a/docs/clients/promtail/stages/match.md b/docs/clients/promtail/stages/match.md
-index 5a931b01c2946..9744fb70806d3 100644
---- a/docs/clients/promtail/stages/match.md
-+++ b/docs/clients/promtail/stages/match.md
-@@ -1,20 +1,24 @@
- # `match` stage
-
- The match stage is a filtering stage that conditionally applies a set of stages
--when a log entry matches a configurable [LogQL](../../../logql.md) stream
--selector.
-+or drop entries when a log entry matches a configurable [LogQL](../../../logql.md)
-+stream selector and filter expressions.
-
- ## Schema
-
- ```yaml
- match:
-- # LogQL stream selector.
-+ # LogQL stream selector and filter expressions.
- selector:
-
- # Names the pipeline. When defined, creates an additional label in
- # the pipeline_duration_seconds histogram, where the value is
- # concatenated with job_name using an underscore.
-- [pipieline_name: ]
-+ [pipeline_name: ]
-+
-+ # When set to drop (default to keep), all entries matching the selector will
-+ # be dropped. Stages must not be defined when dropping entries.
-+ [action: ]
-
- # Nested set of pipeline stages only if the selector
- # matches the labels of the log entries:
-@@ -46,33 +50,39 @@ pipeline_stages:
- - labels:
- app:
- - match:
-- selector: ""{app=\""loki\""}""
-+ selector: '{app=""loki""}'
- stages:
- - json:
- expressions:
- msg: message
- - match:
- pipeline_name: ""app2""
-- selector: ""{app=\""pokey\""}""
-+ selector: '{app=""pokey""}'
-+ action: keep
- stages:
- - json:
- expressions:
- msg: msg
-+- match:
-+ selector: '{app=""promtail""} |~ "".*noisy error.*""'
-+ action: drop
- - output:
- source: msg
- ```
-
--And the given log line:
-+And given log lines:
-
--```
-+```json
- { ""time"":""2012-11-01T22:08:41+00:00"", ""app"":""loki"", ""component"": [""parser"",""type""], ""level"" : ""WARN"", ""message"" : ""app1 log line"" }
-+{ ""time"":""2012-11-01T22:08:41+00:00"", ""app"":""promtail"", ""component"": [""parser"",""type""], ""level"" : ""ERROR"", ""message"" : ""foo noisy error"" }
- ```
-
--The first stage will add `app` with a value of `loki` into the extracted map,
-+The first stage will add `app` with a value of `loki` into the extracted map for the first log line,
- while the second stage will add `app` as a label (again with the value of `loki`).
-+The second line will follow the same flow and will be added the label `app` with a value of `promtail`.
-
- The third stage uses LogQL to only execute the nested stages when there is a
--label of `app` whose value is `loki`. This matches in our case; the nested
-+label of `app` whose value is `loki`. This matches the first line in our case; the nested
- `json` stage then adds `msg` into the extracted map with a value of `app1 log
- line`.
-
-@@ -80,6 +90,9 @@ The fourth stage uses LogQL to only executed the nested stages when there is a
- label of `app` whose value is `pokey`. This does **not** match in our case, so
- the nested `json` stage is not ran.
-
-+The fifth stage will drop any entries from the application `promtail` that matches
-+the regex `.*noisy error`.
-+
- The final `output` stage changes the contents of the log line to be the value of
- `msg` from the extracted map. In this case, the log line is changed to `app1 log
- line`.
-diff --git a/pkg/logentry/stages/match.go b/pkg/logentry/stages/match.go
-index d59c6d704b83a..0c0838afd0b76 100644
---- a/pkg/logentry/stages/match.go
-+++ b/pkg/logentry/stages/match.go
-@@ -3,12 +3,13 @@ package stages
- import (
- ""time""
-
-+ ""github.com/prometheus/prometheus/pkg/labels""
-+
- ""github.com/go-kit/kit/log""
- ""github.com/mitchellh/mapstructure""
- ""github.com/pkg/errors""
- ""github.com/prometheus/client_golang/prometheus""
- ""github.com/prometheus/common/model""
-- ""github.com/prometheus/prometheus/pkg/labels""
-
- ""github.com/grafana/loki/pkg/logql""
- )
-@@ -19,6 +20,10 @@ const (
- ErrSelectorRequired = ""selector statement required for match stage""
- ErrMatchRequiresStages = ""match stage requires at least one additional stage to be defined in '- stages'""
- ErrSelectorSyntax = ""invalid selector syntax for match stage""
-+ ErrStagesWithDropLine = ""match stage configured to drop entries cannot contains stages""
-+ ErrUnknownMatchAction = ""match stage action should be 'keep' or 'drop'""
-+ MatchActionKeep = ""keep""
-+ MatchActionDrop = ""drop""
- )
-
- // MatcherConfig contains the configuration for a matcherStage
-@@ -26,10 +31,11 @@ type MatcherConfig struct {
- PipelineName *string `mapstructure:""pipeline_name""`
- Selector string `mapstructure:""selector""`
- Stages PipelineStages `mapstructure:""stages""`
-+ Action string `mapstructure:""action""`
- }
-
- // validateMatcherConfig validates the MatcherConfig for the matcherStage
--func validateMatcherConfig(cfg *MatcherConfig) ([]*labels.Matcher, error) {
-+func validateMatcherConfig(cfg *MatcherConfig) (logql.LogSelectorExpr, error) {
- if cfg == nil {
- return nil, errors.New(ErrEmptyMatchStageConfig)
- }
-@@ -39,14 +45,26 @@ func validateMatcherConfig(cfg *MatcherConfig) ([]*labels.Matcher, error) {
- if cfg.Selector == """" {
- return nil, errors.New(ErrSelectorRequired)
- }
-- if cfg.Stages == nil || len(cfg.Stages) == 0 {
-+ switch cfg.Action {
-+ case MatchActionKeep, MatchActionDrop:
-+ case """":
-+ cfg.Action = MatchActionKeep
-+ default:
-+ return nil, errors.New(ErrUnknownMatchAction)
-+ }
-+
-+ if cfg.Action == MatchActionKeep && (cfg.Stages == nil || len(cfg.Stages) == 0) {
- return nil, errors.New(ErrMatchRequiresStages)
- }
-- matchers, err := logql.ParseMatchers(cfg.Selector)
-+ if cfg.Action == MatchActionDrop && (cfg.Stages != nil && len(cfg.Stages) != 0) {
-+ return nil, errors.New(ErrStagesWithDropLine)
-+ }
-+
-+ selector, err := logql.ParseLogSelector(cfg.Selector)
- if err != nil {
- return nil, errors.Wrap(err, ErrSelectorSyntax)
- }
-- return matchers, nil
-+ return selector, nil
- }
-
- // newMatcherStage creates a new matcherStage from config
-@@ -56,7 +74,7 @@ func newMatcherStage(logger log.Logger, jobName *string, config interface{}, reg
- if err != nil {
- return nil, err
- }
-- matchers, err := validateMatcherConfig(cfg)
-+ selector, err := validateMatcherConfig(cfg)
- if err != nil {
- return nil, err
- }
-@@ -67,21 +85,34 @@ func newMatcherStage(logger log.Logger, jobName *string, config interface{}, reg
- nPtr = &name
- }
-
-- pl, err := NewPipeline(logger, cfg.Stages, nPtr, registerer)
-+ var pl *Pipeline
-+ if cfg.Action == MatchActionKeep {
-+ var err error
-+ pl, err = NewPipeline(logger, cfg.Stages, nPtr, registerer)
-+ if err != nil {
-+ return nil, errors.Wrapf(err, ""match stage failed to create pipeline from config: %v"", config)
-+ }
-+ }
-+
-+ filter, err := selector.Filter()
- if err != nil {
-- return nil, errors.Wrapf(err, ""match stage failed to create pipeline from config: %v"", config)
-+ return nil, errors.Wrap(err, ""error parsing filter"")
- }
-
- return &matcherStage{
-- matchers: matchers,
-+ matchers: selector.Matchers(),
- pipeline: pl,
-+ action: cfg.Action,
-+ filter: filter,
- }, nil
- }
-
- // matcherStage applies Label matchers to determine if the include stages should be run
- type matcherStage struct {
- matchers []*labels.Matcher
-+ filter logql.Filter
- pipeline Stage
-+ action string
- }
-
- // Process implements Stage
-@@ -91,7 +122,15 @@ func (m *matcherStage) Process(labels model.LabelSet, extracted map[string]inter
- return
- }
- }
-- m.pipeline.Process(labels, extracted, t, entry)
-+ if m.filter == nil || m.filter([]byte(*entry)) {
-+ switch m.action {
-+ case MatchActionDrop:
-+ // Adds the drop label to not be sent by the api.EntryHandler
-+ labels[dropLabel] = """"
-+ case MatchActionKeep:
-+ m.pipeline.Process(labels, extracted, t, entry)
-+ }
-+ }
- }
-
- // Name implements Stage
-diff --git a/pkg/logentry/stages/match_test.go b/pkg/logentry/stages/match_test.go
-index 9849d064309fa..843e2b8dd24a3 100644
---- a/pkg/logentry/stages/match_test.go
-+++ b/pkg/logentry/stages/match_test.go
-@@ -99,43 +99,70 @@ func TestMatchPipeline(t *testing.T) {
- func TestMatcher(t *testing.T) {
- t.Parallel()
- tests := []struct {
-- matcher string
-- labels map[string]string
-+ selector string
-+ labels map[string]string
-+ action string
-
-- shouldRun bool
-- wantErr bool
-+ shouldDrop bool
-+ shouldRun bool
-+ wantErr bool
- }{
-- {""{foo=\""bar\""} |= \""foo\"""", map[string]string{""foo"": ""bar""}, false, true},
-- {""{foo=\""bar\""} |~ \""foo\"""", map[string]string{""foo"": ""bar""}, false, true},
-- {""foo"", map[string]string{""foo"": ""bar""}, false, true},
-- {""{}"", map[string]string{""foo"": ""bar""}, false, true},
-- {""{"", map[string]string{""foo"": ""bar""}, false, true},
-- {"""", map[string]string{""foo"": ""bar""}, true, true},
-- {""{foo=\""bar\""}"", map[string]string{""foo"": ""bar""}, true, false},
-- {""{foo=\""\""}"", map[string]string{""foo"": ""bar""}, false, false},
-- {""{foo=\""\""}"", map[string]string{}, true, false},
-- {""{foo!=\""bar\""}"", map[string]string{""foo"": ""bar""}, false, false},
-- {""{foo=\""bar\"",bar!=\""test\""}"", map[string]string{""foo"": ""bar""}, true, false},
-- {""{foo=\""bar\"",bar!=\""test\""}"", map[string]string{""foo"": ""bar"", ""bar"": ""test""}, false, false},
-- {""{foo=\""bar\"",bar=~\""te.*\""}"", map[string]string{""foo"": ""bar"", ""bar"": ""test""}, true, false},
-- {""{foo=\""bar\"",bar!~\""te.*\""}"", map[string]string{""foo"": ""bar"", ""bar"": ""test""}, false, false},
-- {""{foo=\""\""}"", map[string]string{}, true, false},
-+ {`{foo=""bar""} |= ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false},
-+ {`{foo=""bar""} |~ ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false},
-+ {`{foo=""bar""} |= ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false},
-+ {`{foo=""bar""} |~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false},
-+ {`{foo=""bar""} != ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false},
-+ {`{foo=""bar""} !~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false},
-+ {`{foo=""bar""} != ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false},
-+ {`{foo=""bar""} |= ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false},
-+ {`{foo=""bar""} |~ ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false},
-+ {`{foo=""bar""} |= ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false},
-+ {`{foo=""bar""} |~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false},
-+ {`{foo=""bar""} != ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false},
-+ {`{foo=""bar""} !~ ""bar""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false},
-+ {`{foo=""bar""} != ""foo""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false},
-+ {`{foo=""bar""} !~ ""[]""`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, true},
-+ {""foo"", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, true},
-+ {""{}"", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, true},
-+ {""{"", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, true},
-+ {"""", map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, true},
-+ {`{foo=""bar""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false},
-+ {`{foo=""""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false},
-+ {`{foo=""""}`, map[string]string{}, MatchActionKeep, false, true, false},
-+ {`{foo!=""bar""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, false, false},
-+ {`{foo!=""bar""}`, map[string]string{""foo"": ""bar""}, MatchActionDrop, false, false, false},
-+ {`{foo=""bar"",bar!=""test""}`, map[string]string{""foo"": ""bar""}, MatchActionKeep, false, true, false},
-+ {`{foo=""bar"",bar!=""test""}`, map[string]string{""foo"": ""bar""}, MatchActionDrop, true, false, false},
-+ {`{foo=""bar"",bar!=""test""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionKeep, false, false, false},
-+ {`{foo=""bar"",bar=~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionDrop, true, false, false},
-+ {`{foo=""bar"",bar=~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionKeep, false, true, false},
-+ {`{foo=""bar"",bar!~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionKeep, false, false, false},
-+ {`{foo=""bar"",bar!~""te.*""}`, map[string]string{""foo"": ""bar"", ""bar"": ""test""}, MatchActionDrop, false, false, false},
-+
-+ {`{foo=""""}`, map[string]string{}, MatchActionKeep, false, true, false},
- }
-
- for _, tt := range tests {
-- t.Run(fmt.Sprintf(""%s/%s"", tt.matcher, tt.labels), func(t *testing.T) {
-+ name := fmt.Sprintf(""%s/%s/%s"", tt.selector, tt.labels, tt.action)
-+
-+ t.Run(name, func(t *testing.T) {
- // Build a match config which has a simple label stage that when matched will add the test_label to
- // the labels in the pipeline.
-- matchConfig := MatcherConfig{
-- nil,
-- tt.matcher,
-- PipelineStages{
-+ var stages PipelineStages
-+ if tt.action != MatchActionDrop {
-+ stages = PipelineStages{
- PipelineStage{
- StageTypeLabel: LabelsConfig{
- ""test_label"": nil,
- },
- },
-- },
-+ }
-+ }
-+ matchConfig := MatcherConfig{
-+ nil,
-+ tt.selector,
-+ stages,
-+ tt.action,
- }
- s, err := newMatcherStage(util.Logger, nil, matchConfig, prometheus.DefaultRegisterer)
- if (err != nil) != tt.wantErr {
-@@ -143,7 +170,7 @@ func TestMatcher(t *testing.T) {
- return
- }
- if s != nil {
-- ts, entry := time.Now(), """"
-+ ts, entry := time.Now(), ""foo""
- extracted := map[string]interface{}{
- ""test_label"": ""unimportant value"",
- }
-@@ -156,6 +183,41 @@ func TestMatcher(t *testing.T) {
- t.Error(""stage ran but should have not"")
- }
- }
-+ if tt.shouldDrop {
-+ if _, ok := labels[dropLabel]; !ok {
-+ t.Error(""stage should have been dropped"")
-+ }
-+ }
-+ }
-+ })
-+ }
-+}
-+
-+func Test_validateMatcherConfig(t *testing.T) {
-+ empty := """"
-+ notempty := ""test""
-+ tests := []struct {
-+ name string
-+ cfg *MatcherConfig
-+ wantErr bool
-+ }{
-+ {""empty"", nil, true},
-+ {""pipeline name required"", &MatcherConfig{PipelineName: &empty}, true},
-+ {""selector required"", &MatcherConfig{PipelineName: ¬empty, Selector: """"}, true},
-+ {""nil stages without dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionKeep, Stages: nil}, true},
-+ {""empty stages without dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionKeep, Stages: []interface{}{}}, true},
-+ {""stages with dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionDrop, Stages: []interface{}{""""}}, true},
-+ {""empty stages dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionDrop, Stages: []interface{}{}}, false},
-+ {""stages without dropping"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo""}`, Action: MatchActionKeep, Stages: []interface{}{""""}}, false},
-+ {""bad selector"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo}`, Action: MatchActionKeep, Stages: []interface{}{""""}}, true},
-+ {""bad action"", &MatcherConfig{PipelineName: ¬empty, Selector: `{app=""foo}`, Action: ""nope"", Stages: []interface{}{""""}}, true},
-+ }
-+ for _, tt := range tests {
-+ t.Run(tt.name, func(t *testing.T) {
-+ _, err := validateMatcherConfig(tt.cfg)
-+ if (err != nil) != tt.wantErr {
-+ t.Errorf(""validateMatcherConfig() error = %v, wantErr %v"", err, tt.wantErr)
-+ return
- }
- })
- }
-diff --git a/pkg/logentry/stages/pipeline.go b/pkg/logentry/stages/pipeline.go
-index 384e9825cb701..2974850bc5cee 100644
---- a/pkg/logentry/stages/pipeline.go
-+++ b/pkg/logentry/stages/pipeline.go
-@@ -12,6 +12,8 @@ import (
- ""github.com/grafana/loki/pkg/promtail/api""
- )
-
-+const dropLabel = ""__drop__""
-+
- // PipelineStages contains configuration for each stage within a pipeline
- type PipelineStages = []interface{}
-
-@@ -109,6 +111,10 @@ func (p *Pipeline) Wrap(next api.EntryHandler) api.EntryHandler {
- return api.EntryHandlerFunc(func(labels model.LabelSet, timestamp time.Time, line string) error {
- extracted := map[string]interface{}{}
- p.Process(labels, extracted, ×tamp, &line)
-+ // if the labels set contains the __drop__ label we don't send this entry to the next EntryHandler
-+ if _, ok := labels[dropLabel]; ok {
-+ return nil
-+ }
- return next.Handle(labels, timestamp, line)
- })
- }
-diff --git a/pkg/logentry/stages/pipeline_test.go b/pkg/logentry/stages/pipeline_test.go
-index 7b5dd0f89da82..fc8023b38cb68 100644
---- a/pkg/logentry/stages/pipeline_test.go
-+++ b/pkg/logentry/stages/pipeline_test.go
-@@ -11,7 +11,6 @@ import (
- ""github.com/prometheus/common/model""
- ""github.com/stretchr/testify/assert""
- ""github.com/stretchr/testify/require""
--
- ""gopkg.in/yaml.v2""
- )
-
-@@ -190,3 +189,64 @@ func BenchmarkPipeline(b *testing.B) {
- })
- }
- }
-+
-+type stubHandler struct {
-+ bool
-+}
-+
-+func (s *stubHandler) Handle(labels model.LabelSet, time time.Time, entry string) error {
-+ s.bool = true
-+ return nil
-+}
-+
-+func TestPipeline_Wrap(t *testing.T) {
-+ now := time.Now()
-+ var config map[string]interface{}
-+ err := yaml.Unmarshal([]byte(testYaml), &config)
-+ if err != nil {
-+ panic(err)
-+ }
-+ p, err := NewPipeline(util.Logger, config[""pipeline_stages""].([]interface{}), nil, prometheus.DefaultRegisterer)
-+ if err != nil {
-+ panic(err)
-+ }
-+
-+ tests := map[string]struct {
-+ labels model.LabelSet
-+ shouldSend bool
-+ }{
-+ ""should drop"": {
-+ map[model.LabelName]model.LabelValue{
-+ dropLabel: ""true"",
-+ ""stream"": ""stderr"",
-+ ""action"": ""GET"",
-+ ""status_code"": ""200"",
-+ },
-+ false,
-+ },
-+ ""should send"": {
-+ map[model.LabelName]model.LabelValue{
-+ ""stream"": ""stderr"",
-+ ""action"": ""GET"",
-+ ""status_code"": ""200"",
-+ },
-+ true,
-+ },
-+ }
-+
-+ for tName, tt := range tests {
-+ tt := tt
-+ t.Run(tName, func(t *testing.T) {
-+ t.Parallel()
-+ extracted := map[string]interface{}{}
-+ p.Process(tt.labels, extracted, &now, &rawTestLine)
-+ stub := &stubHandler{}
-+ handler := p.Wrap(stub)
-+ if err := handler.Handle(tt.labels, now, rawTestLine); err != nil {
-+ t.Fatalf(""failed to handle entry: %v"", err)
-+ }
-+ assert.Equal(t, stub.bool, tt.shouldSend)
-+
-+ })
-+ }
-+}",unknown,"Add logql filter to match stages and drop capability (#1112)
-
-* Add logql filter to match stages and drop capability
-
-* use const string instead and remove unused value
-
-* Uses action property instead of drop_entries"
-01a4de1bfbff17dccfc87e226d715cb86873e930,2024-03-25 21:39:52,J Stickler,docs: [style] quickstart as one word (#12301),False,"diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md
-index 36daa54cff0bc..5860ec1cc4fa5 100644
---- a/docs/sources/get-started/_index.md
-+++ b/docs/sources/get-started/_index.md
-@@ -32,7 +32,6 @@ To collect logs and view your log data generally involves the following steps:
-
- **Next steps:** Learn more about Loki’s query language, [LogQL](https://grafana.com/docs/loki/latest/query/).
-
--
- ## Example Grafana Agent configuration file to ship Kubernetes Pod logs to Loki
-
- To deploy Grafana Agent to collect Pod logs from your Kubernetes cluster and ship them to Loki, you an use the Grafana Agent Helm chart, and a `values.yaml` file.
-@@ -40,7 +39,6 @@ To deploy Grafana Agent to collect Pod logs from your Kubernetes cluster and shi
- 1. Install Loki with the [Helm chart](https://grafana.com/docs/loki/latest/setup/install/helm/install-scalable/).
- 1. Deploy the Grafana Agent, using the [Grafana Agent Helm chart](https://grafana.com/docs/agent/latest/flow/setup/install/kubernetes/) and this example `values.yaml` file updating the value for `forward_to = [loki.write.endpoint.receiver]`:
-
--
- ```yaml
- agent:
- mounts:
-@@ -101,14 +99,15 @@ agent:
- }
-
- ```
--
-
- 1. Then install Grafana Agent in your Kubernetes cluster using:
-
- ```bash
- helm upgrade -f values.yaml agent grafana/grafana-agent
- ```
-+
- This sample file is configured to:
-+
- - Install Grafana Agent to discover Pod logs.
- - Add `container` and `pod` labels to the logs.
- - Push the logs to your Loki cluster using the tenant ID `cloud`.
-diff --git a/docs/sources/get-started/quick-start.md b/docs/sources/get-started/quick-start.md
-index 70cbfc2c57d21..b4213e233546d 100644
---- a/docs/sources/get-started/quick-start.md
-+++ b/docs/sources/get-started/quick-start.md
-@@ -1,11 +1,11 @@
- ---
--title: Quick start to run Loki locally
--menuTitle: Loki quick start
-+title: Quickstart to run Loki locally
-+menuTitle: Loki quickstart
- weight: 550
- description: How to create and use a simple local Loki cluster for testing and evaluation purposes.
- ---
-
--# Quick start to run Loki locally
-+# Quickstart to run Loki locally
-
- If you want to experiment with Loki, you can run Loki locally using the Docker Compose file that ships with Loki. It runs Loki in a [monolithic deployment](https://grafana.com/docs/loki/latest/get-started/deployment-modes/#monolithic-mode) mode and includes a sample application to generate logs.
-
-@@ -24,11 +24,12 @@ The Docker Compose configuration instantiates the following components, each in
- ## Installing Loki and collecting sample logs
-
- Prerequisites
-+
- - [Docker](https://docs.docker.com/install)
- - [Docker Compose](https://docs.docker.com/compose/install)
-
- {{% admonition type=""note"" %}}
--This quick start assumes you are running Linux.
-+This quickstart assumes you are running Linux.
- {{% /admonition %}}
-
- **To install Loki locally, follow these steps:**
-@@ -57,6 +58,7 @@ This quick start assumes you are running Linux.
- ```
-
- You should see something similar to the following:
-+
- ```bash
- ✔ Network evaluate-loki_loki Created 0.1s
- ✔ Container evaluate-loki-minio-1 Started 0.6s
-@@ -99,30 +101,37 @@ Once you have collected logs, you will want to view them. You can view your log
- Here are some basic sample queries to get you started using LogQL. Note that these queries assume that you followed the instructions to create a directory called `evaluate-loki`. If you installed in a different directory, you’ll need to modify these queries to match your installation directory. After copying any of these queries into the query editor, click **Run Query** (4) to execute the query.
-
- 1. View all the log lines which have the container label ""flog"":
-+
- ```bash
- {container=""evaluate-loki-flog-1""}
- ```
-+
- In Loki, this is called a log stream. Loki uses [labels](https://grafana.com/docs/loki/latest/get-started/labels/) as metadata to describe log streams. Loki queries always start with a label selector. In the query above, the label selector is `container`.
-
- 1. To view all the log lines which have the container label ""grafana"":
-+
- ```bash
- {container=""evaluate-loki-grafana-1""}
- ```
-
- 1. Find all the log lines in the container=flog stream that contain the string ""status"":
-+
- ```bash
- {container=""evaluate-loki-flog-1""} |= `status`
- ```
-
- 1. Find all the log lines in the container=flog stream where the JSON field ""status"" is ""404"":
-+
- ```bash
- {container=""evaluate-loki-flog-1""} | json | status=`404`
- ```
-
- 1. Calculate the number of logs per second where the JSON field ""status"" is ""404"":
-+
- ```bash
- sum by(container) (rate({container=""evaluate-loki-flog-1""} | json | status=`404` [$__auto]))
- ```
-+
- The final query above is a metric query which returns a time series. This will trigger Grafana to draw a graph of the results. You can change the type of graph for a different view of the data. Click **Bars** to view a bar graph of the data.
-
- 1. Click the **Builder** tab (3) to return to Builder mode in the query editor.
-@@ -134,30 +143,37 @@ Once you have collected logs, you will want to view them. You can view your log
- For a thorough introduction to LogQL, refer to the [LogQL reference](https://grafana.com/docs/loki/latest/query/).
-
- ## Sample queries (code view)
-+
- Here are some more sample queries that you can run using the Flog sample data.
-
- To see all the log lines that flog has generated, enter the LogQL query:
-+
- ```bash
- {container=""evaluate-loki-flog-1""}|= ``
- ```
--The flog app generates log lines for simulated HTTP requests.
-+
-+The flog app generates log lines for simulated HTTP requests.
-
- To see all `GET` log lines, enter the LogQL query:
-+
- ```bash
- {container=""evaluate-loki-flog-1""} |= ""GET""
- ```
-
- To see all `POST` methods, enter the LogQL query:
-+
- ```bash
- {container=""evaluate-loki-flog-1""} |= ""POST""
- ```
-
- To see every log line with a 401 status (unauthorized error), enter the LogQL query:
-+
- ```bash
- {container=""evaluate-loki-flog-1""} | json | status=""401""
- ```
-
- To see every log line that does not contain the value 401:
-+
- ```bash
- {container=""evaluate-loki-flog-1""} != ""401""
- ```",docs,[style] quickstart as one word (#12301)
-0ab1b28812ec44a9ece076c5144992f2bc69a8a6,2020-04-30 02:20:40,Ed Welch,"Loki: Improve logging and add metrics to streams dropped by stream limit (#2012)
-
-* Improve the log message when we drop streams because a user is hitting a stream limit.
-Increment the dropped samples metrics when this happens also.
-
-Signed-off-by: Ed Welch
-
-* improving comments
-
-Signed-off-by: Ed Welch ",False,"diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
-index 7d05eb97608c2..afcbc5d72526f 100644
---- a/pkg/ingester/instance.go
-+++ b/pkg/ingester/instance.go
-@@ -2,6 +2,7 @@ package ingester
-
- import (
- ""context""
-+ ""github.com/grafana/loki/pkg/util/validation""
- ""net/http""
- ""sync""
- ""time""
-@@ -129,13 +130,8 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
-
- var appendErr error
- for _, s := range req.Streams {
-- labels, err := util.ToClientLabels(s.Labels)
-- if err != nil {
-- appendErr = err
-- continue
-- }
-
-- stream, err := i.getOrCreateStream(labels)
-+ stream, err := i.getOrCreateStream(s)
- if err != nil {
- appendErr = err
- continue
-@@ -153,7 +149,11 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
- return appendErr
- }
-
--func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, error) {
-+func (i *instance) getOrCreateStream(pushReqStream *logproto.Stream) (*stream, error) {
-+ labels, err := util.ToClientLabels(pushReqStream.Labels)
-+ if err != nil {
-+ return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
-+ }
- rawFp := client.FastFingerprint(labels)
- fp := i.mapper.mapFP(rawFp, labels)
-
-@@ -162,8 +162,14 @@ func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, err
- return stream, nil
- }
-
-- err := i.limiter.AssertMaxStreamsPerUser(i.instanceID, len(i.streams))
-+ err = i.limiter.AssertMaxStreamsPerUser(i.instanceID, len(i.streams))
- if err != nil {
-+ validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(len(pushReqStream.Entries)))
-+ bytes := 0
-+ for _, e := range pushReqStream.Entries {
-+ bytes += len(e.Line)
-+ }
-+ validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes))
- return nil, httpgrpc.Errorf(http.StatusTooManyRequests, err.Error())
- }
-
-diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
-index 50bfed5473882..c425672e207f2 100644
---- a/pkg/ingester/instance_test.go
-+++ b/pkg/ingester/instance_test.go
-@@ -10,8 +10,6 @@ import (
-
- ""github.com/prometheus/prometheus/pkg/labels""
-
-- ""github.com/grafana/loki/pkg/util""
--
- ""github.com/grafana/loki/pkg/chunkenc""
- ""github.com/grafana/loki/pkg/logproto""
-
-@@ -124,15 +122,12 @@ func TestSyncPeriod(t *testing.T) {
- result = append(result, logproto.Entry{Timestamp: tt, Line: fmt.Sprintf(""hello %d"", i)})
- tt = tt.Add(time.Duration(1 + rand.Int63n(randomStep.Nanoseconds())))
- }
--
-- err = inst.Push(context.Background(), &logproto.PushRequest{Streams: []*logproto.Stream{{Labels: lbls, Entries: result}}})
-- require.NoError(t, err)
--
-- // let's verify results.
-- ls, err := util.ToClientLabels(lbls)
-+ pr := &logproto.PushRequest{Streams: []*logproto.Stream{{Labels: lbls, Entries: result}}}
-+ err = inst.Push(context.Background(), pr)
- require.NoError(t, err)
-
-- s, err := inst.getOrCreateStream(ls)
-+ // let's verify results
-+ s, err := inst.getOrCreateStream(pr.Streams[0])
- require.NoError(t, err)
-
- // make sure each chunk spans max 'sync period' time
-diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
-index 5f1b52002754e..382c1c0be70bb 100644
---- a/pkg/ingester/limiter.go
-+++ b/pkg/ingester/limiter.go
-@@ -8,7 +8,7 @@ import (
- )
-
- const (
-- errMaxStreamsPerUserLimitExceeded = ""per-user streams limit (local: %d global: %d actual local: %d) exceeded""
-+ errMaxStreamsPerUserLimitExceeded = ""tenant '%v' per-user streams limit exceeded, streams: %d exceeds calculated limit: %d (local limit: %d, global limit: %d, global/ingesters: %d)""
- )
-
- // RingCount is the interface exposed by a ring implementation which allows
-@@ -37,32 +37,28 @@ func NewLimiter(limits *validation.Overrides, ring RingCount, replicationFactor
- // AssertMaxStreamsPerUser ensures limit has not been reached compared to the current
- // number of streams in input and returns an error if so.
- func (l *Limiter) AssertMaxStreamsPerUser(userID string, streams int) error {
-- actualLimit := l.maxStreamsPerUser(userID)
-- if streams < actualLimit {
-- return nil
-- }
--
-- localLimit := l.limits.MaxLocalStreamsPerUser(userID)
-- globalLimit := l.limits.MaxGlobalStreamsPerUser(userID)
--
-- return fmt.Errorf(errMaxStreamsPerUserLimitExceeded, localLimit, globalLimit, actualLimit)
--}
--
--func (l *Limiter) maxStreamsPerUser(userID string) int {
-+ // Start by setting the local limit either from override or default
- localLimit := l.limits.MaxLocalStreamsPerUser(userID)
-
- // We can assume that streams are evenly distributed across ingesters
- // so we do convert the global limit into a local limit
- globalLimit := l.limits.MaxGlobalStreamsPerUser(userID)
-- localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit))
-+ adjustedGlobalLimit := l.convertGlobalToLocalLimit(globalLimit)
-+
-+ // Set the calculated limit to the lesser of the local limit or the new calculated global limit
-+ calculatedLimit := l.minNonZero(localLimit, adjustedGlobalLimit)
-
- // If both the local and global limits are disabled, we just
- // use the largest int value
-- if localLimit == 0 {
-- localLimit = math.MaxInt32
-+ if calculatedLimit == 0 {
-+ calculatedLimit = math.MaxInt32
-+ }
-+
-+ if streams < calculatedLimit {
-+ return nil
- }
-
-- return localLimit
-+ return fmt.Errorf(errMaxStreamsPerUserLimitExceeded, userID, streams, calculatedLimit, localLimit, globalLimit, adjustedGlobalLimit)
- }
-
- func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int {
-diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go
-index c01a06862824d..e43e65d74b205 100644
---- a/pkg/ingester/limiter_test.go
-+++ b/pkg/ingester/limiter_test.go
-@@ -11,112 +11,86 @@ import (
- ""github.com/grafana/loki/pkg/util/validation""
- )
-
--func TestLimiter_maxStreamsPerUser(t *testing.T) {
-+func TestLimiter_AssertMaxStreamsPerUser(t *testing.T) {
- tests := map[string]struct {
- maxLocalStreamsPerUser int
- maxGlobalStreamsPerUser int
- ringReplicationFactor int
- ringIngesterCount int
-- expected int
-+ streams int
-+ expected error
- }{
-+ ""both local and global limit are disabled"": {
-+ maxLocalStreamsPerUser: 0,
-+ maxGlobalStreamsPerUser: 0,
-+ ringReplicationFactor: 1,
-+ ringIngesterCount: 1,
-+ streams: 100,
-+ expected: nil,
-+ },
-+ ""current number of streams is below the limit"": {
-+ maxLocalStreamsPerUser: 0,
-+ maxGlobalStreamsPerUser: 1000,
-+ ringReplicationFactor: 3,
-+ ringIngesterCount: 10,
-+ streams: 299,
-+ expected: nil,
-+ },
-+ ""current number of streams is above the limit"": {
-+ maxLocalStreamsPerUser: 0,
-+ maxGlobalStreamsPerUser: 1000,
-+ ringReplicationFactor: 3,
-+ ringIngesterCount: 10,
-+ streams: 300,
-+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 300, 300, 0, 1000, 300),
-+ },
- ""both local and global limits are disabled"": {
- maxLocalStreamsPerUser: 0,
- maxGlobalStreamsPerUser: 0,
- ringReplicationFactor: 1,
- ringIngesterCount: 1,
-- expected: math.MaxInt32,
-+ streams: math.MaxInt32 - 1,
-+ expected: nil,
- },
- ""only local limit is enabled"": {
- maxLocalStreamsPerUser: 1000,
- maxGlobalStreamsPerUser: 0,
- ringReplicationFactor: 1,
- ringIngesterCount: 1,
-- expected: 1000,
-+ streams: 3000,
-+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 1000, 1000, 0, 0),
- },
- ""only global limit is enabled with replication-factor=1"": {
- maxLocalStreamsPerUser: 0,
- maxGlobalStreamsPerUser: 1000,
- ringReplicationFactor: 1,
- ringIngesterCount: 10,
-- expected: 100,
-+ streams: 3000,
-+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 100, 0, 1000, 100),
- },
- ""only global limit is enabled with replication-factor=3"": {
- maxLocalStreamsPerUser: 0,
- maxGlobalStreamsPerUser: 1000,
- ringReplicationFactor: 3,
- ringIngesterCount: 10,
-- expected: 300,
-+ streams: 3000,
-+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 300, 0, 1000, 300),
- },
- ""both local and global limits are set with local limit < global limit"": {
- maxLocalStreamsPerUser: 150,
- maxGlobalStreamsPerUser: 1000,
- ringReplicationFactor: 3,
- ringIngesterCount: 10,
-- expected: 150,
-+ streams: 3000,
-+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 150, 150, 1000, 300),
- },
- ""both local and global limits are set with local limit > global limit"": {
- maxLocalStreamsPerUser: 500,
- maxGlobalStreamsPerUser: 1000,
- ringReplicationFactor: 3,
- ringIngesterCount: 10,
-- expected: 300,
-- },
-- }
--
-- for testName, testData := range tests {
-- testData := testData
--
-- t.Run(testName, func(t *testing.T) {
-- // Mock the ring
-- ring := &ringCountMock{count: testData.ringIngesterCount}
--
-- // Mock limits
-- limits, err := validation.NewOverrides(validation.Limits{
-- MaxLocalStreamsPerUser: testData.maxLocalStreamsPerUser,
-- MaxGlobalStreamsPerUser: testData.maxGlobalStreamsPerUser,
-- }, nil)
-- require.NoError(t, err)
--
-- limiter := NewLimiter(limits, ring, testData.ringReplicationFactor)
-- actual := limiter.maxStreamsPerUser(""test"")
--
-- assert.Equal(t, testData.expected, actual)
-- })
-- }
--}
--
--func TestLimiter_AssertMaxStreamsPerUser(t *testing.T) {
-- tests := map[string]struct {
-- maxLocalStreamsPerUser int
-- maxGlobalStreamsPerUser int
-- ringReplicationFactor int
-- ringIngesterCount int
-- streams int
-- expected error
-- }{
-- ""both local and global limit are disabled"": {
-- maxLocalStreamsPerUser: 0,
-- maxGlobalStreamsPerUser: 0,
-- ringReplicationFactor: 1,
-- ringIngesterCount: 1,
-- streams: 100,
-- expected: nil,
-- },
-- ""current number of streams is below the limit"": {
-- maxLocalStreamsPerUser: 0,
-- maxGlobalStreamsPerUser: 1000,
-- ringReplicationFactor: 3,
-- ringIngesterCount: 10,
-- streams: 299,
-- expected: nil,
-- },
-- ""current number of streams is above the limit"": {
-- maxLocalStreamsPerUser: 0,
-- maxGlobalStreamsPerUser: 1000,
-- ringReplicationFactor: 3,
-- ringIngesterCount: 10,
-- streams: 300,
-- expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, 0, 1000, 300),
-+ streams: 3000,
-+ expected: fmt.Errorf(errMaxStreamsPerUserLimitExceeded, ""test"", 3000, 300, 500, 1000, 300),
- },
- }
-
-diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go
-index 9293a989c17fb..97f54caa15e20 100644
---- a/pkg/util/validation/validate.go
-+++ b/pkg/util/validation/validate.go
-@@ -10,6 +10,9 @@ const (
- RateLimited = ""rate_limited""
- // LineTooLong is a reason for discarding too long log lines.
- LineTooLong = ""line_too_long""
-+ // StreamLimit is a reason for discarding lines when we can't create a new stream
-+ // because the limit of active streams has been reached.
-+ StreamLimit = ""stream_limit""
- )
-
- // DiscardedBytes is a metric of the total discarded bytes, by reason.",Loki,"Improve logging and add metrics to streams dropped by stream limit (#2012)
-
-* Improve the log message when we drop streams because a user is hitting a stream limit.
-Increment the dropped samples metrics when this happens also.
-
-Signed-off-by: Ed Welch
-
-* improving comments
-
-Signed-off-by: Ed Welch "
-3d0236bc7140ca21279a9295ddca70ce6c1f53ef,2024-10-21 21:07:18,Christian Haudum,"chore: Fix variable declaration for Bloom Build dashboard (#14553)
-
-Signed-off-by: Christian Haudum ",False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json
-index 02aa2ee1d3416..149dfacd857d3 100644
---- a/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json
-+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-bloom-build.json
-@@ -6367,7 +6367,7 @@
- ""multi"": false,
- ""name"": ""tenant"",
- ""options"": [ ],
-- ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""})"",
-+ ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""}, tenant)"",
- ""refresh"": 0,
- ""regex"": """",
- ""sort"": 3,
-diff --git a/production/loki-mixin-compiled/dashboards/loki-bloom-build.json b/production/loki-mixin-compiled/dashboards/loki-bloom-build.json
-index 02aa2ee1d3416..149dfacd857d3 100644
---- a/production/loki-mixin-compiled/dashboards/loki-bloom-build.json
-+++ b/production/loki-mixin-compiled/dashboards/loki-bloom-build.json
-@@ -6367,7 +6367,7 @@
- ""multi"": false,
- ""name"": ""tenant"",
- ""options"": [ ],
-- ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""})"",
-+ ""query"": ""label_values(loki_bloomplanner_tenant_tasks_planned{cluster=\""$cluster\"", namespace=\""$namespace\""}, tenant)"",
- ""refresh"": 0,
- ""regex"": """",
- ""sort"": 3,
-diff --git a/production/loki-mixin/dashboards/loki-bloom-build.libsonnet b/production/loki-mixin/dashboards/loki-bloom-build.libsonnet
-index bce61a0e50215..5405f6ef1b602 100644
---- a/production/loki-mixin/dashboards/loki-bloom-build.libsonnet
-+++ b/production/loki-mixin/dashboards/loki-bloom-build.libsonnet
-@@ -12,7 +12,7 @@ local template = import 'grafonnet/template.libsonnet';
- template.new(
- 'tenant',
- '$datasource',
-- 'label_values(loki_bloomplanner_tenant_tasks_planned{cluster=""$cluster"", namespace=""$namespace""})',
-+ 'label_values(loki_bloomplanner_tenant_tasks_planned{cluster=""$cluster"", namespace=""$namespace""}, tenant)',
- label='Tenant',
- sort=3, // numerical ascending
- includeAll=true,",chore,"Fix variable declaration for Bloom Build dashboard (#14553)
-
-Signed-off-by: Christian Haudum "
-9d84a6868b4327a243c6f26e0c5c9954402ada23,2025-02-04 19:40:12,sherinabr,fix: export ExcludedMetadataLabels so it can be extended in GEL (#16083),False,"diff --git a/pkg/util/entry_size.go b/pkg/util/entry_size.go
-index 4f2c8f0bf82dc..91f0b300010a6 100644
---- a/pkg/util/entry_size.go
-+++ b/pkg/util/entry_size.go
-@@ -20,12 +20,12 @@ func EntryTotalSize(entry *push.Entry) int {
- return len(entry.Line) + StructuredMetadataSize(entry.StructuredMetadata)
- }
-
--var excludedStructuredMetadataLabels = []string{constants.LevelLabel}
-+var ExcludedStructuredMetadataLabels = []string{constants.LevelLabel}
-
- func StructuredMetadataSize(metas push.LabelsAdapter) int {
- size := 0
- for _, meta := range metas {
-- if slices.Contains(excludedStructuredMetadataLabels, meta.Name) {
-+ if slices.Contains(ExcludedStructuredMetadataLabels, meta.Name) {
- continue
- }
- size += len(meta.Name) + len(meta.Value)",fix,export ExcludedMetadataLabels so it can be extended in GEL (#16083)
-ea6abbfd079f79dcc0d019bf1691a6a3a9803a8f,2023-03-08 02:55:54,Dylan Guedes,"Loki-Mixin: Remove query-readiness panel (#8735)
-
-**What this PR does / why we need it**:
-Remove from loki-mixin the panel that shows the query_readiness metric.
-The metric was removed a while ago.",False,"diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json
-index 4cef18cdf6aab..e54223c76aa13 100644
---- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json
-+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json
-@@ -535,83 +535,6 @@
- ""show"": false
- }
- ]
-- },
-- {
-- ""aliasColors"": { },
-- ""bars"": false,
-- ""dashLength"": 10,
-- ""dashes"": false,
-- ""datasource"": ""$datasource"",
-- ""fill"": 1,
-- ""gridPos"": { },
-- ""id"": 7,
-- ""legend"": {
-- ""avg"": false,
-- ""current"": false,
-- ""max"": false,
-- ""min"": false,
-- ""show"": true,
-- ""total"": false,
-- ""values"": false
-- },
-- ""lines"": true,
-- ""linewidth"": 1,
-- ""links"": [ ],
-- ""nullPointMode"": ""null as zero"",
-- ""percentage"": false,
-- ""pointradius"": 5,
-- ""points"": false,
-- ""renderer"": ""flot"",
-- ""seriesOverrides"": [ ],
-- ""spaceLength"": 10,
-- ""span"": 6,
-- ""stack"": false,
-- ""steppedLine"": false,
-- ""targets"": [
-- {
-- ""expr"": ""loki_boltdb_shipper_query_readiness_duration_seconds{cluster=~\""$cluster\"", namespace=~\""$namespace\""}"",
-- ""format"": ""time_series"",
-- ""intervalFactor"": 2,
-- ""legendFormat"": ""duration"",
-- ""legendLink"": null,
-- ""step"": 10
-- }
-- ],
-- ""thresholds"": [ ],
-- ""timeFrom"": null,
-- ""timeShift"": null,
-- ""title"": ""Query Readiness Duration"",
-- ""tooltip"": {
-- ""shared"": true,
-- ""sort"": 2,
-- ""value_type"": ""individual""
-- },
-- ""type"": ""graph"",
-- ""xaxis"": {
-- ""buckets"": null,
-- ""mode"": ""time"",
-- ""name"": null,
-- ""show"": true,
-- ""values"": [ ]
-- },
-- ""yaxes"": [
-- {
-- ""format"": ""s"",
-- ""label"": null,
-- ""logBase"": 1,
-- ""max"": null,
-- ""min"": 0,
-- ""show"": true
-- },
-- {
-- ""format"": ""short"",
-- ""label"": null,
-- ""logBase"": 1,
-- ""max"": null,
-- ""min"": null,
-- ""show"": false
-- }
-- ]
- }
- ],
- ""repeat"": null,
-@@ -633,7 +556,7 @@
- ""dashes"": false,
- ""datasource"": ""$datasource"",
- ""fill"": 1,
-- ""id"": 8,
-+ ""id"": 7,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -734,7 +657,7 @@
- ""dashes"": false,
- ""datasource"": ""$datasource"",
- ""fill"": 1,
-- ""id"": 9,
-+ ""id"": 8,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -835,7 +758,7 @@
- ""dashes"": false,
- ""datasource"": ""$datasource"",
- ""fill"": 1,
-- ""id"": 10,
-+ ""id"": 9,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-diff --git a/production/loki-mixin-compiled/dashboards/loki-reads-resources.json b/production/loki-mixin-compiled/dashboards/loki-reads-resources.json
-index 33fdffdcd3097..9e4ef679a8725 100644
---- a/production/loki-mixin-compiled/dashboards/loki-reads-resources.json
-+++ b/production/loki-mixin-compiled/dashboards/loki-reads-resources.json
-@@ -1634,83 +1634,6 @@
- ""show"": false
- }
- ]
-- },
-- {
-- ""aliasColors"": { },
-- ""bars"": false,
-- ""dashLength"": 10,
-- ""dashes"": false,
-- ""datasource"": ""$datasource"",
-- ""fill"": 1,
-- ""gridPos"": { },
-- ""id"": 19,
-- ""legend"": {
-- ""avg"": false,
-- ""current"": false,
-- ""max"": false,
-- ""min"": false,
-- ""show"": true,
-- ""total"": false,
-- ""values"": false
-- },
-- ""lines"": true,
-- ""linewidth"": 1,
-- ""links"": [ ],
-- ""nullPointMode"": ""null as zero"",
-- ""percentage"": false,
-- ""pointradius"": 5,
-- ""points"": false,
-- ""renderer"": ""flot"",
-- ""seriesOverrides"": [ ],
-- ""spaceLength"": 10,
-- ""span"": 6,
-- ""stack"": false,
-- ""steppedLine"": false,
-- ""targets"": [
-- {
-- ""expr"": ""loki_boltdb_shipper_query_readiness_duration_seconds{cluster=~\""$cluster\"", namespace=~\""$namespace\""}"",
-- ""format"": ""time_series"",
-- ""intervalFactor"": 2,
-- ""legendFormat"": ""duration"",
-- ""legendLink"": null,
-- ""step"": 10
-- }
-- ],
-- ""thresholds"": [ ],
-- ""timeFrom"": null,
-- ""timeShift"": null,
-- ""title"": ""Query Readiness Duration"",
-- ""tooltip"": {
-- ""shared"": true,
-- ""sort"": 2,
-- ""value_type"": ""individual""
-- },
-- ""type"": ""graph"",
-- ""xaxis"": {
-- ""buckets"": null,
-- ""mode"": ""time"",
-- ""name"": null,
-- ""show"": true,
-- ""values"": [ ]
-- },
-- ""yaxes"": [
-- {
-- ""format"": ""s"",
-- ""label"": null,
-- ""logBase"": 1,
-- ""max"": null,
-- ""min"": 0,
-- ""show"": true
-- },
-- {
-- ""format"": ""short"",
-- ""label"": null,
-- ""logBase"": 1,
-- ""max"": null,
-- ""min"": null,
-- ""show"": false
-- }
-- ]
- }
- ],
- ""repeat"": null,
-@@ -1732,7 +1655,7 @@
- ""dashes"": false,
- ""datasource"": ""$datasource"",
- ""fill"": 1,
-- ""id"": 20,
-+ ""id"": 19,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -1833,7 +1756,7 @@
- ""dashes"": false,
- ""datasource"": ""$datasource"",
- ""fill"": 1,
-- ""id"": 21,
-+ ""id"": 20,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -1934,7 +1857,7 @@
- ""dashes"": false,
- ""datasource"": ""$datasource"",
- ""fill"": 1,
-- ""id"": 22,
-+ ""id"": 21,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -2021,7 +1944,7 @@
- ""datasource"": ""$datasource"",
- ""fill"": 1,
- ""gridPos"": { },
-- ""id"": 23,
-+ ""id"": 22,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -2098,7 +2021,7 @@
- ""datasource"": ""$datasource"",
- ""fill"": 1,
- ""gridPos"": { },
-- ""id"": 24,
-+ ""id"": 23,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -2200,7 +2123,7 @@
- ""datasource"": ""$datasource"",
- ""fill"": 1,
- ""gridPos"": { },
-- ""id"": 25,
-+ ""id"": 24,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-@@ -2302,7 +2225,7 @@
- ""datasource"": ""$datasource"",
- ""fill"": 1,
- ""gridPos"": { },
-- ""id"": 26,
-+ ""id"": 25,
- ""legend"": {
- ""avg"": false,
- ""current"": false,
-diff --git a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet
-index c03241eeb92be..4e54760b513ba 100644
---- a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet
-+++ b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet
-@@ -120,13 +120,6 @@ local utils = import 'mixin-utils/utils.libsonnet';
- .addPanel(
- $.containerDiskSpaceUtilizationPanel('Disk Space Utilization', index_gateway_job_matcher),
- )
-- .addPanel(
-- $.panel('Query Readiness Duration') +
-- $.queryPanel(
-- ['loki_boltdb_shipper_query_readiness_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']
-- ) +
-- { yaxes: $.yaxes('s') },
-- )
- )
- .addRow(
- $.row('Ingester')",unknown,"Loki-Mixin: Remove query-readiness panel (#8735)
-
-**What this PR does / why we need it**:
-Remove from loki-mixin the panel that shows the query_readiness metric.
-The metric was removed a while ago."
-db438aa30911363cf1a654143e8a383ee84cb2ec,2025-03-22 02:45:31,renovate[bot],"chore(deps): update dependency eslint to v9.23.0 (main) (#16865)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>",False,"diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
-index dfef3c1d32313..4975916689fff 100644
---- a/pkg/ui/frontend/package-lock.json
-+++ b/pkg/ui/frontend/package-lock.json
-@@ -870,9 +870,9 @@
- }
- },
- ""node_modules/@eslint/config-helpers"": {
-- ""version"": ""0.1.0"",
-- ""resolved"": ""https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.1.0.tgz"",
-- ""integrity"": ""sha512-kLrdPDJE1ckPo94kmPPf9Hfd0DU0Jw6oKYrhe+pwSC0iTUInmTa+w6fw8sGgcfkFJGNdWOUeOaDM4quW4a7OkA=="",
-+ ""version"": ""0.2.0"",
-+ ""resolved"": ""https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.2.0.tgz"",
-+ ""integrity"": ""sha512-yJLLmLexii32mGrhW29qvU3QBVTu0GUmEf/J4XsBtVhp4JkIUFN/BjWqTF63yRvGApIDpZm5fa97LtYtINmfeQ=="",
- ""dev"": true,
- ""license"": ""Apache-2.0"",
- ""engines"": {
-@@ -893,9 +893,9 @@
- }
- },
- ""node_modules/@eslint/eslintrc"": {
-- ""version"": ""3.3.0"",
-- ""resolved"": ""https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.0.tgz"",
-- ""integrity"": ""sha512-yaVPAiNAalnCZedKLdR21GOGILMLKPyqSLWaAjQFvYA2i/ciDi8ArYVr69Anohb6cH2Ukhqti4aFnYyPm8wdwQ=="",
-+ ""version"": ""3.3.1"",
-+ ""resolved"": ""https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz"",
-+ ""integrity"": ""sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ=="",
- ""dev"": true,
- ""license"": ""MIT"",
- ""dependencies"": {
-@@ -930,9 +930,9 @@
- }
- },
- ""node_modules/@eslint/js"": {
-- ""version"": ""9.22.0"",
-- ""resolved"": ""https://registry.npmjs.org/@eslint/js/-/js-9.22.0.tgz"",
-- ""integrity"": ""sha512-vLFajx9o8d1/oL2ZkpMYbkLv8nDB6yaIwFNt7nI4+I80U/z03SxmfOMsLbvWr3p7C+Wnoh//aOu2pQW8cS0HCQ=="",
-+ ""version"": ""9.23.0"",
-+ ""resolved"": ""https://registry.npmjs.org/@eslint/js/-/js-9.23.0.tgz"",
-+ ""integrity"": ""sha512-35MJ8vCPU0ZMxo7zfev2pypqTwWTofFZO6m4KAtdoFhRpLJUpHTZZ+KB3C7Hb1d7bULYwO4lJXGCi5Se+8OMbw=="",
- ""dev"": true,
- ""license"": ""MIT"",
- ""engines"": {
-@@ -4072,19 +4072,19 @@
- }
- },
- ""node_modules/eslint"": {
-- ""version"": ""9.22.0"",
-- ""resolved"": ""https://registry.npmjs.org/eslint/-/eslint-9.22.0.tgz"",
-- ""integrity"": ""sha512-9V/QURhsRN40xuHXWjV64yvrzMjcz7ZyNoF2jJFmy9j/SLk0u1OLSZgXi28MrXjymnjEGSR80WCdab3RGMDveQ=="",
-+ ""version"": ""9.23.0"",
-+ ""resolved"": ""https://registry.npmjs.org/eslint/-/eslint-9.23.0.tgz"",
-+ ""integrity"": ""sha512-jV7AbNoFPAY1EkFYpLq5bslU9NLNO8xnEeQXwErNibVryjk67wHVmddTBilc5srIttJDBrB0eMHKZBFbSIABCw=="",
- ""dev"": true,
- ""license"": ""MIT"",
- ""dependencies"": {
- ""@eslint-community/eslint-utils"": ""^4.2.0"",
- ""@eslint-community/regexpp"": ""^4.12.1"",
- ""@eslint/config-array"": ""^0.19.2"",
-- ""@eslint/config-helpers"": ""^0.1.0"",
-+ ""@eslint/config-helpers"": ""^0.2.0"",
- ""@eslint/core"": ""^0.12.0"",
-- ""@eslint/eslintrc"": ""^3.3.0"",
-- ""@eslint/js"": ""9.22.0"",
-+ ""@eslint/eslintrc"": ""^3.3.1"",
-+ ""@eslint/js"": ""9.23.0"",
- ""@eslint/plugin-kit"": ""^0.2.7"",
- ""@humanfs/node"": ""^0.16.6"",
- ""@humanwhocodes/module-importer"": ""^1.0.1"",",chore,"update dependency eslint to v9.23.0 (main) (#16865)
-
-Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>"
-23f998a5aa71182ca791612fb2c8165c3a140efb,2019-08-19 21:54:10,Robert Fratto,ci: update apt-get before installing deps for rootless step (#914),False,"diff --git a/.circleci/config.yml b/.circleci/config.yml
-index bd623920c2bda..a35601fd14e61 100644
---- a/.circleci/config.yml
-+++ b/.circleci/config.yml
-@@ -64,7 +64,7 @@ workflows:
- - publish/canary
- - publish/docker-driver
- filters: {<<: *tag-or-master}
--
-+
- - test-helm:
- requires: [ lint, test ]
- filters: {<<: *tags}
-@@ -91,7 +91,8 @@ workflows:
- run:
- name: rootless
- command: |
-- sudo apt-get install -qy uidmap libseccomp-dev binfmt-support go-bindata
-+ sudo apt-get update && \
-+ sudo apt-get install -qy uidmap libseccomp-dev binfmt-support go-bindata
- sudo docker run --privileged linuxkit/binfmt:v0.6
-
- .img: &img",ci,update apt-get before installing deps for rootless step (#914)
-21dd4afdc76d7790e177d2dd364ecf5b629c8112,2024-05-30 19:04:05,Jack Baldry,"docs: Republish the sizing calculator but don't list it in the table of contents and don't index it (#13070)
-
-Co-authored-by: J Stickler ",False,"diff --git a/docs/sources/setup/size/_index.md b/docs/sources/setup/size/_index.md
-index 74dcb8e504964..162748eb9e3b8 100644
---- a/docs/sources/setup/size/_index.md
-+++ b/docs/sources/setup/size/_index.md
-@@ -1,4 +1,7 @@
- ---
-+_build:
-+ list: false
-+noindex: true
- title: Size the cluster
- menuTitle: Size the cluster
- description: Provides a tool that generates a Helm Chart values.yaml file based on expected ingestion, retention rate, and node type, to help size your Grafana deployment.
-@@ -6,7 +9,6 @@ aliases:
- - ../installation/sizing/
- - ../installation/helm/generate
- weight: 100
--draft: true
- ---
-
- ",docs,"Republish the sizing calculator but don't list it in the table of contents and don't index it (#13070)
-
-Co-authored-by: J Stickler "
-7654c27c121048d9022c439779f73c44105f218d,2019-12-14 03:16:44,Cyril Tovena,"Adds configurable compression algorithms for chunks (#1411)
-
-* Adds L4Z encoding.
-
-Signed-off-by: Cyril Tovena
-
-* Adds encoding benchmarks
-
-Signed-off-by: Cyril Tovena
-
-* Adds snappy encoding.
-
-Signed-off-by: Cyril Tovena
-
-* Adds chunk size test
-
-Signed-off-by: Cyril Tovena
-
-* Adds snappy v2
-
-Signed-off-by: Cyril Tovena
-
-* Improve benchmarks
-
-Signed-off-by: Cyril Tovena
-
-* Remove chunkenc
-
-Signed-off-by: Cyril Tovena
-
-* Update lz4 to latest master version.
-
-Signed-off-by: Peter Štibraný
-
-* Use temporary buffer in serialise method to avoid allocations when doing string -> byte conversion.
-It also makes code little more readable. We pool those buffers for reuse.
-
-Signed-off-by: Peter Štibraný
-
-* Added gzip -1 for comparison.
-
-Signed-off-by: Peter Štibraný
-
-* Initialize reader and buffered reader lazily.
-
-This helps with reader/buffered reader reuse.
-
-Signed-off-by: Peter Štibraný
-
-* Don't keep entries, extracted generateData function
-
-(mostly to get more understandable profile)
-
-Signed-off-by: Peter Štibraný
-
-* Improve test and benchmark to cover all encodings.
-
-Signed-off-by: Cyril Tovena
-
-* Adds support for a new chunk format with encoding info.
-
-Signed-off-by: Cyril Tovena
-
-* Ingesters now support encoding config.
-
-Signed-off-by: Cyril Tovena
-
-* Add support for no compression.
-
-Signed-off-by: Cyril Tovena
-
-* Add docs
-
-Signed-off-by: Cyril Tovena
-
-* Remove default Gzip for ByteChunk.
-
-Signed-off-by: Cyril Tovena
-
-* Removes none, snappyv2 and gzip-1
-
-Signed-off-by: Cyril Tovena
-
-* Move log test lines to testdata and add supported encoding stringer
-
-Signed-off-by: Cyril Tovena
-
-* got linted
-
-Signed-off-by: Cyril Tovena ",False,"diff --git a/docs/configuration/README.md b/docs/configuration/README.md
-index 0d4633be0d46c..46b2f399734b1 100644
---- a/docs/configuration/README.md
-+++ b/docs/configuration/README.md
-@@ -268,7 +268,7 @@ The `ingester_config` block configures Ingesters.
- [chunk_idle_period: | default = 30m]
-
- # The targeted _uncompressed_ size in bytes of a chunk block
--# When this threshold is exceeded the head block will be cut and compressed inside the chunk
-+# When this threshold is exceeded the head block will be cut and compressed inside the chunk
- [chunk_block_size: | default = 262144]
-
- # A target _compressed_ size in bytes for chunks.
-@@ -277,6 +277,13 @@ The `ingester_config` block configures Ingesters.
- # The default value of 0 for this will create chunks with a fixed 10 blocks,
- # A non zero value will create chunks with a variable number of blocks to meet the target size.
- [chunk_target_size: | default = 0]
-+
-+# The compression algorithm to use for chunks. (supported: gzip, gzip-1, lz4, none, snappy, snappyv2)
-+# You should choose your algorithm depending on your need:
-+# - `gzip` highest compression ratio but also slowest decompression speed. (144 kB per chunk)
-+# - `lz4` fastest compression speed (188 kB per chunk)
-+# - `snappy` fast and popular compression algorithm (272 kB per chunk)
-+[chunk_encoding: | default = gzip]
- ```
-
- ### lifecycler_config
-diff --git a/go.mod b/go.mod
-index a20367fe1e6df..3ec811d3ca5d6 100644
---- a/go.mod
-+++ b/go.mod
-@@ -16,8 +16,10 @@ require (
- github.com/docker/go-connections v0.4.0 // indirect
- github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
- github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8
-+ github.com/dustin/go-humanize v1.0.0
- github.com/fatih/color v1.7.0
- github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c
-+ github.com/frankban/quicktest v1.7.2 // indirect
- github.com/go-kit/kit v0.9.0
- github.com/gocql/gocql v0.0.0-20181124151448-70385f88b28b // indirect
- github.com/gogo/protobuf v1.3.0 // remember to update loki-build-image/Dockerfile too
-@@ -31,14 +33,14 @@ require (
- github.com/influxdata/go-syslog/v2 v2.0.1
- github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
- github.com/json-iterator/go v1.1.7
-- github.com/klauspost/compress v1.7.4
-- github.com/klauspost/cpuid v1.2.1 // indirect
-+ github.com/klauspost/compress v1.9.4
- github.com/mitchellh/mapstructure v1.1.2
- github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
- github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
- github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
- github.com/opencontainers/image-spec v1.0.1 // indirect
- github.com/opentracing/opentracing-go v1.1.0
-+ github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible
- github.com/pkg/errors v0.8.1
- github.com/prometheus/client_golang v1.1.0
- github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4
-diff --git a/go.sum b/go.sum
-index cb6a1b5c22241..b5caaf5fda951 100644
---- a/go.sum
-+++ b/go.sum
-@@ -159,6 +159,8 @@ github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+p
- github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ=
- github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
- github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-+github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
-+github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
- github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
- github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
- github.com/fsouza/fake-gcs-server v1.3.0 h1:f2mbomatUsbw8NRY7rzqiiWNn4BRM+Jredz0Pt70Usg=
-@@ -394,10 +396,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
- github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
- github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
- github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
--github.com/klauspost/compress v1.7.4 h1:4UqAIzZ1Ns2epCTyJ1d2xMWvxtX+FNSCYWeOFogK9nc=
--github.com/klauspost/compress v1.7.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
--github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
--github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-+github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
-+github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
- github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
- github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
- github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-@@ -506,6 +506,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
- github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
- github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
- github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-+github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible h1:5isCJDRADbeSlWx6KVXAYwrcihyCGVXr7GNCdLEVDr8=
-+github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
- github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
- github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
- github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-diff --git a/pkg/chunkenc/facade.go b/pkg/chunkenc/facade.go
-index 8556b0fdd0442..d603920e3e653 100644
---- a/pkg/chunkenc/facade.go
-+++ b/pkg/chunkenc/facade.go
-@@ -7,13 +7,18 @@ import (
- )
-
- // GzipLogChunk is a cortex encoding type for our chunks.
-+// Deprecated: the chunk encoding/compression format is inside the chunk data.
- const GzipLogChunk = encoding.Encoding(128)
-
-+// LogChunk is a cortex encoding type for our chunks.
-+const LogChunk = encoding.Encoding(129)
-+
- func init() {
- encoding.MustRegisterEncoding(GzipLogChunk, ""GzipLogChunk"", func() encoding.Chunk {
-- return &Facade{
-- c: NewMemChunk(EncGZIP),
-- }
-+ return &Facade{}
-+ })
-+ encoding.MustRegisterEncoding(LogChunk, ""LogChunk"", func() encoding.Chunk {
-+ return &Facade{}
- })
- }
-
-@@ -32,6 +37,9 @@ func NewFacade(c Chunk) encoding.Chunk {
-
- // Marshal implements encoding.Chunk.
- func (f Facade) Marshal(w io.Writer) error {
-+ if f.c == nil {
-+ return nil
-+ }
- buf, err := f.c.Bytes()
- if err != nil {
- return err
-@@ -49,11 +57,14 @@ func (f *Facade) UnmarshalFromBuf(buf []byte) error {
-
- // Encoding implements encoding.Chunk.
- func (Facade) Encoding() encoding.Encoding {
-- return GzipLogChunk
-+ return LogChunk
- }
-
- // Utilization implements encoding.Chunk.
- func (f Facade) Utilization() float64 {
-+ if f.c == nil {
-+ return 0
-+ }
- return f.c.Utilization()
- }
-
-@@ -66,7 +77,7 @@ func (f Facade) LokiChunk() Chunk {
- func UncompressedSize(c encoding.Chunk) (int, bool) {
- f, ok := c.(*Facade)
-
-- if !ok {
-+ if !ok || f.c == nil {
- return 0, false
- }
-
-diff --git a/pkg/chunkenc/gzip_test.go b/pkg/chunkenc/gzip_test.go
-deleted file mode 100644
-index 7cebc3d1373be..0000000000000
---- a/pkg/chunkenc/gzip_test.go
-+++ /dev/null
-@@ -1,396 +0,0 @@
--package chunkenc
--
--import (
-- ""bytes""
-- ""fmt""
-- ""math""
-- ""math/rand""
-- ""sync""
-- ""testing""
-- ""time""
--
-- ""github.com/stretchr/testify/assert""
--
-- ""github.com/stretchr/testify/require""
--
-- ""github.com/grafana/loki/pkg/logproto""
--)
--
--func TestGZIPBlock(t *testing.T) {
-- chk := NewMemChunk(EncGZIP)
--
-- cases := []struct {
-- ts int64
-- str string
-- cut bool
-- }{
-- {
-- ts: 1,
-- str: ""hello, world!"",
-- },
-- {
-- ts: 2,
-- str: ""hello, world2!"",
-- },
-- {
-- ts: 3,
-- str: ""hello, world3!"",
-- },
-- {
-- ts: 4,
-- str: ""hello, world4!"",
-- },
-- {
-- ts: 5,
-- str: ""hello, world5!"",
-- },
-- {
-- ts: 6,
-- str: ""hello, world6!"",
-- cut: true,
-- },
-- {
-- ts: 7,
-- str: ""hello, world7!"",
-- },
-- {
-- ts: 8,
-- str: ""hello, worl\nd8!"",
-- },
-- {
-- ts: 8,
-- str: ""hello, world 8, 2!"",
-- },
-- {
-- ts: 8,
-- str: ""hello, world 8, 3!"",
-- },
-- {
-- ts: 9,
-- str: """",
-- },
-- }
--
-- for _, c := range cases {
-- require.NoError(t, chk.Append(logprotoEntry(c.ts, c.str)))
-- if c.cut {
-- require.NoError(t, chk.cut())
-- }
-- }
--
-- it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil)
-- require.NoError(t, err)
--
-- idx := 0
-- for it.Next() {
-- e := it.Entry()
-- require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano())
-- require.Equal(t, cases[idx].str, e.Line)
-- idx++
-- }
--
-- require.NoError(t, it.Error())
-- require.Equal(t, len(cases), idx)
--
-- t.Run(""bounded-iteration"", func(t *testing.T) {
-- it, err := chk.Iterator(time.Unix(0, 3), time.Unix(0, 7), logproto.FORWARD, nil)
-- require.NoError(t, err)
--
-- idx := 2
-- for it.Next() {
-- e := it.Entry()
-- require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano())
-- require.Equal(t, cases[idx].str, e.Line)
-- idx++
-- }
-- require.NoError(t, it.Error())
-- require.Equal(t, 6, idx)
-- })
--}
--
--func TestGZIPSerialisation(t *testing.T) {
-- chk := NewMemChunk(EncGZIP)
--
-- numSamples := 500000
--
-- for i := 0; i < numSamples; i++ {
-- require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i))))
-- }
--
-- byt, err := chk.Bytes()
-- require.NoError(t, err)
--
-- bc, err := NewByteChunk(byt)
-- require.NoError(t, err)
--
-- it, err := bc.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil)
-- require.NoError(t, err)
-- for i := 0; i < numSamples; i++ {
-- require.True(t, it.Next())
--
-- e := it.Entry()
-- require.Equal(t, int64(i), e.Timestamp.UnixNano())
-- require.Equal(t, string(i), e.Line)
-- }
--
-- require.NoError(t, it.Error())
--
-- byt2, err := chk.Bytes()
-- require.NoError(t, err)
--
-- require.True(t, bytes.Equal(byt, byt2))
--}
--
--func TestGZIPChunkFilling(t *testing.T) {
-- chk := NewMemChunk(EncGZIP)
-- chk.blockSize = 1024
--
-- // We should be able to append only 10KB of logs.
-- maxBytes := chk.blockSize * blocksPerChunk
-- lineSize := 512
-- lines := maxBytes / lineSize
--
-- logLine := string(make([]byte, lineSize))
-- entry := &logproto.Entry{
-- Timestamp: time.Unix(0, 0),
-- Line: logLine,
-- }
--
-- i := int64(0)
-- for ; chk.SpaceFor(entry) && i < 30; i++ {
-- entry.Timestamp = time.Unix(0, i)
-- require.NoError(t, chk.Append(entry))
-- }
--
-- require.Equal(t, int64(lines), i)
--
-- it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, nil)
-- require.NoError(t, err)
-- i = 0
-- for it.Next() {
-- entry := it.Entry()
-- require.Equal(t, i, entry.Timestamp.UnixNano())
-- i++
-- }
--
-- require.Equal(t, int64(lines), i)
--}
--
--func TestGZIPChunkTargetSize(t *testing.T) {
-- targetSize := 1024 * 1024
-- chk := NewMemChunkSize(EncGZIP, 1024, targetSize)
--
-- lineSize := 512
-- entry := &logproto.Entry{
-- Timestamp: time.Unix(0, 0),
-- Line: """",
-- }
--
-- // Use a random number to generate random log data, otherwise the gzip compression is way too good
-- // and the following loop has to run waaayyyyy to many times
-- // Using the same seed should guarantee the same random numbers and same test data.
-- r := rand.New(rand.NewSource(99))
--
-- i := int64(0)
--
-- for ; chk.SpaceFor(entry) && i < 5000; i++ {
-- logLine := make([]byte, lineSize)
-- for j := range logLine {
-- logLine[j] = byte(r.Int())
-- }
-- entry = &logproto.Entry{
-- Timestamp: time.Unix(0, 0),
-- Line: string(logLine),
-- }
-- entry.Timestamp = time.Unix(0, i)
-- require.NoError(t, chk.Append(entry))
-- }
--
-- // 5000 is a limit ot make sure the test doesn't run away, we shouldn't need this many log lines to make 1MB chunk
-- require.NotEqual(t, 5000, i)
--
-- require.NoError(t, chk.Close())
--
-- require.Equal(t, 0, chk.head.size)
--
-- // Even though the seed is static above and results should be deterministic,
-- // we will allow +/- 10% variance
-- minSize := int(float64(targetSize) * 0.9)
-- maxSize := int(float64(targetSize) * 1.1)
-- require.Greater(t, chk.CompressedSize(), minSize)
-- require.Less(t, chk.CompressedSize(), maxSize)
--
-- // Also verify our utilization is close to 1.0
-- ut := chk.Utilization()
-- require.Greater(t, ut, 0.99)
-- require.Less(t, ut, 1.01)
--
--}
--
--func TestMemChunk_AppendOutOfOrder(t *testing.T) {
-- t.Parallel()
--
-- type tester func(t *testing.T, chk *MemChunk)
--
-- tests := map[string]tester{
-- ""append out of order in the same block"": func(t *testing.T, chk *MemChunk) {
-- assert.NoError(t, chk.Append(logprotoEntry(5, ""test"")))
-- assert.NoError(t, chk.Append(logprotoEntry(6, ""test"")))
--
-- assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error())
-- },
-- ""append out of order in a new block right after cutting the previous one"": func(t *testing.T, chk *MemChunk) {
-- assert.NoError(t, chk.Append(logprotoEntry(5, ""test"")))
-- assert.NoError(t, chk.Append(logprotoEntry(6, ""test"")))
-- assert.NoError(t, chk.cut())
--
-- assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error())
-- },
-- ""append out of order in a new block after multiple cuts"": func(t *testing.T, chk *MemChunk) {
-- assert.NoError(t, chk.Append(logprotoEntry(5, ""test"")))
-- assert.NoError(t, chk.cut())
--
-- assert.NoError(t, chk.Append(logprotoEntry(6, ""test"")))
-- assert.NoError(t, chk.cut())
--
-- assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error())
-- },
-- }
--
-- for testName, tester := range tests {
-- tester := tester
--
-- t.Run(testName, func(t *testing.T) {
-- t.Parallel()
--
-- tester(t, NewMemChunk(EncGZIP))
-- })
-- }
--}
--
--var result []Chunk
--
--func BenchmarkWriteGZIP(b *testing.B) {
-- chunks := []Chunk{}
--
-- entry := &logproto.Entry{
-- Timestamp: time.Unix(0, 0),
-- Line: RandString(512),
-- }
-- i := int64(0)
--
-- for n := 0; n < b.N; n++ {
-- c := NewMemChunk(EncGZIP)
-- // adds until full so we trigger cut which serialize using gzip
-- for c.SpaceFor(entry) {
-- _ = c.Append(entry)
-- entry.Timestamp = time.Unix(0, i)
-- i++
-- }
-- chunks = append(chunks, c)
-- }
-- result = chunks
--}
--
--func BenchmarkReadGZIP(b *testing.B) {
-- chunks := []Chunk{}
-- i := int64(0)
-- for n := 0; n < 50; n++ {
-- entry := randSizeEntry(0)
-- c := NewMemChunk(EncGZIP)
-- // adds until full so we trigger cut which serialize using gzip
-- for c.SpaceFor(entry) {
-- _ = c.Append(entry)
-- i++
-- entry = randSizeEntry(i)
-- }
-- c.Close()
-- chunks = append(chunks, c)
-- }
-- entries := []logproto.Entry{}
-- b.ResetTimer()
-- for n := 0; n < b.N; n++ {
-- var wg sync.WaitGroup
-- for _, c := range chunks {
-- wg.Add(1)
-- go func(c Chunk) {
-- iterator, err := c.Iterator(time.Unix(0, 0), time.Now(), logproto.BACKWARD, nil)
-- if err != nil {
-- panic(err)
-- }
-- for iterator.Next() {
-- entries = append(entries, iterator.Entry())
-- }
-- iterator.Close()
-- wg.Done()
-- }(c)
-- }
-- wg.Wait()
-- }
--}
--
--func BenchmarkHeadBlockIterator(b *testing.B) {
--
-- for _, j := range []int{100000, 50000, 15000, 10000} {
-- b.Run(fmt.Sprintf(""Size %d"", j), func(b *testing.B) {
--
-- h := headBlock{}
--
-- for i := 0; i < j; i++ {
-- if err := h.append(int64(i), ""this is the append string""); err != nil {
-- b.Fatal(err)
-- }
-- }
--
-- b.ResetTimer()
--
-- for n := 0; n < b.N; n++ {
-- iter := h.iterator(0, math.MaxInt64, nil)
--
-- for iter.Next() {
-- _ = iter.Entry()
-- }
-- }
-- })
-- }
--}
--
--func randSizeEntry(ts int64) *logproto.Entry {
-- var line string
-- switch ts % 10 {
-- case 0:
-- line = RandString(27000)
-- case 1:
-- line = RandString(10000)
-- case 2, 3, 4, 5:
-- line = RandString(2048)
-- default:
-- line = RandString(4096)
-- }
-- return &logproto.Entry{
-- Timestamp: time.Unix(0, ts),
-- Line: line,
-- }
--}
--
--const charset = ""abcdefghijklmnopqrstuvwxyz"" +
-- ""ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789""
--
--func RandStringWithCharset(length int, charset string) string {
-- b := make([]byte, length)
-- for i := range b {
-- b[i] = charset[rand.Intn(len(charset)-1)]
-- }
-- return string(b)
--}
--
--func RandString(length int) string {
-- return RandStringWithCharset(length, charset)
--}
--
--func logprotoEntry(ts int64, line string) *logproto.Entry {
-- return &logproto.Entry{
-- Timestamp: time.Unix(0, ts),
-- Line: line,
-- }
--}
-diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go
-index b9446b45a95e1..bec5966116b9d 100644
---- a/pkg/chunkenc/interface.go
-+++ b/pkg/chunkenc/interface.go
-@@ -2,7 +2,8 @@ package chunkenc
-
- import (
- ""errors""
-- ""io""
-+ ""fmt""
-+ ""strings""
- ""time""
-
- ""github.com/grafana/loki/pkg/iter""
-@@ -20,15 +21,23 @@ var (
- )
-
- // Encoding is the identifier for a chunk encoding.
--type Encoding uint8
-+type Encoding byte
-
- // The different available encodings.
- const (
- EncNone Encoding = iota
- EncGZIP
- EncDumb
-+ EncLZ4
-+ EncSnappy
- )
-
-+var supportedEncoding = []Encoding{
-+ EncGZIP,
-+ EncLZ4,
-+ EncSnappy,
-+}
-+
- func (e Encoding) String() string {
- switch e {
- case EncGZIP:
-@@ -37,11 +46,38 @@ func (e Encoding) String() string {
- return ""none""
- case EncDumb:
- return ""dumb""
-+ case EncLZ4:
-+ return ""lz4""
-+ case EncSnappy:
-+ return ""snappy""
- default:
- return ""unknown""
- }
- }
-
-+// ParseEncoding parses a chunk encoding (compression algorithm) by its name.
-+func ParseEncoding(enc string) (Encoding, error) {
-+ for _, e := range supportedEncoding {
-+ if strings.EqualFold(e.String(), enc) {
-+ return e, nil
-+ }
-+ }
-+ return 0, fmt.Errorf(""invalid encoding: %s, supported: %s"", enc, SupportedEncoding())
-+
-+}
-+
-+// SupportedEncoding returns the list of supported Encoding.
-+func SupportedEncoding() string {
-+ var sb strings.Builder
-+ for i := range supportedEncoding {
-+ sb.WriteString(supportedEncoding[i].String())
-+ if i != len(supportedEncoding)-1 {
-+ sb.WriteString("", "")
-+ }
-+ }
-+ return sb.String()
-+}
-+
- // Chunk is the interface for the compressed logs chunk format.
- type Chunk interface {
- Bounds() (time.Time, time.Time)
-@@ -56,17 +92,3 @@ type Chunk interface {
- CompressedSize() int
- Close() error
- }
--
--// CompressionWriter is the writer that compresses the data passed to it.
--type CompressionWriter interface {
-- Write(p []byte) (int, error)
-- Close() error
-- Flush() error
-- Reset(w io.Writer)
--}
--
--// CompressionReader reads the compressed data.
--type CompressionReader interface {
-- Read(p []byte) (int, error)
-- Reset(r io.Reader) error
--}
-diff --git a/pkg/chunkenc/interface_test.go b/pkg/chunkenc/interface_test.go
-new file mode 100644
-index 0000000000000..abb61a673eeb4
---- /dev/null
-+++ b/pkg/chunkenc/interface_test.go
-@@ -0,0 +1,26 @@
-+package chunkenc
-+
-+import ""testing""
-+
-+func TestParseEncoding(t *testing.T) {
-+ tests := []struct {
-+ enc string
-+ want Encoding
-+ wantErr bool
-+ }{
-+ {""gzip"", EncGZIP, false},
-+ {""bad"", 0, true},
-+ }
-+ for _, tt := range tests {
-+ t.Run(tt.enc, func(t *testing.T) {
-+ got, err := ParseEncoding(tt.enc)
-+ if (err != nil) != tt.wantErr {
-+ t.Errorf(""ParseEncoding() error = %v, wantErr %v"", err, tt.wantErr)
-+ return
-+ }
-+ if got != tt.want {
-+ t.Errorf(""ParseEncoding() = %v, want %v"", got, tt.want)
-+ }
-+ })
-+ }
-+}
-diff --git a/pkg/chunkenc/gzip.go b/pkg/chunkenc/memchunk.go
-similarity index 85%
-rename from pkg/chunkenc/gzip.go
-rename to pkg/chunkenc/memchunk.go
-index 2132205c523ff..cbf10b577868c 100644
---- a/pkg/chunkenc/gzip.go
-+++ b/pkg/chunkenc/memchunk.go
-@@ -23,6 +23,7 @@ var (
- magicNumber = uint32(0x12EE56A)
-
- chunkFormatV1 = byte(1)
-+ chunkFormatV2 = byte(2)
- )
-
- // The table gets initialized with sync.Once but may still cause a race
-@@ -55,8 +56,12 @@ type MemChunk struct {
- // Current in-mem block being appended to.
- head *headBlock
-
-+ // the chunk format, defaults to v2
-+ format byte
- encoding Encoding
-- cPool CompressionPool
-+
-+ readers ReaderPool
-+ writers WriterPool
- }
-
- type block struct {
-@@ -99,32 +104,34 @@ func (hb *headBlock) append(ts int64, line string) error {
- return nil
- }
-
--func (hb *headBlock) serialise(pool CompressionPool) ([]byte, error) {
-- buf := &bytes.Buffer{}
-+func (hb *headBlock) serialise(pool WriterPool) ([]byte, error) {
-+ inBuf := serializeBytesBufferPool.Get().(*bytes.Buffer)
-+ outBuf := &bytes.Buffer{}
-+
- encBuf := make([]byte, binary.MaxVarintLen64)
-- compressedWriter := pool.GetWriter(buf)
-+ compressedWriter := pool.GetWriter(outBuf)
- for _, logEntry := range hb.entries {
- n := binary.PutVarint(encBuf, logEntry.t)
-- _, err := compressedWriter.Write(encBuf[:n])
-- if err != nil {
-- return nil, errors.Wrap(err, ""appending entry"")
-- }
-+ inBuf.Write(encBuf[:n])
-
- n = binary.PutUvarint(encBuf, uint64(len(logEntry.s)))
-- _, err = compressedWriter.Write(encBuf[:n])
-- if err != nil {
-- return nil, errors.Wrap(err, ""appending entry"")
-- }
-- _, err = compressedWriter.Write([]byte(logEntry.s))
-- if err != nil {
-- return nil, errors.Wrap(err, ""appending entry"")
-- }
-+ inBuf.Write(encBuf[:n])
-+
-+ inBuf.WriteString(logEntry.s)
-+ }
-+
-+ if _, err := compressedWriter.Write(inBuf.Bytes()); err != nil {
-+ return nil, errors.Wrap(err, ""appending entry"")
- }
- if err := compressedWriter.Close(); err != nil {
- return nil, errors.Wrap(err, ""flushing pending compress buffer"")
- }
-+
-+ inBuf.Reset()
-+ serializeBytesBufferPool.Put(inBuf)
-+
- pool.PutWriter(compressedWriter)
-- return buf.Bytes(), nil
-+ return outBuf.Bytes(), nil
- }
-
- type entry struct {
-@@ -132,6 +139,11 @@ type entry struct {
- s string
- }
-
-+// NewMemChunk returns a new in-mem chunk for query.
-+func NewMemChunk(enc Encoding) *MemChunk {
-+ return NewMemChunkSize(enc, 256*1024, 0)
-+}
-+
- // NewMemChunkSize returns a new in-mem chunk.
- // Mainly for config push size.
- func NewMemChunkSize(enc Encoding, blockSize, targetSize int) *MemChunk {
-@@ -140,34 +152,22 @@ func NewMemChunkSize(enc Encoding, blockSize, targetSize int) *MemChunk {
- targetSize: targetSize, // Desired chunk size in compressed bytes
- blocks: []block{},
-
-- head: &headBlock{},
-+ head: &headBlock{},
-+ format: chunkFormatV2,
-
- encoding: enc,
-- }
--
-- switch enc {
-- case EncGZIP:
-- c.cPool = &Gzip
-- default:
-- panic(""unknown encoding"")
-+ writers: getWriterPool(enc),
-+ readers: getReaderPool(enc),
- }
-
- return c
- }
-
--// NewMemChunk returns a new in-mem chunk for query.
--func NewMemChunk(enc Encoding) *MemChunk {
-- return NewMemChunkSize(enc, 256*1024, 0)
--}
--
- // NewByteChunk returns a MemChunk on the passed bytes.
- func NewByteChunk(b []byte) (*MemChunk, error) {
- bc := &MemChunk{
-- cPool: &Gzip,
-- encoding: EncGZIP,
-- head: &headBlock{}, // Dummy, empty headblock.
-+ head: &headBlock{}, // Dummy, empty headblock.
- }
--
- db := decbuf{b: b}
-
- // Verify the header.
-@@ -178,7 +178,18 @@ func NewByteChunk(b []byte) (*MemChunk, error) {
- if m != magicNumber {
- return nil, errors.Errorf(""invalid magic number %x"", m)
- }
-- if version != 1 {
-+ bc.format = version
-+ switch version {
-+ case chunkFormatV1:
-+ bc.readers, bc.writers = &Gzip, &Gzip
-+ case chunkFormatV2:
-+ // format v2 has a byte for block encoding.
-+ enc := Encoding(db.byte())
-+ if db.err() != nil {
-+ return nil, errors.Wrap(db.err(), ""verifying encoding"")
-+ }
-+ bc.readers, bc.writers = getReaderPool(enc), getWriterPool(enc)
-+ default:
- return nil, errors.Errorf(""invalid version %d"", version)
- }
-
-@@ -242,7 +253,11 @@ func (c *MemChunk) Bytes() ([]byte, error) {
-
- // Write the header (magicNum + version).
- eb.putBE32(magicNumber)
-- eb.putByte(chunkFormatV1)
-+ eb.putByte(c.format)
-+ if c.format == chunkFormatV2 {
-+ // chunk format v2 has a byte for encoding.
-+ eb.putByte(byte(c.encoding))
-+ }
-
- n, err := buf.Write(eb.get())
- if err != nil {
-@@ -401,7 +416,7 @@ func (c *MemChunk) cut() error {
- return nil
- }
-
-- b, err := c.head.serialise(c.cPool)
-+ b, err := c.head.serialise(c.writers)
- if err != nil {
- return err
- }
-@@ -451,7 +466,7 @@ func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction
-
- for _, b := range c.blocks {
- if maxt > b.mint && b.maxt > mint {
-- its = append(its, b.iterator(c.cPool, filter))
-+ its = append(its, b.iterator(c.readers, filter))
- }
- }
-
-@@ -472,7 +487,7 @@ func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction
- return iter.NewEntryIteratorBackward(iterForward)
- }
-
--func (b block) iterator(pool CompressionPool, filter logql.Filter) iter.EntryIterator {
-+func (b block) iterator(pool ReaderPool, filter logql.Filter) iter.EntryIterator {
- if len(b.b) == 0 {
- return emptyIterator
- }
-@@ -537,9 +552,11 @@ func (li *listIterator) Close() error { return nil }
- func (li *listIterator) Labels() string { return """" }
-
- type bufferedIterator struct {
-- s *bufio.Reader
-- reader CompressionReader
-- pool CompressionPool
-+ origBytes []byte
-+
-+ bufReader *bufio.Reader
-+ reader io.Reader
-+ pool ReaderPool
-
- cur logproto.Entry
-
-@@ -553,18 +570,24 @@ type bufferedIterator struct {
- filter logql.Filter
- }
-
--func newBufferedIterator(pool CompressionPool, b []byte, filter logql.Filter) *bufferedIterator {
-- r := pool.GetReader(bytes.NewBuffer(b))
-+func newBufferedIterator(pool ReaderPool, b []byte, filter logql.Filter) *bufferedIterator {
- return &bufferedIterator{
-- s: BufReaderPool.Get(r),
-- reader: r,
-- pool: pool,
-- filter: filter,
-- decBuf: make([]byte, binary.MaxVarintLen64),
-+ origBytes: b,
-+ reader: nil, // will be initialized later
-+ bufReader: nil, // will be initialized later
-+ pool: pool,
-+ filter: filter,
-+ decBuf: make([]byte, binary.MaxVarintLen64),
- }
- }
-
- func (si *bufferedIterator) Next() bool {
-+ if !si.closed && si.reader == nil {
-+ // initialize reader now, hopefully reusing one of the previous readers
-+ si.reader = si.pool.GetReader(bytes.NewBuffer(si.origBytes))
-+ si.bufReader = BufReaderPool.Get(si.reader)
-+ }
-+
- for {
- ts, line, ok := si.moveNext()
- if !ok {
-@@ -582,7 +605,7 @@ func (si *bufferedIterator) Next() bool {
-
- // moveNext moves the buffer to the next entry
- func (si *bufferedIterator) moveNext() (int64, []byte, bool) {
-- ts, err := binary.ReadVarint(si.s)
-+ ts, err := binary.ReadVarint(si.bufReader)
- if err != nil {
- if err != io.EOF {
- si.err = err
-@@ -590,7 +613,7 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) {
- return 0, nil, false
- }
-
-- l, err := binary.ReadUvarint(si.s)
-+ l, err := binary.ReadUvarint(si.bufReader)
- if err != nil {
- if err != io.EOF {
- si.err = err
-@@ -612,13 +635,13 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) {
- }
-
- // Then process reading the line.
-- n, err := si.s.Read(si.buf[:lineSize])
-+ n, err := si.bufReader.Read(si.buf[:lineSize])
- if err != nil && err != io.EOF {
- si.err = err
- return 0, nil, false
- }
- for n < lineSize {
-- r, err := si.s.Read(si.buf[n:lineSize])
-+ r, err := si.bufReader.Read(si.buf[n:lineSize])
- if err != nil {
- si.err = err
- return 0, nil, false
-@@ -638,11 +661,12 @@ func (si *bufferedIterator) Close() error {
- if !si.closed {
- si.closed = true
- si.pool.PutReader(si.reader)
-- BufReaderPool.Put(si.s)
-+ BufReaderPool.Put(si.bufReader)
- if si.buf != nil {
- BytesBufferPool.Put(si.buf)
- }
-- si.s = nil
-+ si.origBytes = nil
-+ si.bufReader = nil
- si.buf = nil
- si.decBuf = nil
- si.reader = nil
-diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
-new file mode 100644
-index 0000000000000..433d478a5d070
---- /dev/null
-+++ b/pkg/chunkenc/memchunk_test.go
-@@ -0,0 +1,418 @@
-+package chunkenc
-+
-+import (
-+ ""bytes""
-+ ""fmt""
-+ ""math""
-+ ""math/rand""
-+ ""testing""
-+ ""time""
-+
-+ ""github.com/stretchr/testify/assert""
-+
-+ ""github.com/dustin/go-humanize""
-+ ""github.com/grafana/loki/pkg/chunkenc/testdata""
-+ ""github.com/grafana/loki/pkg/logproto""
-+ ""github.com/stretchr/testify/require""
-+)
-+
-+var testEncoding = []Encoding{
-+ EncNone,
-+ EncGZIP,
-+ EncLZ4,
-+ EncSnappy,
-+}
-+
-+func TestBlock(t *testing.T) {
-+ for _, enc := range testEncoding {
-+ t.Run(enc.String(), func(t *testing.T) {
-+ chk := NewMemChunk(enc)
-+ cases := []struct {
-+ ts int64
-+ str string
-+ cut bool
-+ }{
-+ {
-+ ts: 1,
-+ str: ""hello, world!"",
-+ },
-+ {
-+ ts: 2,
-+ str: ""hello, world2!"",
-+ },
-+ {
-+ ts: 3,
-+ str: ""hello, world3!"",
-+ },
-+ {
-+ ts: 4,
-+ str: ""hello, world4!"",
-+ },
-+ {
-+ ts: 5,
-+ str: ""hello, world5!"",
-+ },
-+ {
-+ ts: 6,
-+ str: ""hello, world6!"",
-+ cut: true,
-+ },
-+ {
-+ ts: 7,
-+ str: ""hello, world7!"",
-+ },
-+ {
-+ ts: 8,
-+ str: ""hello, worl\nd8!"",
-+ },
-+ {
-+ ts: 8,
-+ str: ""hello, world 8, 2!"",
-+ },
-+ {
-+ ts: 8,
-+ str: ""hello, world 8, 3!"",
-+ },
-+ {
-+ ts: 9,
-+ str: """",
-+ },
-+ }
-+
-+ for _, c := range cases {
-+ require.NoError(t, chk.Append(logprotoEntry(c.ts, c.str)))
-+ if c.cut {
-+ require.NoError(t, chk.cut())
-+ }
-+ }
-+
-+ it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil)
-+ require.NoError(t, err)
-+
-+ idx := 0
-+ for it.Next() {
-+ e := it.Entry()
-+ require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano())
-+ require.Equal(t, cases[idx].str, e.Line)
-+ idx++
-+ }
-+
-+ require.NoError(t, it.Error())
-+ require.Equal(t, len(cases), idx)
-+
-+ t.Run(""bounded-iteration"", func(t *testing.T) {
-+ it, err := chk.Iterator(time.Unix(0, 3), time.Unix(0, 7), logproto.FORWARD, nil)
-+ require.NoError(t, err)
-+
-+ idx := 2
-+ for it.Next() {
-+ e := it.Entry()
-+ require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano())
-+ require.Equal(t, cases[idx].str, e.Line)
-+ idx++
-+ }
-+ require.NoError(t, it.Error())
-+ require.Equal(t, 6, idx)
-+ })
-+ })
-+ }
-+}
-+
-+func TestReadFormatV1(t *testing.T) {
-+ c := NewMemChunk(EncGZIP)
-+ fillChunk(c)
-+ // overrides default v2 format
-+ c.format = chunkFormatV1
-+
-+ b, err := c.Bytes()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ r, err := NewByteChunk(b)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ it, err := r.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil)
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+
-+ i := int64(0)
-+ for it.Next() {
-+ require.Equal(t, i, it.Entry().Timestamp.UnixNano())
-+ require.Equal(t, testdata.LogString(i), it.Entry().Line)
-+
-+ i++
-+ }
-+}
-+
-+func TestSerialization(t *testing.T) {
-+ for _, enc := range testEncoding {
-+ t.Run(enc.String(), func(t *testing.T) {
-+ chk := NewMemChunk(enc)
-+
-+ numSamples := 500000
-+
-+ for i := 0; i < numSamples; i++ {
-+ require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i))))
-+ }
-+
-+ byt, err := chk.Bytes()
-+ require.NoError(t, err)
-+
-+ bc, err := NewByteChunk(byt)
-+ require.NoError(t, err)
-+
-+ it, err := bc.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil)
-+ require.NoError(t, err)
-+ for i := 0; i < numSamples; i++ {
-+ require.True(t, it.Next())
-+
-+ e := it.Entry()
-+ require.Equal(t, int64(i), e.Timestamp.UnixNano())
-+ require.Equal(t, string(i), e.Line)
-+ }
-+
-+ require.NoError(t, it.Error())
-+
-+ byt2, err := chk.Bytes()
-+ require.NoError(t, err)
-+
-+ require.True(t, bytes.Equal(byt, byt2))
-+ })
-+ }
-+}
-+
-+func TestChunkFilling(t *testing.T) {
-+ for _, enc := range testEncoding {
-+ t.Run(enc.String(), func(t *testing.T) {
-+ chk := NewMemChunk(enc)
-+ chk.blockSize = 1024
-+
-+ // We should be able to append only 10KB of logs.
-+ maxBytes := chk.blockSize * blocksPerChunk
-+ lineSize := 512
-+ lines := maxBytes / lineSize
-+
-+ logLine := string(make([]byte, lineSize))
-+ entry := &logproto.Entry{
-+ Timestamp: time.Unix(0, 0),
-+ Line: logLine,
-+ }
-+
-+ i := int64(0)
-+ for ; chk.SpaceFor(entry) && i < 30; i++ {
-+ entry.Timestamp = time.Unix(0, i)
-+ require.NoError(t, chk.Append(entry))
-+ }
-+
-+ require.Equal(t, int64(lines), i)
-+
-+ it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, nil)
-+ require.NoError(t, err)
-+ i = 0
-+ for it.Next() {
-+ entry := it.Entry()
-+ require.Equal(t, i, entry.Timestamp.UnixNano())
-+ i++
-+ }
-+
-+ require.Equal(t, int64(lines), i)
-+ })
-+ }
-+}
-+
-+func TestGZIPChunkTargetSize(t *testing.T) {
-+ targetSize := 1024 * 1024
-+ chk := NewMemChunkSize(EncGZIP, 1024, targetSize)
-+
-+ lineSize := 512
-+ entry := &logproto.Entry{
-+ Timestamp: time.Unix(0, 0),
-+ Line: """",
-+ }
-+
-+ // Use a random number to generate random log data, otherwise the gzip compression is way too good
-+ // and the following loop has to run waaayyyyy too many times
-+ // Using the same seed should guarantee the same random numbers and same test data.
-+ r := rand.New(rand.NewSource(99))
-+
-+ i := int64(0)
-+
-+ for ; chk.SpaceFor(entry) && i < 5000; i++ {
-+ logLine := make([]byte, lineSize)
-+ for j := range logLine {
-+ logLine[j] = byte(r.Int())
-+ }
-+ entry = &logproto.Entry{
-+ Timestamp: time.Unix(0, 0),
-+ Line: string(logLine),
-+ }
-+ entry.Timestamp = time.Unix(0, i)
-+ require.NoError(t, chk.Append(entry))
-+ }
-+
-+ // 5000 is a limit to make sure the test doesn't run away; we shouldn't need this many log lines to make a 1MB chunk
-+ require.NotEqual(t, 5000, i)
-+
-+ require.NoError(t, chk.Close())
-+
-+ require.Equal(t, 0, chk.head.size)
-+
-+ // Even though the seed is static above and results should be deterministic,
-+ // we will allow +/- 10% variance
-+ minSize := int(float64(targetSize) * 0.9)
-+ maxSize := int(float64(targetSize) * 1.1)
-+ require.Greater(t, chk.CompressedSize(), minSize)
-+ require.Less(t, chk.CompressedSize(), maxSize)
-+
-+ // Also verify our utilization is close to 1.0
-+ ut := chk.Utilization()
-+ require.Greater(t, ut, 0.99)
-+ require.Less(t, ut, 1.01)
-+
-+}
-+
-+func TestMemChunk_AppendOutOfOrder(t *testing.T) {
-+ t.Parallel()
-+
-+ type tester func(t *testing.T, chk *MemChunk)
-+
-+ tests := map[string]tester{
-+ ""append out of order in the same block"": func(t *testing.T, chk *MemChunk) {
-+ assert.NoError(t, chk.Append(logprotoEntry(5, ""test"")))
-+ assert.NoError(t, chk.Append(logprotoEntry(6, ""test"")))
-+
-+ assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error())
-+ },
-+ ""append out of order in a new block right after cutting the previous one"": func(t *testing.T, chk *MemChunk) {
-+ assert.NoError(t, chk.Append(logprotoEntry(5, ""test"")))
-+ assert.NoError(t, chk.Append(logprotoEntry(6, ""test"")))
-+ assert.NoError(t, chk.cut())
-+
-+ assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error())
-+ },
-+ ""append out of order in a new block after multiple cuts"": func(t *testing.T, chk *MemChunk) {
-+ assert.NoError(t, chk.Append(logprotoEntry(5, ""test"")))
-+ assert.NoError(t, chk.cut())
-+
-+ assert.NoError(t, chk.Append(logprotoEntry(6, ""test"")))
-+ assert.NoError(t, chk.cut())
-+
-+ assert.EqualError(t, chk.Append(logprotoEntry(1, ""test"")), ErrOutOfOrder.Error())
-+ },
-+ }
-+
-+ for testName, tester := range tests {
-+ tester := tester
-+
-+ t.Run(testName, func(t *testing.T) {
-+ t.Parallel()
-+
-+ tester(t, NewMemChunk(EncGZIP))
-+ })
-+ }
-+}
-+
-+func TestChunkSize(t *testing.T) {
-+ for _, enc := range testEncoding {
-+ t.Run(enc.String(), func(t *testing.T) {
-+ c := NewMemChunk(enc)
-+ inserted := fillChunk(c)
-+ b, err := c.Bytes()
-+ if err != nil {
-+ t.Fatal(err)
-+ }
-+ t.Log(""Chunk size"", humanize.Bytes(uint64(len(b))))
-+ t.Log(""characters "", inserted)
-+ })
-+
-+ }
-+}
-+
-+var result []Chunk
-+
-+func BenchmarkWrite(b *testing.B) {
-+ chunks := []Chunk{}
-+
-+ entry := &logproto.Entry{
-+ Timestamp: time.Unix(0, 0),
-+ Line: testdata.LogString(0),
-+ }
-+ i := int64(0)
-+
-+ for _, enc := range testEncoding {
-+ b.Run(enc.String(), func(b *testing.B) {
-+ for n := 0; n < b.N; n++ {
-+ c := NewMemChunk(enc)
-+ // adds until full so we trigger cut, which serializes using the chunk's encoding
-+ for c.SpaceFor(entry) {
-+ _ = c.Append(entry)
-+ entry.Timestamp = time.Unix(0, i)
-+ entry.Line = testdata.LogString(i)
-+ i++
-+ }
-+ chunks = append(chunks, c)
-+ }
-+ result = chunks
-+ })
-+ }
-+
-+}
-+
-+func BenchmarkRead(b *testing.B) {
-+ for _, enc := range testEncoding {
-+ b.Run(enc.String(), func(b *testing.B) {
-+ chunks := generateData(enc)
-+ b.ResetTimer()
-+ bytesRead := int64(0)
-+ now := time.Now()
-+ for n := 0; n < b.N; n++ {
-+ for _, c := range chunks {
-+ // use forward iterator for benchmark -- backward iterator does extra allocations by keeping entries in memory
-+ iterator, err := c.Iterator(time.Unix(0, 0), time.Now(), logproto.FORWARD, nil)
-+ if err != nil {
-+ panic(err)
-+ }
-+ for iterator.Next() {
-+ e := iterator.Entry()
-+ bytesRead += int64(len(e.Line))
-+ }
-+ if err := iterator.Close(); err != nil {
-+ b.Fatal(err)
-+ }
-+ }
-+ }
-+ b.Log(""bytes per second "", humanize.Bytes(uint64(float64(bytesRead)/time.Since(now).Seconds())))
-+ b.Log(""n="", b.N)
-+ })
-+ }
-+}
-+
-+func BenchmarkHeadBlockIterator(b *testing.B) {
-+
-+ for _, j := range []int{100000, 50000, 15000, 10000} {
-+ b.Run(fmt.Sprintf(""Size %d"", j), func(b *testing.B) {
-+
-+ h := headBlock{}
-+
-+ for i := 0; i < j; i++ {
-+ if err := h.append(int64(i), ""this is the append string""); err != nil {
-+ b.Fatal(err)
-+ }
-+ }
-+
-+ b.ResetTimer()
-+
-+ for n := 0; n < b.N; n++ {
-+ iter := h.iterator(0, math.MaxInt64, nil)
-+
-+ for iter.Next() {
-+ _ = iter.Entry()
-+ }
-+ }
-+ })
-+ }
-+}
-diff --git a/pkg/chunkenc/pool.go b/pkg/chunkenc/pool.go
-index 12a9e9df61904..51461d44fcbfe 100644
---- a/pkg/chunkenc/pool.go
-+++ b/pkg/chunkenc/pool.go
-@@ -2,25 +2,39 @@ package chunkenc
-
- import (
- ""bufio""
-+ ""bytes""
- ""io""
- ""sync""
-
-+ ""github.com/golang/snappy""
- ""github.com/klauspost/compress/gzip""
-+ ""github.com/pierrec/lz4""
- ""github.com/prometheus/prometheus/pkg/pool""
- )
-
--// CompressionPool is a pool of CompressionWriter and CompressionReader
-+// WriterPool is a pool of io.Writer
- // This is used by every chunk to avoid unnecessary allocations.
--type CompressionPool interface {
-- GetWriter(io.Writer) CompressionWriter
-- PutWriter(CompressionWriter)
-- GetReader(io.Reader) CompressionReader
-- PutReader(CompressionReader)
-+type WriterPool interface {
-+ GetWriter(io.Writer) io.WriteCloser
-+ PutWriter(io.WriteCloser)
-+}
-+
-+// ReaderPool similar to WriterPool but for reading chunks.
-+type ReaderPool interface {
-+ GetReader(io.Reader) io.Reader
-+ PutReader(io.Reader)
- }
-
- var (
- // Gzip is the gun zip compression pool
-- Gzip GzipPool
-+ Gzip = GzipPool{level: gzip.DefaultCompression}
-+ // LZ4 is the l4z compression pool
-+ LZ4 LZ4Pool
-+ // Snappy is the snappy compression pool
-+ Snappy SnappyPool
-+ // Noop is the no compression pool
-+ Noop NoopPool
-+
- // BufReaderPool is bufio.Reader pool
- BufReaderPool = &BufioReaderPool{
- pool: sync.Pool{
-@@ -29,54 +43,180 @@ var (
- }
- // BytesBufferPool is a bytes buffer used for lines decompressed.
- // Buckets [0.5KB,1KB,2KB,4KB,8KB]
-- BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
-+ BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
-+ serializeBytesBufferPool = sync.Pool{
-+ New: func() interface{} {
-+ return &bytes.Buffer{}
-+ },
-+ }
- )
-
-+func getWriterPool(enc Encoding) WriterPool {
-+ return getReaderPool(enc).(WriterPool)
-+}
-+
-+func getReaderPool(enc Encoding) ReaderPool {
-+ switch enc {
-+ case EncGZIP:
-+ return &Gzip
-+ case EncLZ4:
-+ return &LZ4
-+ case EncSnappy:
-+ return &Snappy
-+ case EncNone:
-+ return &Noop
-+ default:
-+ panic(""unknown encoding"")
-+ }
-+}
-+
- // GzipPool is a gzip compression pool
- type GzipPool struct {
- readers sync.Pool
- writers sync.Pool
-+ level int
- }
-
- // GetReader gets or creates a new CompressionReader and resets it to read from src
--func (pool *GzipPool) GetReader(src io.Reader) (reader CompressionReader) {
-+func (pool *GzipPool) GetReader(src io.Reader) io.Reader {
- if r := pool.readers.Get(); r != nil {
-- reader = r.(CompressionReader)
-+ reader := r.(*gzip.Reader)
- err := reader.Reset(src)
- if err != nil {
- panic(err)
- }
-- } else {
-- var err error
-- reader, err = gzip.NewReader(src)
-- if err != nil {
-- panic(err)
-- }
-+ return reader
-+ }
-+ reader, err := gzip.NewReader(src)
-+ if err != nil {
-+ panic(err)
- }
- return reader
- }
-
- // PutReader places back in the pool a CompressionReader
--func (pool *GzipPool) PutReader(reader CompressionReader) {
-+func (pool *GzipPool) PutReader(reader io.Reader) {
- pool.readers.Put(reader)
- }
-
- // GetWriter gets or creates a new CompressionWriter and resets it to write to dst
--func (pool *GzipPool) GetWriter(dst io.Writer) (writer CompressionWriter) {
-+func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser {
- if w := pool.writers.Get(); w != nil {
-- writer = w.(CompressionWriter)
-+ writer := w.(*gzip.Writer)
- writer.Reset(dst)
-- } else {
-- writer = gzip.NewWriter(dst)
-+ return writer
-+ }
-+
-+ level := pool.level
-+ if level == 0 {
-+ level = gzip.DefaultCompression
-+ }
-+ w, err := gzip.NewWriterLevel(dst, level)
-+ if err != nil {
-+ panic(err) // never happens, error is only returned on wrong compression level.
- }
-- return writer
-+ return w
- }
-
- // PutWriter places back in the pool a CompressionWriter
--func (pool *GzipPool) PutWriter(writer CompressionWriter) {
-+func (pool *GzipPool) PutWriter(writer io.WriteCloser) {
- pool.writers.Put(writer)
- }
-
-+type LZ4Pool struct {
-+ readers sync.Pool
-+ writers sync.Pool
-+}
-+
-+// GetReader gets or creates a new CompressionReader and resets it to read from src
-+func (pool *LZ4Pool) GetReader(src io.Reader) io.Reader {
-+ if r := pool.readers.Get(); r != nil {
-+ reader := r.(*lz4.Reader)
-+ reader.Reset(src)
-+ return reader
-+ }
-+ return lz4.NewReader(src)
-+}
-+
-+// PutReader places back in the pool a CompressionReader
-+func (pool *LZ4Pool) PutReader(reader io.Reader) {
-+ pool.readers.Put(reader)
-+}
-+
-+// GetWriter gets or creates a new CompressionWriter and resets it to write to dst
-+func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser {
-+ if w := pool.writers.Get(); w != nil {
-+ writer := w.(*lz4.Writer)
-+ writer.Reset(dst)
-+ return writer
-+ }
-+ return lz4.NewWriter(dst)
-+}
-+
-+// PutWriter places back in the pool a CompressionWriter
-+func (pool *LZ4Pool) PutWriter(writer io.WriteCloser) {
-+ pool.writers.Put(writer)
-+}
-+
-+type SnappyPool struct {
-+ readers sync.Pool
-+ writers sync.Pool
-+}
-+
-+// GetReader gets or creates a new CompressionReader and resets it to read from src
-+func (pool *SnappyPool) GetReader(src io.Reader) io.Reader {
-+ if r := pool.readers.Get(); r != nil {
-+ reader := r.(*snappy.Reader)
-+ reader.Reset(src)
-+ return reader
-+ }
-+ return snappy.NewReader(src)
-+}
-+
-+// PutReader places back in the pool a CompressionReader
-+func (pool *SnappyPool) PutReader(reader io.Reader) {
-+ pool.readers.Put(reader)
-+}
-+
-+// GetWriter gets or creates a new CompressionWriter and resets it to write to dst
-+func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser {
-+ if w := pool.writers.Get(); w != nil {
-+ writer := w.(*snappy.Writer)
-+ writer.Reset(dst)
-+ return writer
-+ }
-+ return snappy.NewBufferedWriter(dst)
-+}
-+
-+// PutWriter places back in the pool a CompressionWriter
-+func (pool *SnappyPool) PutWriter(writer io.WriteCloser) {
-+ pool.writers.Put(writer)
-+}
-+
-+type NoopPool struct{}
-+
-+// GetReader gets or creates a new CompressionReader and resets it to read from src
-+func (pool *NoopPool) GetReader(src io.Reader) io.Reader {
-+ return src
-+}
-+
-+// PutReader places back in the pool a CompressionReader
-+func (pool *NoopPool) PutReader(reader io.Reader) {}
-+
-+type noopCloser struct {
-+ io.Writer
-+}
-+
-+func (noopCloser) Close() error { return nil }
-+
-+// GetWriter gets or creates a new CompressionWriter and resets it to write to dst
-+func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser {
-+ return noopCloser{dst}
-+}
-+
-+// PutWriter places back in the pool a CompressionWriter
-+func (pool *NoopPool) PutWriter(writer io.WriteCloser) {}
-+
- // BufioReaderPool is a bufio reader that uses sync.Pool.
- type BufioReaderPool struct {
- pool sync.Pool
-diff --git a/pkg/chunkenc/testdata/testdata.go b/pkg/chunkenc/testdata/testdata.go
-new file mode 100644
-index 0000000000000..265ca4d64052a
---- /dev/null
-+++ b/pkg/chunkenc/testdata/testdata.go
-@@ -0,0 +1,1013 @@
-+package testdata
-+
-+import ""strings""
-+
-+// LogString returns a test log line. Returns the same line for the same index.
-+func LogString(index int64) string {
-+ if index > int64(len(logs)-1) {
-+ index = index % int64(len(logs))
-+ }
-+ return logs[index]
-+}
-+
-+var logs = strings.Split(`level=info ts=2019-12-12T15:00:08.325Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHNM71GRCJS7M34Q0EV7 sources=""[01DVWNC6NWY1A60AZV3Z6DGS65 01DVWW7XXX75GHA6ZDTD170CSZ 01DVX33N5W86CWJJVRPAVXJRWJ]"" duration=2.897213221s
-+level=info ts=2019-12-12T15:00:08.296Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHQRVN42AF196NYJ9C4C sources=""[01DVWNC6NSPJRCSBZ4QD3SXS66 01DVWW7XY69Y4YT09HR0RSR8KY 01DVX33N5SMVPB1TMD9J1M8GGK]"" duration=2.800759388s
-+level=info ts=2019-12-12T15:00:05.285Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1037 last=1039 duration=3.030078405s
-+level=info ts=2019-12-12T15:00:05.225Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1037 last=1039 duration=3.019791992s
-+level=info ts=2019-12-12T15:00:02.255Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=125.980176ms
-+level=info ts=2019-12-12T15:00:02.206Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.111334ms
-+level=info ts=2019-12-12T15:00:01.874Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576152000000 maxt=1576159200000 ulid=01DVX9ZCE8WZCTQJWSYDGHVQV8 duration=1.801853505s
-+level=info ts=2019-12-12T15:00:01.854Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576152000000 maxt=1576159200000 ulid=01DVX9ZCDWEBXRYWA7585TN2RV duration=1.794588392s
-+level=info ts=2019-12-12T13:00:05.461Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1034 last=1036 duration=3.044019343s
-+level=info ts=2019-12-12T13:00:05.332Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1034 last=1036 duration=3.040243488s
-+level=info ts=2019-12-12T13:00:02.417Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.883109ms
-+level=info ts=2019-12-12T13:00:02.291Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.278558ms
-+level=info ts=2019-12-12T13:00:02.048Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576144800000 maxt=1576152000000 ulid=01DVX33N5W86CWJJVRPAVXJRWJ duration=1.987867109s
-+level=info ts=2019-12-12T13:00:01.914Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576144800000 maxt=1576152000000 ulid=01DVX33N5SMVPB1TMD9J1M8GGK duration=1.856432758s
-+level=info ts=2019-12-12T12:58:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T12:52:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T11:00:05.320Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1031 last=1033 duration=2.999621843s
-+level=info ts=2019-12-12T11:00:05.315Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1031 last=1033 duration=2.962560692s
-+level=info ts=2019-12-12T11:00:02.352Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.600701ms
-+level=info ts=2019-12-12T11:00:02.321Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=134.547131ms
-+level=info ts=2019-12-12T11:00:01.975Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576137600000 maxt=1576144800000 ulid=01DVWW7XY69Y4YT09HR0RSR8KY duration=1.905948839s
-+level=info ts=2019-12-12T11:00:01.889Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576137600000 maxt=1576144800000 ulid=01DVWW7XXX75GHA6ZDTD170CSZ duration=1.828298188s
-+level=info ts=2019-12-12T10:55:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T10:49:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T10:33:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T10:25:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T10:21:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T10:14:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T09:00:16.465Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576130400000 ulid=01DVWNCFJRNW4RP8C56D4QNRXH sources=""[01DVVC60FYTRXZ9457XT10Y7AH 01DVW0S6A5HFTYBYD34SGAZJSR 01DVWNCC9SYJDQP0Y2RXK8XJC9]"" duration=7.289011992s
-+level=info ts=2019-12-12T09:00:15.812Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576130400000 ulid=01DVWNCF9JNSMSKZHW8STXQARA sources=""[01DVVC60DBGMXD5DXR6Y5XWNXF 01DVW0S67R7JFBFTFWMNVS8YR3 01DVWNCC599NDRZWRRSZF4XGHF]"" duration=6.930550254s
-+level=info ts=2019-12-12T09:00:08.717Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576108800000 maxt=1576130400000 ulid=01DVWNCC9SYJDQP0Y2RXK8XJC9 sources=""[01DVW0S0XW63CVRA3EPRSC8NWQ 01DVW7MR5W18322RVFY6WM9GR2 01DVWEGFDW0C09KSCRQ2F8DGN3]"" duration=2.900180235s
-+level=info ts=2019-12-12T09:00:08.440Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576108800000 maxt=1576130400000 ulid=01DVWNCC599NDRZWRRSZF4XGHF sources=""[01DVW0S0XS1SQQQK3CQYCHN9HV 01DVW7MR5ZN3K38ZHBJ243HDZJ 01DVWEGFE0DGKKDG4V9AGAPPBQ]"" duration=2.767053211s
-+level=info ts=2019-12-12T09:00:05.604Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1028 last=1030 duration=2.998418095s
-+level=info ts=2019-12-12T09:00:05.470Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1028 last=1030 duration=3.008684806s
-+level=info ts=2019-12-12T09:00:02.606Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.82085ms
-+level=info ts=2019-12-12T09:00:02.461Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.770206ms
-+level=info ts=2019-12-12T09:00:01.995Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576130400000 maxt=1576137600000 ulid=01DVWNC6NWY1A60AZV3Z6DGS65 duration=1.934602237s
-+level=info ts=2019-12-12T09:00:01.960Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576130400000 maxt=1576137600000 ulid=01DVWNC6NSPJRCSBZ4QD3SXS66 duration=1.902822647s
-+level=info ts=2019-12-12T08:59:54.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T08:54:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T08:12:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T08:05:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T07:00:05.421Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1025 last=1027 duration=3.037037204s
-+level=info ts=2019-12-12T07:00:05.263Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1025 last=1027 duration=2.984857831s
-+level=info ts=2019-12-12T07:00:02.383Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.79721ms
-+level=info ts=2019-12-12T07:00:02.278Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.228064ms
-+level=info ts=2019-12-12T07:00:02.052Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576123200000 maxt=1576130400000 ulid=01DVWEGFE0DGKKDG4V9AGAPPBQ duration=1.987940522s
-+level=info ts=2019-12-12T07:00:01.927Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576123200000 maxt=1576130400000 ulid=01DVWEGFDW0C09KSCRQ2F8DGN3 duration=1.866990386s
-+level=info ts=2019-12-12T05:00:05.355Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1022 last=1024 duration=3.046145151s
-+level=info ts=2019-12-12T05:00:05.309Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1022 last=1024 duration=3.019897535s
-+level=info ts=2019-12-12T05:00:02.309Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.294946ms
-+level=info ts=2019-12-12T05:00:02.289Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=145.150847ms
-+level=info ts=2019-12-12T05:00:01.939Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576116000000 maxt=1576123200000 ulid=01DVW7MR5ZN3K38ZHBJ243HDZJ duration=1.875204968s
-+level=info ts=2019-12-12T05:00:01.813Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576116000000 maxt=1576123200000 ulid=01DVW7MR5W18322RVFY6WM9GR2 duration=1.753345795s
-+level=info ts=2019-12-12T04:38:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T04:33:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T04:00:54.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T03:56:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T03:00:08.433Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576087200000 maxt=1576108800000 ulid=01DVW0S6A5HFTYBYD34SGAZJSR sources=""[01DVVC5V5WESMMH77FZVCJ80Q8 01DVVK1JDWNVFGWS4JPY2K4CAS 01DVVSX9NWR5V8SSJAPKQ2TCTH]"" duration=2.860812672s
-+level=info ts=2019-12-12T03:00:08.279Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576087200000 maxt=1576108800000 ulid=01DVW0S67R7JFBFTFWMNVS8YR3 sources=""[01DVVC5V6145SMRFE0WR0P3YTQ 01DVVK1JE1SSYY4EKS4HAT4SK3 01DVVSX9NRE3DWK67A2J17BE0T]"" duration=2.782760638s
-+level=info ts=2019-12-12T03:00:05.372Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1019 last=1021 duration=2.990754756s
-+level=info ts=2019-12-12T03:00:05.289Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1019 last=1021 duration=3.007795347s
-+level=info ts=2019-12-12T03:00:02.381Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.007667ms
-+level=info ts=2019-12-12T03:00:02.282Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=133.138336ms
-+level=info ts=2019-12-12T03:00:01.987Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576108800000 maxt=1576116000000 ulid=01DVW0S0XW63CVRA3EPRSC8NWQ duration=1.927367458s
-+level=info ts=2019-12-12T03:00:01.906Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576108800000 maxt=1576116000000 ulid=01DVW0S0XS1SQQQK3CQYCHN9HV duration=1.84874308s
-+level=info ts=2019-12-12T02:39:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-12T02:33:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-12T01:00:05.500Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1016 last=1018 duration=3.027246961s
-+level=info ts=2019-12-12T01:00:05.265Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1016 last=1018 duration=2.989822576s
-+level=info ts=2019-12-12T01:00:02.473Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=124.134851ms
-+level=info ts=2019-12-12T01:00:02.275Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.268006ms
-+level=info ts=2019-12-12T01:00:02.092Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576101600000 maxt=1576108800000 ulid=01DVVSX9NRE3DWK67A2J17BE0T duration=2.035218414s
-+level=info ts=2019-12-12T01:00:01.907Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576101600000 maxt=1576108800000 ulid=01DVVSX9NWR5V8SSJAPKQ2TCTH duration=1.847566214s
-+level=info ts=2019-12-11T23:00:05.552Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1013 last=1015 duration=3.042911717s
-+level=info ts=2019-12-11T23:00:05.255Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1013 last=1015 duration=3.007686626s
-+level=info ts=2019-12-11T23:00:02.509Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=135.735201ms
-+level=info ts=2019-12-11T23:00:02.247Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.374582ms
-+level=info ts=2019-12-11T23:00:02.154Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576094400000 maxt=1576101600000 ulid=01DVVK1JE1SSYY4EKS4HAT4SK3 duration=2.088724625s
-+level=info ts=2019-12-11T23:00:01.873Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576094400000 maxt=1576101600000 ulid=01DVVK1JDWNVFGWS4JPY2K4CAS duration=1.813033164s
-+level=info ts=2019-12-11T21:00:08.427Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576087200000 ulid=01DVVC60FYTRXZ9457XT10Y7AH sources=""[01DVTQJNDXSY7N5V60ZX7X1C3J 01DVTYECNW5T3AHHB2EXATYFMJ 01DVV5A3XWVRTNS7G7BBDQ9G2W]"" duration=2.925663083s
-+level=info ts=2019-12-11T21:00:08.281Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576065600000 maxt=1576087200000 ulid=01DVVC60DBGMXD5DXR6Y5XWNXF sources=""[01DVTQJNDRV9NDCK9H2BCH04R0 01DVTYECNS4AZH3ZMCER87DYWG 01DVV5A3XRVMTB2E7V3MZ6RGCA]"" duration=2.862756811s
-+level=info ts=2019-12-11T21:00:05.288Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1010 last=1012 duration=2.998716456s
-+level=info ts=2019-12-11T21:00:05.204Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1010 last=1012 duration=3.013679702s
-+level=info ts=2019-12-11T21:00:02.289Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=124.171081ms
-+level=info ts=2019-12-11T21:00:02.190Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=114.925741ms
-+level=info ts=2019-12-11T21:00:01.942Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576087200000 maxt=1576094400000 ulid=01DVVC5V5WESMMH77FZVCJ80Q8 duration=1.881893506s
-+level=info ts=2019-12-11T21:00:01.837Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576087200000 maxt=1576094400000 ulid=01DVVC5V6145SMRFE0WR0P3YTQ duration=1.772164011s
-+level=info ts=2019-12-11T19:00:05.276Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1007 last=1009 duration=3.031727362s
-+level=info ts=2019-12-11T19:00:05.222Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1007 last=1009 duration=3.003072336s
-+level=info ts=2019-12-11T19:00:02.244Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=125.675247ms
-+level=info ts=2019-12-11T19:00:02.219Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.466308ms
-+level=info ts=2019-12-11T19:00:01.888Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576080000000 maxt=1576087200000 ulid=01DVV5A3XRVMTB2E7V3MZ6RGCA duration=1.832443683s
-+level=info ts=2019-12-11T19:00:01.845Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576080000000 maxt=1576087200000 ulid=01DVV5A3XWVRTNS7G7BBDQ9G2W duration=1.784935995s
-+level=info ts=2019-12-11T18:31:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T18:24:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T17:00:05.233Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1004 last=1006 duration=3.008189996s
-+level=info ts=2019-12-11T17:00:05.223Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1004 last=1006 duration=2.97892s
-+level=info ts=2019-12-11T17:00:02.244Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=132.385042ms
-+level=info ts=2019-12-11T17:00:02.225Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=125.500534ms
-+level=info ts=2019-12-11T17:00:01.870Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576072800000 maxt=1576080000000 ulid=01DVTYECNW5T3AHHB2EXATYFMJ duration=1.810447322s
-+level=info ts=2019-12-11T17:00:01.870Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576072800000 maxt=1576080000000 ulid=01DVTYECNS4AZH3ZMCER87DYWG duration=1.813347748s
-+level=info ts=2019-12-11T15:00:16.297Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576065600000 ulid=01DVTQJY4WBY96QVV4XQJTR2JC sources=""[01DVSECF6Q4JXFDGMFQB3J1Z9E 01DVT2ZN0DMXXJJDHKS0M8JWMS 01DVTQJTX0GZ1S7J51CN1RJNQX]"" duration=7.308935842s
-+level=info ts=2019-12-11T15:00:15.941Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576065600000 ulid=01DVTQJY58270MEPGVGGDZZRKJ sources=""[01DVSECF6J4NZRNHABZ2MSG7V7 01DVT2ZN4RB65KG77XPHPNVSAM 01DVTQJTYGFS18MWME9Z2NFJSW]"" duration=6.941637414s
-+level=info ts=2019-12-11T15:00:08.544Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576044000000 maxt=1576065600000 ulid=01DVTQJTX0GZ1S7J51CN1RJNQX sources=""[01DVT2ZFNYB7DEH57ZX4HW2DAV 01DVT9V6XW9ENV15NHKR20T9B4 01DVTGPY5WTBSSEQ37JQ2VPCTQ]"" duration=2.880290482s
-+level=info ts=2019-12-11T15:00:08.541Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576044000000 maxt=1576065600000 ulid=01DVTQJTYGFS18MWME9Z2NFJSW sources=""[01DVT2ZFP3R7RB9H6BS3JVAMXJ 01DVT9V6Y21E8YXRKNGA9RPB7D 01DVTGPY5XGARMV8B8VBWQ23W3]"" duration=2.829184147s
-+level=info ts=2019-12-11T15:00:05.505Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1001 last=1003 duration=3.006477625s
-+level=info ts=2019-12-11T15:00:05.452Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=1001 last=1003 duration=2.990895181s
-+level=info ts=2019-12-11T15:00:02.498Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=129.237566ms
-+level=info ts=2019-12-11T15:00:02.461Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=129.961097ms
-+level=info ts=2019-12-11T15:00:02.022Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576065600000 maxt=1576072800000 ulid=01DVTQJNDRV9NDCK9H2BCH04R0 duration=1.96598488s
-+level=info ts=2019-12-11T15:00:01.933Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576065600000 maxt=1576072800000 ulid=01DVTQJNDXSY7N5V60ZX7X1C3J duration=1.871872199s
-+level=info ts=2019-12-11T14:15:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T14:07:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T13:18:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T13:12:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T13:00:05.395Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=998 last=1000 duration=3.010358861s
-+level=info ts=2019-12-11T13:00:05.249Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=998 last=1000 duration=3.032196282s
-+level=info ts=2019-12-11T13:00:02.385Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.568186ms
-+level=info ts=2019-12-11T13:00:02.217Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=136.017788ms
-+level=info ts=2019-12-11T13:00:02.021Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576058400000 maxt=1576065600000 ulid=01DVTGPY5XGARMV8B8VBWQ23W3 duration=1.959903s
-+level=info ts=2019-12-11T13:00:01.865Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576058400000 maxt=1576065600000 ulid=01DVTGPY5WTBSSEQ37JQ2VPCTQ duration=1.805149859s
-+level=info ts=2019-12-11T11:46:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T11:39:44.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T11:35:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T11:26:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T11:15:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T11:06:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T11:01:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T11:00:05.591Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=995 last=997 duration=3.063684941s
-+level=info ts=2019-12-11T11:00:05.297Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=995 last=997 duration=3.051047495s
-+level=info ts=2019-12-11T11:00:02.527Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=131.530749ms
-+level=info ts=2019-12-11T11:00:02.246Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=123.08975ms
-+level=info ts=2019-12-11T11:00:02.096Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576051200000 maxt=1576058400000 ulid=01DVT9V6Y21E8YXRKNGA9RPB7D duration=2.029825916s
-+level=info ts=2019-12-11T11:00:01.819Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576051200000 maxt=1576058400000 ulid=01DVT9V6XW9ENV15NHKR20T9B4 duration=1.7583013s
-+level=info ts=2019-12-11T10:54:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T10:46:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T10:39:44.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T10:34:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T10:26:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T10:18:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T10:12:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T09:56:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T09:48:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T09:00:08.553Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576022400000 maxt=1576044000000 ulid=01DVT2ZN4RB65KG77XPHPNVSAM sources=""[01DVSEC9XXK1J3B0186KYQECZT 01DVSN815STR0D0B8245RWNF13 01DVSW3RDRKTJVEZWGYE07XBXE]"" duration=2.896352595s
-+level=info ts=2019-12-11T09:00:08.407Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576022400000 maxt=1576044000000 ulid=01DVT2ZN0DMXXJJDHKS0M8JWMS sources=""[01DVSEC9XW0W8V42SPRR0YMM0X 01DVSN815W8YTW3DPQTJVRNTS4 01DVSW3RDWE1WHSM8AEW0ARA3S]"" duration=2.890101974s
-+level=info ts=2019-12-11T09:00:05.444Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=992 last=994 duration=3.058184317s
-+level=info ts=2019-12-11T09:00:05.306Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=992 last=994 duration=2.99204816s
-+level=info ts=2019-12-11T09:00:02.385Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.295437ms
-+level=info ts=2019-12-11T09:00:02.313Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.456638ms
-+level=info ts=2019-12-11T09:00:02.023Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576044000000 maxt=1576051200000 ulid=01DVT2ZFP3R7RB9H6BS3JVAMXJ duration=1.955843851s
-+level=info ts=2019-12-11T09:00:01.935Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576044000000 maxt=1576051200000 ulid=01DVT2ZFNYB7DEH57ZX4HW2DAV duration=1.873653026s
-+level=info ts=2019-12-11T07:00:05.441Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=989 last=991 duration=3.013763908s
-+level=info ts=2019-12-11T07:00:05.272Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=989 last=991 duration=2.979497994s
-+level=info ts=2019-12-11T07:00:02.427Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=126.635643ms
-+level=info ts=2019-12-11T07:00:02.293Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=121.051415ms
-+level=info ts=2019-12-11T07:00:02.056Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576036800000 maxt=1576044000000 ulid=01DVSW3RDWE1WHSM8AEW0ARA3S duration=1.995603695s
-+level=info ts=2019-12-11T07:00:01.941Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576036800000 maxt=1576044000000 ulid=01DVSW3RDRKTJVEZWGYE07XBXE duration=1.885680378s
-+level=info ts=2019-12-11T06:20:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T06:14:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T05:02:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=5 to=4
-+level=info ts=2019-12-11T05:01:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=5
-+level=info ts=2019-12-11T05:00:05.488Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=986 last=988 duration=3.043360624s
-+level=info ts=2019-12-11T05:00:05.288Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=986 last=988 duration=2.998209654s
-+level=info ts=2019-12-11T05:00:02.445Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=130.642245ms
-+level=info ts=2019-12-11T05:00:02.290Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=128.363621ms
-+level=info ts=2019-12-11T05:00:02.066Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576029600000 maxt=1576036800000 ulid=01DVSN815STR0D0B8245RWNF13 duration=2.008689142s
-+level=info ts=2019-12-11T05:00:01.938Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576029600000 maxt=1576036800000 ulid=01DVSN815W8YTW3DPQTJVRNTS4 duration=1.877943808s
-+level=info ts=2019-12-11T04:55:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T04:35:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T04:28:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T04:15:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T04:07:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T04:03:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T03:57:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T03:52:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T03:43:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T03:32:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T03:24:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T03:19:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T03:12:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T03:00:08.325Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576022400000 ulid=01DVSECF6Q4JXFDGMFQB3J1Z9E sources=""[01DVRSS45W7DXE05RGBYGH58PY 01DVS0MVDWGK47AZ3HY5GQEMK4 01DVS7GJNW7BF3R6KK7GW291R0]"" duration=2.861556831s
-+level=info ts=2019-12-11T03:00:08.255Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1576000800000 maxt=1576022400000 ulid=01DVSECF6J4NZRNHABZ2MSG7V7 sources=""[01DVRSS4632MEB6SYC6SB7DTGE 01DVS0MVDR5Z67QJD6T94CXHRA 01DVS7GJNSYRFT48H9CDRP82YV]"" duration=2.796902205s
-+level=info ts=2019-12-11T03:00:05.253Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=983 last=985 duration=3.004398083s
-+level=info ts=2019-12-11T03:00:05.245Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=983 last=985 duration=3.023743067s
-+level=info ts=2019-12-11T03:00:02.248Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=127.893231ms
-+level=info ts=2019-12-11T03:00:02.221Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=132.662929ms
-+level=info ts=2019-12-11T03:00:01.903Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576022400000 maxt=1576029600000 ulid=01DVSEC9XW0W8V42SPRR0YMM0X duration=1.842688968s
-+level=info ts=2019-12-11T03:00:01.847Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576022400000 maxt=1576029600000 ulid=01DVSEC9XXK1J3B0186KYQECZT duration=1.78558499s
-+level=info ts=2019-12-11T02:18:44.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=5 to=4
-+level=info ts=2019-12-11T02:18:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=5
-+level=info ts=2019-12-11T02:11:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T01:59:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-11T01:52:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-11T01:00:05.272Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=980 last=982 duration=3.025045534s
-+level=info ts=2019-12-11T01:00:05.189Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=980 last=982 duration=2.992866718s
-+level=info ts=2019-12-11T01:00:02.247Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=123.561834ms
-+level=info ts=2019-12-11T01:00:02.196Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=108.589195ms
-+level=info ts=2019-12-11T01:00:01.905Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576015200000 maxt=1576022400000 ulid=01DVS7GJNW7BF3R6KK7GW291R0 duration=1.844635186s
-+level=info ts=2019-12-11T01:00:01.866Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576015200000 maxt=1576022400000 ulid=01DVS7GJNSYRFT48H9CDRP82YV duration=1.809175377s
-+level=info ts=2019-12-11T00:31:25.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=7 to=5
-+level=info ts=2019-12-11T00:30:25.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=10 to=7
-+level=info ts=2019-12-11T00:29:55.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=14 to=10
-+level=info ts=2019-12-11T00:29:25.064Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=85 to=14
-+level=info ts=2019-12-11T00:29:15.063Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=5 to=85
-+level=info ts=2019-12-10T23:00:05.385Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=977 last=979 duration=3.157877457s
-+level=info ts=2019-12-10T23:00:05.136Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=977 last=979 duration=3.013728541s
-+level=info ts=2019-12-10T23:00:02.227Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=133.349607ms
-+level=info ts=2019-12-10T23:00:02.123Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=110.615384ms
-+level=info ts=2019-12-10T23:00:01.832Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576008000000 maxt=1576015200000 ulid=01DVS0MVDWGK47AZ3HY5GQEMK4 duration=1.772580137s
-+level=info ts=2019-12-10T23:00:01.780Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576008000000 maxt=1576015200000 ulid=01DVS0MVDR5Z67QJD6T94CXHRA duration=1.724738556s
-+level=info ts=2019-12-10T21:00:18.426Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575936000000 maxt=1576000800000 ulid=01DVRSSD7JM2Y5MQNCZ7QZRSWK sources=""[01DVQGJZNRDFP0P161HP7GJX44 01DVR563YKZY789FPAM3DD8DKX 01DVRSS9Q04WN7F254ZCSQ4YP5]"" duration=9.096172888s
-+level=info ts=2019-12-10T21:00:16.394Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575936000000 maxt=1576000800000 ulid=01DVRSSCTH9ED87EXNSR9J8PE6 sources=""[01DVQGJY3RF9X7R93QY6V579W3 01DVR563QG1PZ1AY7RPKSCMKND 01DVRSS9HMN2EC6QQQ2XP7R90D]"" duration=7.481260173s
-+level=info ts=2019-12-10T21:00:08.859Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575979200000 maxt=1576000800000 ulid=01DVRSS9Q04WN7F254ZCSQ4YP5 sources=""[01DVR55YDW6Q96ZHGXD1T7HVF4 01DVRC1NNWE3ZMEQ6035ZJTF49 01DVRJXCXW4W35MBB4E9RXX1QD]"" duration=3.130772971s
-+level=info ts=2019-12-10T21:00:08.473Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575979200000 maxt=1576000800000 ulid=01DVRSS9HMN2EC6QQQ2XP7R90D sources=""[01DVR55YE3CA9B12S48FTTFSVD 01DVRC1NP4CCWPRMCC7667R1FZ 01DVRJXCY0DY2R6DVWWGNXNPRQ]"" duration=2.917254733s
-+level=info ts=2019-12-10T21:00:05.490Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=974 last=976 duration=3.159908932s
-+level=info ts=2019-12-10T21:00:05.339Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=974 last=976 duration=3.090229598s
-+level=info ts=2019-12-10T21:00:02.330Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=129.067188ms
-+level=info ts=2019-12-10T21:00:02.249Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=108.272575ms
-+level=info ts=2019-12-10T21:00:01.804Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576000800000 maxt=1576008000000 ulid=01DVRSS45W7DXE05RGBYGH58PY duration=1.743999568s
-+level=info ts=2019-12-10T21:00:01.774Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1576000800000 maxt=1576008000000 ulid=01DVRSS4632MEB6SYC6SB7DTGE duration=1.706876662s
-+level=info ts=2019-12-10T20:35:34.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=3 to=4
-+level=info ts=2019-12-10T20:29:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-10T19:00:06.012Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=971 last=973 duration=3.248527735s
-+level=info ts=2019-12-10T19:00:05.641Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=971 last=973 duration=3.1047498s
-+level=info ts=2019-12-10T19:00:02.763Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=210.397069ms
-+level=info ts=2019-12-10T19:00:02.536Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=171.333573ms
-+level=info ts=2019-12-10T19:00:02.259Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575993600000 maxt=1576000800000 ulid=01DVRJXCXW4W35MBB4E9RXX1QD duration=2.199162459s
-+level=info ts=2019-12-10T19:00:02.080Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575993600000 maxt=1576000800000 ulid=01DVRJXCY0DY2R6DVWWGNXNPRQ duration=2.016321337s
-+level=info ts=2019-12-10T17:00:05.549Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=968 last=970 duration=3.183706512s
-+level=info ts=2019-12-10T17:00:05.319Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=968 last=970 duration=3.088304654s
-+level=info ts=2019-12-10T17:00:02.365Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=133.008474ms
-+level=info ts=2019-12-10T17:00:02.231Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=114.89207ms
-+level=info ts=2019-12-10T17:00:01.942Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575986400000 maxt=1575993600000 ulid=01DVRC1NNWE3ZMEQ6035ZJTF49 duration=1.881731957s
-+level=info ts=2019-12-10T17:00:01.864Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575986400000 maxt=1575993600000 ulid=01DVRC1NP4CCWPRMCC7667R1FZ duration=1.795832733s
-+level=info ts=2019-12-10T15:00:09.507Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575957600000 maxt=1575979200000 ulid=01DVR563YKZY789FPAM3DD8DKX sources=""[01DVQGJRNW1DY7K8KW6B2RY4FF 01DVQQEFXWYP18MDFKY58VJCSG 01DVQYA75W7ERXK6FBEMYYEX6S]"" duration=3.791514409s
-+level=info ts=2019-12-10T15:00:08.520Z caller=compact.go:441 component=tsdb msg=""compact blocks"" count=3 mint=1575957600000 maxt=1575979200000 ulid=01DVR563QG1PZ1AY7RPKSCMKND sources=""[01DVQGJRP3CC86KKVR5MZ1YYTK 01DVQQEFY0CEBFC4QE02GW9S4F 01DVQYA75TMRETNZRWPV46G5Y0]"" duration=3.032106451s
-+level=info ts=2019-12-10T15:00:05.484Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=965 last=967 duration=3.124836463s
-+level=info ts=2019-12-10T15:00:05.277Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=965 last=967 duration=3.055778688s
-+level=info ts=2019-12-10T15:00:02.359Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=130.554076ms
-+level=info ts=2019-12-10T15:00:02.221Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=114.665423ms
-+level=info ts=2019-12-10T15:00:01.972Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575979200000 maxt=1575986400000 ulid=01DVR55YDW6Q96ZHGXD1T7HVF4 duration=1.912209972s
-+level=info ts=2019-12-10T15:00:01.878Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575979200000 maxt=1575986400000 ulid=01DVR55YE3CA9B12S48FTTFSVD duration=1.811316924s
-+2019-12-10 13:50:13.598594155 +0000 UTC
-+level=info ts=2019-12-10T13:50:13.596Z caller=main.go:771 msg=""Completed loading of configuration file"" filename=/etc/prometheus/prometheus.yml
-+level=info ts=2019-12-10T13:50:13.553Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:50:13.552Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:50:13.551Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:50:13.551Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:50:13.546Z caller=main.go:743 msg=""Loading configuration file"" filename=/etc/prometheus/prometheus.yml
-+curl -X POST --fail -o - -sS http://localhost:80/prometheus/-/reload
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..2019_12_05_07_22_08.390693530"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data/prometheus.yml"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data/recording.rules"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data/alerts.rules"": REMOVE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: Watching /etc/prometheus/..data
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data"": CREATE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..data_tmp"": RENAME at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..2019_12_10_13_50_13.738754268"": CHMOD at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019/12/10 13:50:13 DEBUG: Watching /etc/prometheus/..2019_12_10_13_50_13.738754268
-+2019/12/10 13:50:13 DEBUG: ""/etc/prometheus/..2019_12_10_13_50_13.738754268"": CREATE at 2019-12-10 13:50:13.341280871 +0000 UTC
-+2019-12-10 13:49:53.296090488 +0000 UTC
-+level=info ts=2019-12-10T13:49:53.294Z caller=main.go:771 msg=""Completed loading of configuration file"" filename=/etc/prometheus/prometheus.yml
-+level=info ts=2019-12-10T13:49:53.254Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:49:53.253Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:49:53.252Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:49:53.251Z caller=kubernetes.go:192 component=""discovery manager scrape"" discovery=k8s msg=""Using pod service account via in-cluster config""
-+level=info ts=2019-12-10T13:49:53.248Z caller=main.go:743 msg=""Loading configuration file"" filename=/etc/prometheus/prometheus.yml
-+curl -X POST --fail -o - -sS http://localhost:80/prometheus/-/reload
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..2019_12_05_07_22_36.096562123"": REMOVE at 2019-12-10 13:49:53.044039978 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data/prometheus.yml"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data/recording.rules"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data/alerts.rules"": REMOVE at 2019-12-10 13:49:53.043039894 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: Watching /etc/prometheus/..data
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data"": CREATE at 2019-12-10 13:49:53.043039894 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..data_tmp"": RENAME at 2019-12-10 13:49:53.044039978 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..2019_12_10_13_49_53.355073198"": CHMOD at 2019-12-10 13:49:53.043039894 +0000 UTC
-+2019/12/10 13:49:53 DEBUG: Watching /etc/prometheus/..2019_12_10_13_49_53.355073198
-+2019/12/10 13:49:53 DEBUG: ""/etc/prometheus/..2019_12_10_13_49_53.355073198"": CREATE at 2019-12-10 13:49:53.043039894 +0000 UTC
-+level=info ts=2019-12-10T13:00:06.007Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=962 last=964 duration=3.145696569s
-+level=info ts=2019-12-10T13:00:05.601Z caller=head.go:666 component=tsdb msg=""WAL checkpoint complete"" first=962 last=964 duration=3.062580976s
-+level=info ts=2019-12-10T13:00:02.861Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=169.077152ms
-+level=info ts=2019-12-10T13:00:02.539Z caller=head.go:596 component=tsdb msg=""head GC completed"" duration=152.173262ms
-+level=info ts=2019-12-10T13:00:02.425Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575972000000 maxt=1575979200000 ulid=01DVQYA75W7ERXK6FBEMYYEX6S duration=2.364066751s
-+level=info ts=2019-12-10T13:00:02.150Z caller=compact.go:496 component=tsdb msg=""write block"" mint=1575972000000 maxt=1575979200000 ulid=01DVQYA75TMRETNZRWPV46G5Y0 duration=2.092629264s
-+level=info ts=2019-12-10T11:48:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=2 to=1
-+level=info ts=2019-12-10T11:47:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=3 to=2
-+level=info ts=2019-12-10T11:47:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=5 to=3
-+level=info ts=2019-12-10T11:46:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=7 to=5
-+level=info ts=2019-12-10T11:46:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=10 to=7
-+level=info ts=2019-12-10T11:46:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=16 to=10
-+level=info ts=2019-12-10T11:45:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=26 to=16
-+level=info ts=2019-12-10T11:45:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=39 to=26
-+level=info ts=2019-12-10T11:45:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=63 to=39
-+level=info ts=2019-12-10T11:44:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=100 to=63
-+level=info ts=2019-12-10T11:44:45.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=2 to=1
-+level=info ts=2019-12-10T11:44:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=156 to=100
-+level=info ts=2019-12-10T11:44:15.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=3 to=2
-+level=info ts=2019-12-10T11:44:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=253 to=156
-+level=info ts=2019-12-10T11:43:55.064Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=4 to=3
-+level=info ts=2019-12-10T11:43:54.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=405 to=253
-+level=info ts=2019-12-10T11:43:45.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=5 to=4
-+level=info ts=2019-12-10T11:43:34.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=619 to=405
-+level=info ts=2019-12-10T11:43:25.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=8 to=5
-+level=info ts=2019-12-10T11:43:14.793Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=1000 to=619
-+level=info ts=2019-12-10T11:43:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=11 to=8
-+level=info ts=2019-12-10T11:42:45.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=22 to=11
-+level=info ts=2019-12-10T11:42:25.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=16 to=22
-+level=error ts=2019-12-10T11:42:11.074Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 400 Bad Request: out of order sample""
-+level=error ts=2019-12-10T11:42:11.073Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 400 Bad Request: out of order sample""
-+level=info ts=2019-12-10T11:42:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=11 to=16
-+level=info ts=2019-12-10T11:41:55.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=8 to=11
-+level=info ts=2019-12-10T11:39:35.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=6 to=8
-+level=info ts=2019-12-10T11:38:55.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=4 to=6
-+level=info ts=2019-12-10T11:38:35.064Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=2 to=4
-+level=error ts=2019-12-10T11:38:12.281Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:12.281Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples""
-+level=error ts=2019-12-10T11:38:12.235Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.235Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.204Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:12.183Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples""
-+level=error ts=2019-12-10T11:38:12.129Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=91 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.127Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=56 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 56 samples""
-+level=error ts=2019-12-10T11:38:12.125Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=52 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 52 samples""
-+level=error ts=2019-12-10T11:38:12.124Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.123Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:12.122Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples""
-+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=67 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples""
-+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=78 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 78 samples""
-+level=error ts=2019-12-10T11:38:12.120Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.119Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.119Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.081Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.037Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:12.024Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples""
-+level=error ts=2019-12-10T11:38:11.920Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.917Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.916Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.913Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:11.913Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:11.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.721Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.176Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples""
-+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:11.145Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples""
-+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples""
-+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:11.144Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=46 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 46 samples""
-+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.143Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=92 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples""
-+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:11.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.955Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.953Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples""
-+level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.942Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=58 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples""
-+level=error ts=2019-12-10T11:38:10.941Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.940Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.940Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=23 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 23 samples""
-+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=59 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples""
-+level=error ts=2019-12-10T11:38:10.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples""
-+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.878Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples""
-+level=error ts=2019-12-10T11:38:10.877Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples""
-+level=error ts=2019-12-10T11:38:10.039Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.902Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=92 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples""
-+level=error ts=2019-12-10T11:38:09.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=73 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=68 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 68 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=35 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 35 samples""
-+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:09.876Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples""
-+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=92 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples""
-+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.832Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=17 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 17 samples""
-+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=94 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples""
-+level=error ts=2019-12-10T11:38:09.806Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples""
-+level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=48 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 48 samples""
-+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=67 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples""
-+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=43 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 43 samples""
-+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples""
-+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=19 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 19 samples""
-+level=error ts=2019-12-10T11:38:09.733Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=50 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 50 samples""
-+level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples""
-+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples""
-+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples""
-+level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=37 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples""
-+level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=67 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples""
-+level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples""
-+level=error ts=2019-12-10T11:38:08.795Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=74 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples""
-+level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=94 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples""
-+level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples""
-+level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples""
-+level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.693Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=58 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=91 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=59 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.691Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.678Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:08.677Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.648Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.647Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples""
-+level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples""
-+level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:08.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.553Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=73 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples""
-+level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=87 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples""
-+level=error ts=2019-12-10T11:38:08.541Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=91 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples""
-+level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples""
-+level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples""
-+level=error ts=2019-12-10T11:38:08.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=65 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples""
-+level=error ts=2019-12-10T11:38:08.441Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples""
-+level=error ts=2019-12-10T11:38:08.433Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.431Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples""
-+level=error ts=2019-12-10T11:38:08.407Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples""
-+level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples""
-+level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.604Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=60 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples""
-+level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples""
-+level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:07.600Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=93 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 93 samples""
-+level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples""
-+level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples""
-+level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=74 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples""
-+level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples""
-+level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=91 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples""
-+level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=73 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples""
-+level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples""
-+level=error ts=2019-12-10T11:38:07.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples""
-+level=error ts=2019-12-10T11:38:07.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=53 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 53 samples""
-+level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=39 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 39 samples""
-+level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.587Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.513Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=59 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples""
-+level=error ts=2019-12-10T11:38:07.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.510Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:07.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=67 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=60 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.499Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.494Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.488Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.484Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.483Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.483Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=66 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples""
-+level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples""
-+level=error ts=2019-12-10T11:38:07.480Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=66 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples""
-+level=error ts=2019-12-10T11:38:07.478Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=76 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 76 samples""
-+level=error ts=2019-12-10T11:38:07.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples""
-+level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.455Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=65 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples""
-+level=error ts=2019-12-10T11:38:07.454Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples""
-+level=error ts=2019-12-10T11:38:07.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=37 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=30 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 30 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=74 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:07.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=74 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=63 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 63 samples""
-+level=error ts=2019-12-10T11:38:07.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:07.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples""
-+level=error ts=2019-12-10T11:38:07.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=57 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 57 samples""
-+level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.444Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:07.443Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.390Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.389Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.388Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=89 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples""
-+level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:07.387Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=63 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 63 samples""
-+level=error ts=2019-12-10T11:38:07.376Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.358Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.357Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:07.310Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.259Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.259Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:07.045Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.044Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.043Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.043Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples""
-+level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.042Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:07.041Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=49 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 49 samples""
-+level=error ts=2019-12-10T11:38:06.947Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples""
-+level=error ts=2019-12-10T11:38:06.947Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.946Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.944Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.943Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:06.939Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.938Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:06.937Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.848Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=54 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 54 samples""
-+level=error ts=2019-12-10T11:38:06.848Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.842Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.841Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.747Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=65 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples""
-+level=error ts=2019-12-10T11:38:06.745Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.715Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.646Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:06.646Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=37 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples""
-+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:06.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.639Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.638Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=93 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 93 samples""
-+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.591Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=56 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 56 samples""
-+level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.590Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.589Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:06.588Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.581Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples""
-+level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.563Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.562Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.561Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.560Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.551Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.550Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.544Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.543Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.542Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.540Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.538Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.512Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.506Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.505Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=69 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 69 samples""
-+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:06.504Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:06.489Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=77 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples""
-+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:06.461Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.460Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:06.459Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.453Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.452Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.451Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.450Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=85 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples""
-+level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.449Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.448Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=70 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples""
-+level=error ts=2019-12-10T11:38:06.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=66 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=33 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 33 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=75 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples""
-+level=error ts=2019-12-10T11:38:06.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=62 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 62 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=58 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=72 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples""
-+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.353Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=60 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples""
-+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=22 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 22 samples""
-+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=82 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples""
-+level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.340Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=74 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples""
-+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.337Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.330Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:06.327Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:06.244Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.231Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.230Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=94 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples""
-+level=error ts=2019-12-10T11:38:06.206Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.198Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=81 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples""
-+level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.088Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:06.087Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=97 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples""
-+level=error ts=2019-12-10T11:38:05.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=info ts=2019-12-10T11:38:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""Remote storage resharding"" from=1 to=2
-+level=error ts=2019-12-10T11:38:04.977Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.976Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=98 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples""
-+level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.717Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=87 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples""
-+level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=99 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples""
-+level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=66 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples""
-+level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=64 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 64 samples""
-+level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.627Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.621Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.619Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.135Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.017Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.016Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples""
-+level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.010Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.008Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples""
-+level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.004Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:04.003Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:04.001Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=79 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples""
-+level=error ts=2019-12-10T11:38:03.922Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.908Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=96 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples""
-+level=error ts=2019-12-10T11:38:03.907Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.906Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=83 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples""
-+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=71 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples""
-+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.902Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.901Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=86 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples""
-+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=84 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples""
-+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.900Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.899Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=90 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples""
-+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.898Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=80 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples""
-+level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.723Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=76 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 76 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=95 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.643Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.642Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.641Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=88 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=78 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 78 samples""
-+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg=""non-recoverable error"" count=100 err=""server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples""
-+`, ""\n"")
-diff --git a/pkg/chunkenc/util_test.go b/pkg/chunkenc/util_test.go
-new file mode 100644
-index 0000000000000..f906bd93442b4
---- /dev/null
-+++ b/pkg/chunkenc/util_test.go
-@@ -0,0 +1,53 @@
-+package chunkenc
-+
-+import (
-+ ""time""
-+
-+ ""github.com/grafana/loki/pkg/chunkenc/testdata""
-+ ""github.com/grafana/loki/pkg/logproto""
-+)
-+
-+func logprotoEntry(ts int64, line string) *logproto.Entry {
-+ return &logproto.Entry{
-+ Timestamp: time.Unix(0, ts),
-+ Line: line,
-+ }
-+}
-+
-+func generateData(enc Encoding) []Chunk {
-+ chunks := []Chunk{}
-+ i := int64(0)
-+ for n := 0; n < 50; n++ {
-+ entry := logprotoEntry(0, testdata.LogString(0))
-+ c := NewMemChunk(enc)
-+ for c.SpaceFor(entry) {
-+ _ = c.Append(entry)
-+ i++
-+ entry = logprotoEntry(i, testdata.LogString(i))
-+ }
-+ c.Close()
-+ chunks = append(chunks, c)
-+ }
-+ return chunks
-+}
-+
-+func fillChunk(c Chunk) int64 {
-+ i := int64(0)
-+ inserted := int64(0)
-+ entry := &logproto.Entry{
-+ Timestamp: time.Unix(0, 0),
-+ Line: testdata.LogString(i),
-+ }
-+ for c.SpaceFor(entry) {
-+ err := c.Append(entry)
-+ if err != nil {
-+ panic(err)
-+ }
-+ i++
-+ inserted += int64(len(entry.Line))
-+ entry.Timestamp = time.Unix(0, i)
-+ entry.Line = testdata.LogString(i)
-+
-+ }
-+ return inserted
-+}
-diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
-index ccf2aee0f25ac..53bc7a0f2bca3 100644
---- a/pkg/ingester/ingester.go
-+++ b/pkg/ingester/ingester.go
-@@ -4,6 +4,7 @@ import (
- ""context""
- ""errors""
- ""flag""
-+ ""fmt""
- ""net/http""
- ""sync""
- ""time""
-@@ -18,6 +19,7 @@ import (
- ""github.com/cortexproject/cortex/pkg/ring""
- ""github.com/cortexproject/cortex/pkg/util""
-
-+ ""github.com/grafana/loki/pkg/chunkenc""
- ""github.com/grafana/loki/pkg/ingester/client""
- ""github.com/grafana/loki/pkg/logproto""
- ""github.com/grafana/loki/pkg/util/validation""
-@@ -48,6 +50,7 @@ type Config struct {
- MaxChunkIdle time.Duration `yaml:""chunk_idle_period""`
- BlockSize int `yaml:""chunk_block_size""`
- TargetChunkSize int `yaml:""chunk_target_size""`
-+ ChunkEncoding string `yaml:""chunk_encoding""`
-
- // For testing, you can override the address and ID of this ingester.
- ingesterClientFactory func(cfg client.Config, addr string) (grpc_health_v1.HealthClient, error)
-@@ -65,6 +68,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- f.DurationVar(&cfg.MaxChunkIdle, ""ingester.chunks-idle-period"", 30*time.Minute, """")
- f.IntVar(&cfg.BlockSize, ""ingester.chunks-block-size"", 256*1024, """")
- f.IntVar(&cfg.TargetChunkSize, ""ingester.chunk-target-size"", 0, """")
-+ f.StringVar(&cfg.ChunkEncoding, ""ingester.chunk-encoding"", chunkenc.EncGZIP.String(), fmt.Sprintf(""The algorithm to use for compressing chunk. (%s)"", chunkenc.SupportedEncoding()))
- }
-
- // Ingester builds chunks for incoming log streams.
-@@ -89,7 +93,8 @@ type Ingester struct {
- flushQueues []*util.PriorityQueue
- flushQueuesDone sync.WaitGroup
-
-- limits *validation.Overrides
-+ limits *validation.Overrides
-+ factory func() chunkenc.Chunk
- }
-
- // ChunkStore is the interface we need to store chunks.
-@@ -102,6 +107,10 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
- if cfg.ingesterClientFactory == nil {
- cfg.ingesterClientFactory = client.New
- }
-+ enc, err := chunkenc.ParseEncoding(cfg.ChunkEncoding)
-+ if err != nil {
-+ return nil, err
-+ }
-
- i := &Ingester{
- cfg: cfg,
-@@ -112,6 +121,9 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
- flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes),
- quitting: make(chan struct{}),
- limits: limits,
-+ factory: func() chunkenc.Chunk {
-+ return chunkenc.NewMemChunkSize(enc, cfg.BlockSize, cfg.TargetChunkSize)
-+ },
- }
-
- i.flushQueuesDone.Add(cfg.ConcurrentFlushes)
-@@ -120,7 +132,6 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
- go i.flushLoop(j)
- }
-
-- var err error
- i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, ""ingester"")
- if err != nil {
- return nil, err
-@@ -191,7 +202,7 @@ func (i *Ingester) getOrCreateInstance(instanceID string) *instance {
- defer i.instancesMtx.Unlock()
- inst, ok = i.instances[instanceID]
- if !ok {
-- inst = newInstance(instanceID, i.cfg.BlockSize, i.cfg.TargetChunkSize, i.limits)
-+ inst = newInstance(instanceID, i.factory, i.limits)
- i.instances[instanceID] = inst
- }
- return inst
-diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
-index f7a295e515189..a9d4d6e78dbae 100644
---- a/pkg/ingester/instance.go
-+++ b/pkg/ingester/instance.go
-@@ -16,6 +16,7 @@ import (
- ""github.com/cortexproject/cortex/pkg/ingester/index""
- cutil ""github.com/cortexproject/cortex/pkg/util""
-
-+ ""github.com/grafana/loki/pkg/chunkenc""
- ""github.com/grafana/loki/pkg/helpers""
- ""github.com/grafana/loki/pkg/iter""
- ""github.com/grafana/loki/pkg/logproto""
-@@ -60,15 +61,14 @@ type instance struct {
- streamsCreatedTotal prometheus.Counter
- streamsRemovedTotal prometheus.Counter
-
-- blockSize int
-- targetChunkSize int // Compressed bytes
-- tailers map[uint32]*tailer
-- tailerMtx sync.RWMutex
-+ tailers map[uint32]*tailer
-+ tailerMtx sync.RWMutex
-
-- limits *validation.Overrides
-+ limits *validation.Overrides
-+ factory func() chunkenc.Chunk
- }
-
--func newInstance(instanceID string, blockSize, targetChunkSize int, limits *validation.Overrides) *instance {
-+func newInstance(instanceID string, factory func() chunkenc.Chunk, limits *validation.Overrides) *instance {
- i := &instance{
- streams: map[model.Fingerprint]*stream{},
- index: index.New(),
-@@ -77,10 +77,9 @@ func newInstance(instanceID string, blockSize, targetChunkSize int, limits *vali
- streamsCreatedTotal: streamsCreatedTotal.WithLabelValues(instanceID),
- streamsRemovedTotal: streamsRemovedTotal.WithLabelValues(instanceID),
-
-- blockSize: blockSize,
-- targetChunkSize: targetChunkSize,
-- tailers: map[uint32]*tailer{},
-- limits: limits,
-+ factory: factory,
-+ tailers: map[uint32]*tailer{},
-+ limits: limits,
- }
- i.mapper = newFPMapper(i.getLabelsFromFingerprint)
- return i
-@@ -98,7 +97,7 @@ func (i *instance) consumeChunk(ctx context.Context, labels []client.LabelAdapte
- stream, ok := i.streams[fp]
- if !ok {
- sortedLabels := i.index.Add(labels, fp)
-- stream = newStream(fp, sortedLabels, i.blockSize, i.targetChunkSize)
-+ stream = newStream(fp, sortedLabels, i.factory)
- i.streams[fp] = stream
- i.streamsCreatedTotal.Inc()
- memoryStreams.Inc()
-@@ -156,7 +155,7 @@ func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, err
- return nil, httpgrpc.Errorf(http.StatusTooManyRequests, ""per-user streams limit (%d) exceeded"", i.limits.MaxStreamsPerUser(i.instanceID))
- }
- sortedLabels := i.index.Add(labels, fp)
-- stream = newStream(fp, sortedLabels, i.blockSize, i.targetChunkSize)
-+ stream = newStream(fp, sortedLabels, i.factory)
- i.streams[fp] = stream
- memoryStreams.Inc()
- i.streamsCreatedTotal.Inc()
-diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
-index c25b841efe6c5..39daee96374f2 100644
---- a/pkg/ingester/instance_test.go
-+++ b/pkg/ingester/instance_test.go
-@@ -10,6 +10,7 @@ import (
-
- ""github.com/prometheus/prometheus/pkg/labels""
-
-+ ""github.com/grafana/loki/pkg/chunkenc""
- ""github.com/grafana/loki/pkg/logproto""
-
- ""github.com/stretchr/testify/require""
-@@ -17,11 +18,15 @@ import (
- ""github.com/grafana/loki/pkg/util/validation""
- )
-
-+var defaultFactory = func() chunkenc.Chunk {
-+ return chunkenc.NewMemChunkSize(chunkenc.EncGZIP, 512, 0)
-+}
-+
- func TestLabelsCollisions(t *testing.T) {
- o, err := validation.NewOverrides(validation.Limits{MaxStreamsPerUser: 1000})
- require.NoError(t, err)
-
-- i := newInstance(""test"", 512, 0, o)
-+ i := newInstance(""test"", defaultFactory, o)
-
- // avoid entries from the future.
- tt := time.Now().Add(-5 * time.Minute)
-@@ -47,7 +52,7 @@ func TestConcurrentPushes(t *testing.T) {
- o, err := validation.NewOverrides(validation.Limits{MaxStreamsPerUser: 1000})
- require.NoError(t, err)
-
-- inst := newInstance(""test"", 512, 0, o)
-+ inst := newInstance(""test"", defaultFactory, o)
-
- const (
- concurrent = 10
-diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
-index b881b86a92aaa..706a141ff1b6e 100644
---- a/pkg/ingester/stream.go
-+++ b/pkg/ingester/stream.go
-@@ -54,11 +54,10 @@ func init() {
- type stream struct {
- // Newest chunk at chunks[n-1].
- // Not thread-safe; assume accesses to this are locked by caller.
-- chunks []chunkDesc
-- fp model.Fingerprint // possibly remapped fingerprint, used in the streams map
-- labels labels.Labels
-- blockSize int
-- targetChunkSize int // Compressed bytes
-+ chunks []chunkDesc
-+ fp model.Fingerprint // possibly remapped fingerprint, used in the streams map
-+ labels labels.Labels
-+ factory func() chunkenc.Chunk
-
- tailers map[uint32]*tailer
- tailerMtx sync.RWMutex
-@@ -77,13 +76,12 @@ type entryWithError struct {
- e error
- }
-
--func newStream(fp model.Fingerprint, labels labels.Labels, blockSize, targetChunkSize int) *stream {
-+func newStream(fp model.Fingerprint, labels labels.Labels, factory func() chunkenc.Chunk) *stream {
- return &stream{
-- fp: fp,
-- labels: labels,
-- blockSize: blockSize,
-- targetChunkSize: targetChunkSize,
-- tailers: map[uint32]*tailer{},
-+ fp: fp,
-+ labels: labels,
-+ factory: factory,
-+ tailers: map[uint32]*tailer{},
- }
- }
-
-@@ -105,7 +103,7 @@ func (s *stream) consumeChunk(_ context.Context, chunk *logproto.Chunk) error {
- func (s *stream) Push(_ context.Context, entries []logproto.Entry) error {
- if len(s.chunks) == 0 {
- s.chunks = append(s.chunks, chunkDesc{
-- chunk: chunkenc.NewMemChunkSize(chunkenc.EncGZIP, s.blockSize, s.targetChunkSize),
-+ chunk: s.factory(),
- })
- chunksCreatedTotal.Inc()
- }
-@@ -132,7 +130,7 @@ func (s *stream) Push(_ context.Context, entries []logproto.Entry) error {
- chunksCreatedTotal.Inc()
-
- s.chunks = append(s.chunks, chunkDesc{
-- chunk: chunkenc.NewMemChunkSize(chunkenc.EncGZIP, s.blockSize, s.targetChunkSize),
-+ chunk: s.factory(),
- })
- chunk = &s.chunks[len(s.chunks)-1]
- }
-diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
-index 74487567632c8..1eb75ef68e448 100644
---- a/vendor/github.com/klauspost/compress/LICENSE
-+++ b/vendor/github.com/klauspost/compress/LICENSE
-@@ -1,4 +1,5 @@
- Copyright (c) 2012 The Go Authors. All rights reserved.
-+Copyright (c) 2019 Klaus Post. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
-diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
-deleted file mode 100644
-index 8298d309aefaa..0000000000000
---- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
-+++ /dev/null
-@@ -1,42 +0,0 @@
--//+build !noasm
--//+build !appengine
--//+build !gccgo
--
--// Copyright 2015, Klaus Post, see LICENSE for details.
--
--package flate
--
--import (
-- ""github.com/klauspost/cpuid""
--)
--
--// crc32sse returns a hash for the first 4 bytes of the slice
--// len(a) must be >= 4.
--//go:noescape
--func crc32sse(a []byte) uint32
--
--// crc32sseAll calculates hashes for each 4-byte set in a.
-- // dst must be at least len(a) - 4 in size.
--// The size is not checked by the assembly.
--//go:noescape
--func crc32sseAll(a []byte, dst []uint32)
--
--// matchLenSSE4 returns the number of matching bytes in a and b
--// up to length 'max'. Both slices must be at least 'max'
--// bytes in size.
--//
--// TODO: drop the ""SSE4"" name, since it doesn't use any SSE instructions.
--//
--//go:noescape
--func matchLenSSE4(a, b []byte, max int) int
--
--// histogram accumulates a histogram of b in h.
--// h must be at least 256 entries in length,
--// and must be cleared before calling this function.
--//go:noescape
--func histogram(b []byte, h []int32)
--
--// Detect SSE 4.2 feature.
--func init() {
-- useSSE42 = cpuid.CPU.SSE42()
--}
-diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
-deleted file mode 100644
-index a7994372702b7..0000000000000
---- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
-+++ /dev/null
-@@ -1,214 +0,0 @@
--//+build !noasm
--//+build !appengine
--//+build !gccgo
--
--// Copyright 2015, Klaus Post, see LICENSE for details.
--
--// func crc32sse(a []byte) uint32
--TEXT ·crc32sse(SB), 4, $0
-- MOVQ a+0(FP), R10
-- XORQ BX, BX
--
-- // CRC32 dword (R10), EBX
-- BYTE $0xF2; BYTE $0x41; BYTE $0x0f
-- BYTE $0x38; BYTE $0xf1; BYTE $0x1a
--
-- MOVL BX, ret+24(FP)
-- RET
--
--// func crc32sseAll(a []byte, dst []uint32)
--TEXT ·crc32sseAll(SB), 4, $0
-- MOVQ a+0(FP), R8 // R8: src
-- MOVQ a_len+8(FP), R10 // input length
-- MOVQ dst+24(FP), R9 // R9: dst
-- SUBQ $4, R10
-- JS end
-- JZ one_crc
-- MOVQ R10, R13
-- SHRQ $2, R10 // len/4
-- ANDQ $3, R13 // len&3
-- XORQ BX, BX
-- ADDQ $1, R13
-- TESTQ R10, R10
-- JZ rem_loop
--
--crc_loop:
-- MOVQ (R8), R11
-- XORQ BX, BX
-- XORQ DX, DX
-- XORQ DI, DI
-- MOVQ R11, R12
-- SHRQ $8, R11
-- MOVQ R12, AX
-- MOVQ R11, CX
-- SHRQ $16, R12
-- SHRQ $16, R11
-- MOVQ R12, SI
--
-- // CRC32 EAX, EBX
-- BYTE $0xF2; BYTE $0x0f
-- BYTE $0x38; BYTE $0xf1; BYTE $0xd8
--
-- // CRC32 ECX, EDX
-- BYTE $0xF2; BYTE $0x0f
-- BYTE $0x38; BYTE $0xf1; BYTE $0xd1
--
-- // CRC32 ESI, EDI
-- BYTE $0xF2; BYTE $0x0f
-- BYTE $0x38; BYTE $0xf1; BYTE $0xfe
-- MOVL BX, (R9)
-- MOVL DX, 4(R9)
-- MOVL DI, 8(R9)
--
-- XORQ BX, BX
-- MOVL R11, AX
--
-- // CRC32 EAX, EBX
-- BYTE $0xF2; BYTE $0x0f
-- BYTE $0x38; BYTE $0xf1; BYTE $0xd8
-- MOVL BX, 12(R9)
--
-- ADDQ $16, R9
-- ADDQ $4, R8
-- XORQ BX, BX
-- SUBQ $1, R10
-- JNZ crc_loop
--
--rem_loop:
-- MOVL (R8), AX
--
-- // CRC32 EAX, EBX
-- BYTE $0xF2; BYTE $0x0f
-- BYTE $0x38; BYTE $0xf1; BYTE $0xd8
--
-- MOVL BX, (R9)
-- ADDQ $4, R9
-- ADDQ $1, R8
-- XORQ BX, BX
-- SUBQ $1, R13
-- JNZ rem_loop
--
--end:
-- RET
--
--one_crc:
-- MOVQ $1, R13
-- XORQ BX, BX
-- JMP rem_loop
--
--// func matchLenSSE4(a, b []byte, max int) int
--TEXT ·matchLenSSE4(SB), 4, $0
-- MOVQ a_base+0(FP), SI
-- MOVQ b_base+24(FP), DI
-- MOVQ DI, DX
-- MOVQ max+48(FP), CX
--
--cmp8:
-- // As long as we are 8 or more bytes before the end of max, we can load and
-- // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
-- CMPQ CX, $8
-- JLT cmp1
-- MOVQ (SI), AX
-- MOVQ (DI), BX
-- CMPQ AX, BX
-- JNE bsf
-- ADDQ $8, SI
-- ADDQ $8, DI
-- SUBQ $8, CX
-- JMP cmp8
--
--bsf:
-- // If those 8 bytes were not equal, XOR the two 8 byte values, and return
-- // the index of the first byte that differs. The BSF instruction finds the
-- // least significant 1 bit, the amd64 architecture is little-endian, and
-- // the shift by 3 converts a bit index to a byte index.
-- XORQ AX, BX
-- BSFQ BX, BX
-- SHRQ $3, BX
-- ADDQ BX, DI
--
-- // Subtract off &b[0] to convert from &b[ret] to ret, and return.
-- SUBQ DX, DI
-- MOVQ DI, ret+56(FP)
-- RET
--
--cmp1:
-- // In the slices' tail, compare 1 byte at a time.
-- CMPQ CX, $0
-- JEQ matchLenEnd
-- MOVB (SI), AX
-- MOVB (DI), BX
-- CMPB AX, BX
-- JNE matchLenEnd
-- ADDQ $1, SI
-- ADDQ $1, DI
-- SUBQ $1, CX
-- JMP cmp1
--
--matchLenEnd:
-- // Subtract off &b[0] to convert from &b[ret] to ret, and return.
-- SUBQ DX, DI
-- MOVQ DI, ret+56(FP)
-- RET
--
--// func histogram(b []byte, h []int32)
--TEXT ·histogram(SB), 4, $0
-- MOVQ b+0(FP), SI // SI: &b
-- MOVQ b_len+8(FP), R9 // R9: len(b)
-- MOVQ h+24(FP), DI // DI: Histogram
-- MOVQ R9, R8
-- SHRQ $3, R8
-- JZ hist1
-- XORQ R11, R11
--
--loop_hist8:
-- MOVQ (SI), R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- MOVB R10, R11
-- INCL (DI)(R11*4)
-- SHRQ $8, R10
--
-- INCL (DI)(R10*4)
--
-- ADDQ $8, SI
-- DECQ R8
-- JNZ loop_hist8
--
--hist1:
-- ANDQ $7, R9
-- JZ end_hist
-- XORQ R10, R10
--
--loop_hist1:
-- MOVB (SI), R10
-- INCL (DI)(R10*4)
-- INCQ SI
-- DECQ R9
-- JNZ loop_hist1
--
--end_hist:
-- RET
-diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go
-deleted file mode 100644
-index dcf43bd50a80e..0000000000000
---- a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go
-+++ /dev/null
-@@ -1,35 +0,0 @@
--//+build !amd64 noasm appengine gccgo
--
--// Copyright 2015, Klaus Post, see LICENSE for details.
--
--package flate
--
--func init() {
-- useSSE42 = false
--}
--
--// crc32sse should never be called.
--func crc32sse(a []byte) uint32 {
-- panic(""no assembler"")
--}
--
--// crc32sseAll should never be called.
--func crc32sseAll(a []byte, dst []uint32) {
-- panic(""no assembler"")
--}
--
--// matchLenSSE4 should never be called.
--func matchLenSSE4(a, b []byte, max int) int {
-- panic(""no assembler"")
-- return 0
--}
--
--// histogram accumulates a histogram of b in h.
--//
--// len(h) must be >= 256, and h's elements must be all zeroes.
--func histogram(b []byte, h []int32) {
-- h = h[:256]
-- for _, t := range b {
-- h[t]++
-- }
--}
-diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
-index 6287951204e8d..20c94f5968439 100644
---- a/vendor/github.com/klauspost/compress/flate/deflate.go
-+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
-@@ -50,8 +50,6 @@ const (
- skipNever = math.MaxInt32
- )
-
--var useSSE42 bool
--
- type compressionLevel struct {
- good, lazy, nice, chain, fastSkipHashing, level int
- }
-@@ -97,9 +95,8 @@ type advancedState struct {
- hashOffset int
-
- // input window: unprocessed data is window[index:windowEnd]
-- index int
-- bulkHasher func([]byte, []uint32)
-- hashMatch [maxMatchLength + minMatchLength]uint32
-+ index int
-+ hashMatch [maxMatchLength + minMatchLength]uint32
- }
-
- type compressor struct {
-@@ -120,7 +117,7 @@ type compressor struct {
-
- // queued output tokens
- tokens tokens
-- snap fastEnc
-+ fast fastEnc
- state *advancedState
- }
-
-@@ -164,14 +161,14 @@ func (d *compressor) fillDeflate(b []byte) int {
- return n
- }
-
--func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
-+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
- if index > 0 || eof {
- var window []byte
- if d.blockStart <= index {
- window = d.window[d.blockStart:index]
- }
- d.blockStart = index
-- d.w.writeBlock(tok.tokens[:tok.n], eof, window)
-+ d.w.writeBlock(tok, eof, window)
- return d.w.err
- }
- return nil
-@@ -180,20 +177,20 @@ func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
- // writeBlockSkip writes the current block and uses the number of tokens
- // to determine if the block should be stored on no matches, or
- // only huffman encoded.
--func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
-+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
- if index > 0 || eof {
- if d.blockStart <= index {
- window := d.window[d.blockStart:index]
- // If we removed less than a 64th of all literals
- // we huffman compress the block.
- if int(tok.n) > len(window)-int(tok.n>>6) {
-- d.w.writeBlockHuff(eof, window)
-+ d.w.writeBlockHuff(eof, window, d.sync)
- } else {
- // Write a dynamic huffman block.
-- d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window)
-+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
- }
- } else {
-- d.w.writeBlock(tok.tokens[:tok.n], eof, nil)
-+ d.w.writeBlock(tok, eof, nil)
- }
- d.blockStart = index
- return d.w.err
-@@ -208,8 +205,16 @@ func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
- func (d *compressor) fillWindow(b []byte) {
- // Do not fill window if we are in store-only mode,
- // use constant or Snappy compression.
-- switch d.compressionLevel.level {
-- case 0, 1, 2:
-+ if d.level == 0 {
-+ return
-+ }
-+ if d.fast != nil {
-+ // encode the last data, but discard the result
-+ if len(b) > maxMatchOffset {
-+ b = b[len(b)-maxMatchOffset:]
-+ }
-+ d.fast.Encode(&d.tokens, b)
-+ d.tokens.Reset()
- return
- }
- s := d.state
-@@ -236,7 +241,7 @@ func (d *compressor) fillWindow(b []byte) {
- }
-
- dst := s.hashMatch[:dstSize]
-- s.bulkHasher(tocheck, dst)
-+ bulkHash4(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
-@@ -284,62 +289,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
-
- for i := prevHead; tries > 0; tries-- {
- if wEnd == win[i+length] {
-- n := matchLen(win[i:], wPos, minMatchLook)
--
-- if n > length && (n > minMatchLength || pos-i <= 4096) {
-- length = n
-- offset = pos - i
-- ok = true
-- if n >= nice {
-- // The match is good enough that we don't try to find a better one.
-- break
-- }
-- wEnd = win[pos+n]
-- }
-- }
-- if i == minIndex {
-- // hashPrev[i & windowMask] has already been overwritten, so stop now.
-- break
-- }
-- i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
-- if i < minIndex || i < 0 {
-- break
-- }
-- }
-- return
--}
--
--// Try to find a match starting at index whose length is greater than prevSize.
--// We only look at chainCount possibilities before giving up.
--// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
--func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
-- minMatchLook := maxMatchLength
-- if lookahead < minMatchLook {
-- minMatchLook = lookahead
-- }
--
-- win := d.window[0 : pos+minMatchLook]
--
-- // We quit when we get a match that's at least nice long
-- nice := len(win) - pos
-- if d.nice < nice {
-- nice = d.nice
-- }
--
-- // If we've got a match that's good enough, only look in 1/4 the chain.
-- tries := d.chain
-- length = prevLength
-- if length >= d.good {
-- tries >>= 2
-- }
--
-- wEnd := win[pos+length]
-- wPos := win[pos:]
-- minIndex := pos - windowSize
--
-- for i := prevHead; tries > 0; tries-- {
-- if wEnd == win[i+length] {
-- n := matchLenSSE4(win[i:], wPos, minMatchLook)
-+ n := matchLen(win[i:i+minMatchLook], wPos)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
-@@ -372,42 +322,27 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
- return d.w.err
- }
-
--const hashmul = 0x1e35a7bd
--
- // hash4 returns a hash representation of the first 4 bytes
- // of the supplied slice.
- // The caller must ensure that len(b) >= 4.
- func hash4(b []byte) uint32 {
-- return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
-+ b = b[:4]
-+ return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
- }
-
- // bulkHash4 will compute hashes using the same
- // algorithm as hash4
- func bulkHash4(b []byte, dst []uint32) {
-- if len(b) < minMatchLength {
-+ if len(b) < 4 {
- return
- }
- hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
-- dst[0] = (hb * hashmul) >> (32 - hashBits)
-- end := len(b) - minMatchLength + 1
-+ dst[0] = hash4u(hb, hashBits)
-+ end := len(b) - 4 + 1
- for i := 1; i < end; i++ {
- hb = (hb << 8) | uint32(b[i+3])
-- dst[i] = (hb * hashmul) >> (32 - hashBits)
-- }
--}
--
--// matchLen returns the number of matching bytes in a and b
--// up to length 'max'. Both slices must be at least 'max'
--// bytes in size.
--func matchLen(a, b []byte, max int) int {
-- a = a[:max]
-- b = b[:len(a)]
-- for i, av := range a {
-- if b[i] != av {
-- return i
-- }
-+ dst[i] = hash4u(hb, hashBits)
- }
-- return max
- }
-
- func (d *compressor) initDeflate() {
-@@ -424,149 +359,6 @@ func (d *compressor) initDeflate() {
- s.offset = 0
- s.hash = 0
- s.chainHead = -1
-- s.bulkHasher = bulkHash4
-- if useSSE42 {
-- s.bulkHasher = crc32sseAll
-- }
--}
--
--// Assumes that d.fastSkipHashing != skipNever,
--// otherwise use deflateLazy
--func (d *compressor) deflate() {
-- s := d.state
-- // Sanity enables additional runtime tests.
-- // It's intended to be used during development
-- // to supplement the currently ad-hoc unit tests.
-- const sanity = false
--
-- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
-- return
-- }
--
-- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
-- if s.index < s.maxInsertIndex {
-- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
-- }
--
-- for {
-- if sanity && s.index > d.windowEnd {
-- panic(""index > windowEnd"")
-- }
-- lookahead := d.windowEnd - s.index
-- if lookahead < minMatchLength+maxMatchLength {
-- if !d.sync {
-- return
-- }
-- if sanity && s.index > d.windowEnd {
-- panic(""index > windowEnd"")
-- }
-- if lookahead == 0 {
-- if d.tokens.n > 0 {
-- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- return
-- }
-- }
-- if s.index < s.maxInsertIndex {
-- // Update the hash
-- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
-- ch := s.hashHead[s.hash&hashMask]
-- s.chainHead = int(ch)
-- s.hashPrev[s.index&windowMask] = ch
-- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
-- }
-- s.length = minMatchLength - 1
-- s.offset = 0
-- minIndex := s.index - windowSize
-- if minIndex < 0 {
-- minIndex = 0
-- }
--
-- if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
-- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
-- s.length = newLength
-- s.offset = newOffset
-- }
-- }
-- if s.length >= minMatchLength {
-- s.ii = 0
-- // There was a match at the previous step, and the current match is
-- // not better. Output the previous match.
-- // ""s.length-3"" should NOT be ""s.length-minMatchLength"", since the format always assume 3
-- d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
-- d.tokens.n++
-- // Insert in the hash table all strings up to the end of the match.
-- // index and index-1 are already inserted. If there is not enough
-- // lookahead, the last two strings are not inserted into the hash
-- // table.
-- if s.length <= d.fastSkipHashing {
-- var newIndex int
-- newIndex = s.index + s.length
-- // Calculate missing hashes
-- end := newIndex
-- if end > s.maxInsertIndex {
-- end = s.maxInsertIndex
-- }
-- end += minMatchLength - 1
-- startindex := s.index + 1
-- if startindex > s.maxInsertIndex {
-- startindex = s.maxInsertIndex
-- }
-- tocheck := d.window[startindex:end]
-- dstSize := len(tocheck) - minMatchLength + 1
-- if dstSize > 0 {
-- dst := s.hashMatch[:dstSize]
-- bulkHash4(tocheck, dst)
-- var newH uint32
-- for i, val := range dst {
-- di := i + startindex
-- newH = val & hashMask
-- // Get previous value with the same hash.
-- // Our chain should point to the previous value.
-- s.hashPrev[di&windowMask] = s.hashHead[newH]
-- // Set the head of the hash chain to us.
-- s.hashHead[newH] = uint32(di + s.hashOffset)
-- }
-- s.hash = newH
-- }
-- s.index = newIndex
-- } else {
-- // For matches this long, we don't bother inserting each individual
-- // item into the table.
-- s.index += s.length
-- if s.index < s.maxInsertIndex {
-- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
-- }
-- }
-- if d.tokens.n == maxFlateBlockTokens {
-- // The block includes the current character
-- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- } else {
-- s.ii++
-- end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1
-- if end > d.windowEnd {
-- end = d.windowEnd
-- }
-- for i := s.index; i < end; i++ {
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
-- d.tokens.n++
-- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- }
-- s.index = end
-- }
-- }
- }
-
- // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
-@@ -603,15 +395,14 @@ func (d *compressor) deflateLazy() {
- // Flush current output block if any.
- if d.byteAvailable {
- // There is still one pending token that needs to be flushed
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-+ d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- }
- if d.tokens.n > 0 {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- }
- return
- }
-@@ -642,8 +433,7 @@ func (d *compressor) deflateLazy() {
- if prevLength >= minMatchLength && s.length <= prevLength {
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
-- d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
-- d.tokens.n++
-+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
-
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
-@@ -684,10 +474,10 @@ func (d *compressor) deflateLazy() {
- s.length = minMatchLength - 1
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- }
- } else {
- // Reset, if we got a match this run.
-@@ -697,13 +487,12 @@ func (d *compressor) deflateLazy() {
- // We have a byte waiting. Emit it.
- if d.byteAvailable {
- s.ii++
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-+ d.tokens.AddLiteral(d.window[s.index-1])
- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- }
- s.index++
-
-@@ -716,343 +505,24 @@ func (d *compressor) deflateLazy() {
- break
- }
-
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-+ d.tokens.AddLiteral(d.window[s.index-1])
- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- }
- s.index++
- }
- // Flush last byte
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-+ d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
-- d.tokens.n = 0
-- }
-- }
-- } else {
-- s.index++
-- d.byteAvailable = true
-- }
-- }
-- }
--}
--
--// Assumes that d.fastSkipHashing != skipNever,
--// otherwise use deflateLazySSE
--func (d *compressor) deflateSSE() {
-- s := d.state
-- // Sanity enables additional runtime tests.
-- // It's intended to be used during development
-- // to supplement the currently ad-hoc unit tests.
-- const sanity = false
--
-- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
-- return
-- }
--
-- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
-- if s.index < s.maxInsertIndex {
-- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
-- }
--
-- for {
-- if sanity && s.index > d.windowEnd {
-- panic(""index > windowEnd"")
-- }
-- lookahead := d.windowEnd - s.index
-- if lookahead < minMatchLength+maxMatchLength {
-- if !d.sync {
-- return
-- }
-- if sanity && s.index > d.windowEnd {
-- panic(""index > windowEnd"")
-- }
-- if lookahead == 0 {
-- if d.tokens.n > 0 {
-- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- return
-- }
-- }
-- if s.index < s.maxInsertIndex {
-- // Update the hash
-- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
-- ch := s.hashHead[s.hash]
-- s.chainHead = int(ch)
-- s.hashPrev[s.index&windowMask] = ch
-- s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
-- }
-- s.length = minMatchLength - 1
-- s.offset = 0
-- minIndex := s.index - windowSize
-- if minIndex < 0 {
-- minIndex = 0
-- }
--
-- if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
-- if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
-- s.length = newLength
-- s.offset = newOffset
-- }
-- }
-- if s.length >= minMatchLength {
-- s.ii = 0
-- // There was a match at the previous step, and the current match is
-- // not better. Output the previous match.
-- // ""s.length-3"" should NOT be ""s.length-minMatchLength"", since the format always assume 3
-- d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
-- d.tokens.n++
-- // Insert in the hash table all strings up to the end of the match.
-- // index and index-1 are already inserted. If there is not enough
-- // lookahead, the last two strings are not inserted into the hash
-- // table.
-- if s.length <= d.fastSkipHashing {
-- var newIndex int
-- newIndex = s.index + s.length
-- // Calculate missing hashes
-- end := newIndex
-- if end > s.maxInsertIndex {
-- end = s.maxInsertIndex
-- }
-- end += minMatchLength - 1
-- startindex := s.index + 1
-- if startindex > s.maxInsertIndex {
-- startindex = s.maxInsertIndex
-- }
-- tocheck := d.window[startindex:end]
-- dstSize := len(tocheck) - minMatchLength + 1
-- if dstSize > 0 {
-- dst := s.hashMatch[:dstSize]
--
-- crc32sseAll(tocheck, dst)
-- var newH uint32
-- for i, val := range dst {
-- di := i + startindex
-- newH = val & hashMask
-- // Get previous value with the same hash.
-- // Our chain should point to the previous value.
-- s.hashPrev[di&windowMask] = s.hashHead[newH]
-- // Set the head of the hash chain to us.
-- s.hashHead[newH] = uint32(di + s.hashOffset)
-- }
-- s.hash = newH
-- }
-- s.index = newIndex
-- } else {
-- // For matches this long, we don't bother inserting each individual
-- // item into the table.
-- s.index += s.length
-- if s.index < s.maxInsertIndex {
-- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
-- }
-- }
-- if d.tokens.n == maxFlateBlockTokens {
-- // The block includes the current character
-- if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- } else {
-- s.ii++
-- end := s.index + int(s.ii>>5) + 1
-- if end > d.windowEnd {
-- end = d.windowEnd
-- }
-- for i := s.index; i < end; i++ {
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
-- d.tokens.n++
-- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- }
-- s.index = end
-- }
-- }
--}
--
--// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
--// meaning it always has lazy matching on.
--func (d *compressor) deflateLazySSE() {
-- s := d.state
-- // Sanity enables additional runtime tests.
-- // It's intended to be used during development
-- // to supplement the currently ad-hoc unit tests.
-- const sanity = false
--
-- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
-- return
-- }
--
-- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
-- if s.index < s.maxInsertIndex {
-- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
-- }
--
-- for {
-- if sanity && s.index > d.windowEnd {
-- panic(""index > windowEnd"")
-- }
-- lookahead := d.windowEnd - s.index
-- if lookahead < minMatchLength+maxMatchLength {
-- if !d.sync {
-- return
-- }
-- if sanity && s.index > d.windowEnd {
-- panic(""index > windowEnd"")
-- }
-- if lookahead == 0 {
-- // Flush current output block if any.
-- if d.byteAvailable {
-- // There is still one pending token that needs to be flushed
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-- d.byteAvailable = false
-- }
-- if d.tokens.n > 0 {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- return
-- }
-- }
-- if s.index < s.maxInsertIndex {
-- // Update the hash
-- s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
-- ch := s.hashHead[s.hash]
-- s.chainHead = int(ch)
-- s.hashPrev[s.index&windowMask] = ch
-- s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
-- }
-- prevLength := s.length
-- prevOffset := s.offset
-- s.length = minMatchLength - 1
-- s.offset = 0
-- minIndex := s.index - windowSize
-- if minIndex < 0 {
-- minIndex = 0
-- }
--
-- if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
-- if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
-- s.length = newLength
-- s.offset = newOffset
-- }
-- }
-- if prevLength >= minMatchLength && s.length <= prevLength {
-- // There was a match at the previous step, and the current match is
-- // not better. Output the previous match.
-- d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
-- d.tokens.n++
--
-- // Insert in the hash table all strings up to the end of the match.
-- // index and index-1 are already inserted. If there is not enough
-- // lookahead, the last two strings are not inserted into the hash
-- // table.
-- var newIndex int
-- newIndex = s.index + prevLength - 1
-- // Calculate missing hashes
-- end := newIndex
-- if end > s.maxInsertIndex {
-- end = s.maxInsertIndex
-- }
-- end += minMatchLength - 1
-- startindex := s.index + 1
-- if startindex > s.maxInsertIndex {
-- startindex = s.maxInsertIndex
-- }
-- tocheck := d.window[startindex:end]
-- dstSize := len(tocheck) - minMatchLength + 1
-- if dstSize > 0 {
-- dst := s.hashMatch[:dstSize]
-- crc32sseAll(tocheck, dst)
-- var newH uint32
-- for i, val := range dst {
-- di := i + startindex
-- newH = val & hashMask
-- // Get previous value with the same hash.
-- // Our chain should point to the previous value.
-- s.hashPrev[di&windowMask] = s.hashHead[newH]
-- // Set the head of the hash chain to us.
-- s.hashHead[newH] = uint32(di + s.hashOffset)
-- }
-- s.hash = newH
-- }
--
-- s.index = newIndex
-- d.byteAvailable = false
-- s.length = minMatchLength - 1
-- if d.tokens.n == maxFlateBlockTokens {
-- // The block includes the current character
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- } else {
-- // Reset, if we got a match this run.
-- if s.length >= minMatchLength {
-- s.ii = 0
-- }
-- // We have a byte waiting. Emit it.
-- if d.byteAvailable {
-- s.ii++
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- s.index++
--
-- // If we have a long run of no matches, skip additional bytes
-- // Resets when s.ii overflows after 64KB.
-- if s.ii > 31 {
-- n := int(s.ii >> 6)
-- for j := 0; j < n; j++ {
-- if s.index >= d.windowEnd-1 {
-- break
-- }
--
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-- }
-- s.index++
-- }
-- // Flush last byte
-- d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
-- d.tokens.n++
-- d.byteAvailable = false
-- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
-- if d.tokens.n == maxFlateBlockTokens {
-- if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
-- return
-- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- }
- }
- } else {
-@@ -1085,17 +555,17 @@ func (d *compressor) storeHuff() {
- if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
- return
- }
-- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
-+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- d.windowEnd = 0
- }
-
--// storeHuff will compress and store the currently added data,
-+// storeFast will compress and store the currently added data,
- // if enough has been accumulated or we are at the end of the stream.
- // Any error that occurred will be in d.err
--func (d *compressor) storeSnappy() {
-+func (d *compressor) storeFast() {
- // We only compress if we have maxStoreBlockSize.
-- if d.windowEnd < maxStoreBlockSize {
-+ if d.windowEnd < len(d.window) {
- if !d.sync {
- return
- }
-@@ -1106,32 +576,30 @@ func (d *compressor) storeSnappy() {
- }
- if d.windowEnd <= 32 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
-- d.tokens.n = 0
-- d.windowEnd = 0
- } else {
-- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
-+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
- d.err = d.w.err
- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- d.windowEnd = 0
-- d.snap.Reset()
-+ d.fast.Reset()
- return
- }
- }
-
-- d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
-+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
- // If we made zero matches, store the block as is.
-- if int(d.tokens.n) == d.windowEnd {
-+ if d.tokens.n == 0 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- // If we removed less than 1/16th, huffman compress the block.
- } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
-- d.w.writeBlockHuff(false, d.window[:d.windowEnd])
-+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- } else {
-- d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
-+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- }
-- d.tokens.n = 0
-+ d.tokens.Reset()
- d.windowEnd = 0
- }
-
-@@ -1176,36 +644,26 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).store
- case level == ConstantCompression:
-+ d.w.logReusePenalty = uint(4)
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeHuff
-- case level >= 1 && level <= 4:
-- d.snap = newFastEnc(level)
-- d.window = make([]byte, maxStoreBlockSize)
-- d.fill = (*compressor).fillBlock
-- d.step = (*compressor).storeSnappy
- case level == DefaultCompression:
- level = 5
- fallthrough
-- case 5 <= level && level <= 9:
-+ case level >= 1 && level <= 6:
-+ d.w.logReusePenalty = uint(level + 1)
-+ d.fast = newFastEnc(level)
-+ d.window = make([]byte, maxStoreBlockSize)
-+ d.fill = (*compressor).fillBlock
-+ d.step = (*compressor).storeFast
-+ case 7 <= level && level <= 9:
-+ d.w.logReusePenalty = uint(level)
- d.state = &advancedState{}
- d.compressionLevel = levels[level]
- d.initDeflate()
- d.fill = (*compressor).fillDeflate
-- if d.fastSkipHashing == skipNever {
-- if useSSE42 {
-- d.step = (*compressor).deflateLazySSE
-- } else {
-- d.step = (*compressor).deflateLazy
-- }
-- } else {
-- if useSSE42 {
-- d.step = (*compressor).deflateSSE
-- } else {
-- d.step = (*compressor).deflate
--
-- }
-- }
-+ d.step = (*compressor).deflateLazy
- default:
- return fmt.Errorf(""flate: invalid compression level %d: want value in range [-2, 9]"", level)
- }
-@@ -1218,10 +676,10 @@ func (d *compressor) reset(w io.Writer) {
- d.sync = false
- d.err = nil
- // We only need to reset a few things for Snappy.
-- if d.snap != nil {
-- d.snap.Reset()
-+ if d.fast != nil {
-+ d.fast.Reset()
- d.windowEnd = 0
-- d.tokens.n = 0
-+ d.tokens.Reset()
- return
- }
- switch d.compressionLevel.chain {
-@@ -1240,7 +698,7 @@ func (d *compressor) reset(w io.Writer) {
- s.hashOffset = 1
- s.index, d.windowEnd = 0, 0
- d.blockStart, d.byteAvailable = 0, false
-- d.tokens.n = 0
-+ d.tokens.Reset()
- s.length = minMatchLength - 1
- s.offset = 0
- s.hash = 0
-diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
-new file mode 100644
-index 0000000000000..b0a470f92e0eb
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
-@@ -0,0 +1,257 @@
-+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-+// Modified for deflate by Klaus Post (c) 2015.
-+// Use of this source code is governed by a BSD-style
-+// license that can be found in the LICENSE file.
-+
-+package flate
-+
-+import (
-+ ""fmt""
-+ ""math/bits""
-+)
-+
-+type fastEnc interface {
-+ Encode(dst *tokens, src []byte)
-+ Reset()
-+}
-+
-+func newFastEnc(level int) fastEnc {
-+ switch level {
-+ case 1:
-+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
-+ case 2:
-+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
-+ case 3:
-+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
-+ case 4:
-+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
-+ case 5:
-+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
-+ case 6:
-+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
-+ default:
-+ panic(""invalid level specified"")
-+ }
-+}
-+
-+const (
-+ tableBits = 16 // Bits used in the table
-+ tableSize = 1 << tableBits // Size of the table
-+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
-+ baseMatchOffset = 1 // The smallest match offset
-+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
-+ maxMatchOffset = 1 << 15 // The largest match offset
-+
-+ bTableBits = 18 // Bits used in the big tables
-+ bTableSize = 1 << bTableBits // Size of the table
-+ allocHistory = maxMatchOffset * 10 // Size to preallocate for history.
-+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this.
-+)
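-+
-+// bufferReset exists because the match tables store absolute positions as e.cur + s in an
-+// int32; before e.cur can reach that limit the per-level Encode methods rebase the tables
-+// (their wraparound loops) so stored offsets never overflow.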
-+
-+const (
-+ prime3bytes = 506832829
-+ prime4bytes = 2654435761
-+ prime5bytes = 889523592379
-+ prime6bytes = 227718039650203
-+ prime7bytes = 58295818150454627
-+ prime8bytes = 0xcf1bbcdcb7a56463
-+)
-+
-+func load32(b []byte, i int) uint32 {
-+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-+ b = b[i:]
-+ b = b[:4]
-+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-+}
-+
-+func load64(b []byte, i int) uint64 {
-+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-+ b = b[i:]
-+ b = b[:8]
-+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-+}
-+
-+func load3232(b []byte, i int32) uint32 {
-+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-+ b = b[i:]
-+ b = b[:4]
-+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-+}
-+
-+func load6432(b []byte, i int32) uint64 {
-+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-+ b = b[i:]
-+ b = b[:8]
-+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-+}
-+
-+func hash(u uint32) uint32 {
-+ return (u * 0x1e35a7bd) >> tableShift
-+}
-+
-+type tableEntry struct {
-+ val uint32
-+ offset int32
-+}
-+
-+// fastGen holds the history buffer and current offset shared by the
-+// fast encoders (levels 1-6); each level embeds it next to its own
-+// match table. This is the generic implementation.
-+type fastGen struct {
-+ hist []byte
-+ cur int32
-+}
-+
-+func (e *fastGen) addBlock(src []byte) int32 {
-+ // check if we have space already
-+ if len(e.hist)+len(src) > cap(e.hist) {
-+ if cap(e.hist) == 0 {
-+ e.hist = make([]byte, 0, allocHistory)
-+ } else {
-+ if cap(e.hist) < maxMatchOffset*2 {
-+ panic(""unexpected buffer size"")
-+ }
-+ // Move down
-+ offset := int32(len(e.hist)) - maxMatchOffset
-+ copy(e.hist[0:maxMatchOffset], e.hist[offset:])
-+ e.cur += offset
-+ e.hist = e.hist[:maxMatchOffset]
-+ }
-+ }
-+ s := int32(len(e.hist))
-+ e.hist = append(e.hist, src...)
-+ return s
-+}
-+
-+// hash4u returns the hash of u to fit in a hash table with h bits.
-+// Preferably h should be a constant and should always be <32.
-+func hash4u(u uint32, h uint8) uint32 {
-+ return (u * prime4bytes) >> ((32 - h) & 31)
-+}
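-+
-+// (prime4bytes is a prime close to 2^32/phi, Knuth's multiplicative-hashing constant, so the
-+// top h bits of u*prime4bytes are well mixed even for similar inputs.)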
-+
-+type tableEntryPrev struct {
-+ Cur tableEntry
-+ Prev tableEntry
-+}
-+
-+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
-+// Preferably h should be a constant and should always be <32.
-+func hash4x64(u uint64, h uint8) uint32 {
-+ return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
-+}
-+
-+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
-+// Preferably h should be a constant and should always be <64.
-+func hash7(u uint64, h uint8) uint32 {
-+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
-+}
-+
-+// hash8 returns the hash of u to fit in a hash table with h bits.
-+// Preferably h should be a constant and should always be <64.
-+func hash8(u uint64, h uint8) uint32 {
-+ return uint32((u * prime8bytes) >> ((64 - h) & 63))
-+}
-+
-+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
-+// Preferably h should be a constant and should always be <64.
-+func hash6(u uint64, h uint8) uint32 {
-+ return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
-+}
-+
-+// matchlen will return the match length between offsets s and t in src.
-+// The maximum length returned is maxMatchLength - 4.
-+// It is assumed that s > t, that t >= 0 and s < len(src).
-+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
-+ if debugDecode {
-+ if t >= s {
-+ panic(fmt.Sprint(""t >=s:"", t, s))
-+ }
-+ if int(s) >= len(src) {
-+ panic(fmt.Sprint(""s >= len(src):"", s, len(src)))
-+ }
-+ if t < 0 {
-+ panic(fmt.Sprint(""t < 0:"", t))
-+ }
-+ if s-t > maxMatchOffset {
-+ panic(fmt.Sprint(s, ""-"", t, ""("", s-t, "") > maxMatchLength ("", maxMatchOffset, "")""))
-+ }
-+ }
-+ s1 := int(s) + maxMatchLength - 4
-+ if s1 > len(src) {
-+ s1 = len(src)
-+ }
-+
-+ // Extend the match to be as long as possible.
-+ return int32(matchLen(src[s:s1], src[t:]))
-+}
-+
-+// matchlenLong will return the match length between offsets s and t in src.
-+// It is assumed that s > t, that t >= 0 and s < len(src).
-+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
-+ if debugDecode {
-+ if t >= s {
-+ panic(fmt.Sprint(""t >=s:"", t, s))
-+ }
-+ if int(s) >= len(src) {
-+ panic(fmt.Sprint(""s >= len(src):"", s, len(src)))
-+ }
-+ if t < 0 {
-+ panic(fmt.Sprint(""t < 0:"", t))
-+ }
-+ if s-t > maxMatchOffset {
-+ panic(fmt.Sprint(s, ""-"", t, ""("", s-t, "") > maxMatchLength ("", maxMatchOffset, "")""))
-+ }
-+ }
-+ // Extend the match to be as long as possible.
-+ return int32(matchLen(src[s:], src[t:]))
-+}
-+
-+// Reset the encoding table.
-+func (e *fastGen) Reset() {
-+ if cap(e.hist) < int(maxMatchOffset*8) {
-+ l := maxMatchOffset * 8
-+ // Make it at least 1MB.
-+ if l < 1<<20 {
-+ l = 1 << 20
-+ }
-+ e.hist = make([]byte, 0, l)
-+ }
-+ // We offset current position so everything will be out of reach
-+ e.cur += maxMatchOffset + int32(len(e.hist))
-+ e.hist = e.hist[:0]
-+}
-+
-+// matchLen returns the number of leading bytes that a and b have in common.
-+// 'a' must be the shorter of the two.
-+func matchLen(a, b []byte) int {
-+ b = b[:len(a)]
-+ var checked int
-+ if len(a) > 4 {
-+ // Try 4 bytes first
-+ if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
-+ return bits.TrailingZeros32(diff) >> 3
-+ }
-+ // Switch to 8 byte matching.
-+ checked = 4
-+ a = a[4:]
-+ b = b[4:]
-+ for len(a) >= 8 {
-+ b = b[:len(a)]
-+ if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
-+ return checked + (bits.TrailingZeros64(diff) >> 3)
-+ }
-+ checked += 8
-+ a = a[8:]
-+ b = b[8:]
-+ }
-+ }
-+ b = b[:len(a)]
-+ for i := range a {
-+ if a[i] != b[i] {
-+ return int(i) + checked
-+ }
-+ }
-+ return len(a) + checked
-+}
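-+
-+// As a worked illustration of the 8-byte loop in matchLen above: if two 8-byte groups first
-+// differ in byte 6, their XOR has its lowest set bit in bits 48..55, so
-+// bits.TrailingZeros64(diff)>>3 reports 6 matching bytes from a single 64-bit load.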
-diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
-index f46c654189fc6..dd74ffb87232b 100644
---- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
-+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
-@@ -85,26 +85,48 @@ type huffmanBitWriter struct {
- // Data waiting to be written is bytes[0:nbytes]
- // and then the low nbits of bits.
- bits uint64
-- nbits uint
-- bytes [256]byte
-- codegenFreq [codegenCodeCount]int32
-+ nbits uint16
- nbytes uint8
-- literalFreq []int32
-- offsetFreq []int32
-- codegen []uint8
- literalEncoding *huffmanEncoder
- offsetEncoding *huffmanEncoder
- codegenEncoding *huffmanEncoder
- err error
-+ lastHeader int
-+ // Set between 0 (reused block can be up to 2x the size)
-+ logReusePenalty uint
-+ lastHuffMan bool
-+ bytes [256]byte
-+ literalFreq [lengthCodesStart + 32]uint16
-+ offsetFreq [32]uint16
-+ codegenFreq [codegenCodeCount]uint16
-+
-+ // codegen must have an extra space for the final symbol.
-+ codegen [literalCount + offsetCodeCount + 1]uint8
- }
-
-+// Huffman reuse.
-+//
-+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
-+//
-+// This is controlled by several variables:
-+//
-+// If lastHeader is non-zero the Huffman table can be reused.
-+// This also indicates that a Huffman table has been generated that can output all
-+// possible symbols.
-+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
-+// an EOB with the previous table must be written.
-+//
-+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
-+//
-+// An incoming block estimates the output size of a 'fresh' table by calculating the
-+// optimal size and adding a penalty controlled by 'logReusePenalty'.
-+// The penalty is added because a real Huffman table is never optimal, and because generating
-+// a new table is slower for both compression and decompression.
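-+// For example (illustrative numbers): with logReusePenalty = 4 the fresh estimate is inflated
-+// by 1/16th (newSize += newSize >> 4), so the previous table is kept unless a new one is
-+// expected to save more than roughly 6% of the block.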
-+
- func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
- return &huffmanBitWriter{
- writer: w,
-- literalFreq: make([]int32, lengthCodesStart+32),
-- offsetFreq: make([]int32, 32),
-- codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
-- literalEncoding: newHuffmanEncoder(maxNumLit),
-+ literalEncoding: newHuffmanEncoder(literalCount),
- codegenEncoding: newHuffmanEncoder(codegenCodeCount),
- offsetEncoding: newHuffmanEncoder(offsetCodeCount),
- }
-@@ -113,7 +135,41 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
- func (w *huffmanBitWriter) reset(writer io.Writer) {
- w.writer = writer
- w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
-- w.bytes = [256]byte{}
-+ w.lastHeader = 0
-+ w.lastHuffMan = false
-+}
-+
-+func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
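-+// canReuse reports whether the tables generated for the previous block can encode the new
-+// tokens: a literal, length or offset symbol that occurs in t but had zero frequency last
-+// time has no code assigned, so its presence forces offsets and/or lits to false.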
-+ offsets, lits = true, true
-+ a := t.offHist[:offsetCodeCount]
-+ b := w.offsetFreq[:len(a)]
-+ for i := range a {
-+ if b[i] == 0 && a[i] != 0 {
-+ offsets = false
-+ break
-+ }
-+ }
-+
-+ a = t.extraHist[:literalCount-256]
-+ b = w.literalFreq[256:literalCount]
-+ b = b[:len(a)]
-+ for i := range a {
-+ if b[i] == 0 && a[i] != 0 {
-+ lits = false
-+ break
-+ }
-+ }
-+ if lits {
-+ a = t.litHist[:]
-+ b = w.literalFreq[:len(a)]
-+ for i := range a {
-+ if b[i] == 0 && a[i] != 0 {
-+ lits = false
-+ break
-+ }
-+ }
-+ }
-+ return
- }
-
- func (w *huffmanBitWriter) flush() {
-@@ -144,30 +200,11 @@ func (w *huffmanBitWriter) write(b []byte) {
- _, w.err = w.writer.Write(b)
- }
-
--func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
-- w.bits |= uint64(b) << w.nbits
-+func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
-+ w.bits |= uint64(b) << (w.nbits & 63)
- w.nbits += nb
- if w.nbits >= 48 {
-- bits := w.bits
-- w.bits >>= 48
-- w.nbits -= 48
-- n := w.nbytes
-- w.bytes[n] = byte(bits)
-- w.bytes[n+1] = byte(bits >> 8)
-- w.bytes[n+2] = byte(bits >> 16)
-- w.bytes[n+3] = byte(bits >> 24)
-- w.bytes[n+4] = byte(bits >> 32)
-- w.bytes[n+5] = byte(bits >> 40)
-- n += 6
-- if n >= bufferFlushSize {
-- if w.err != nil {
-- n = 0
-- return
-- }
-- w.write(w.bytes[:n])
-- n = 0
-- }
-- w.nbytes = n
-+ w.writeOutBits()
- }
- }
-
-@@ -213,7 +250,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
- // a copy of the frequencies, and as the place where we put the result.
- // This is fine because the output is always shorter than the input used
- // so far.
-- codegen := w.codegen // cache
-+ codegen := w.codegen[:] // cache
- // Copy the concatenated code sizes to codegen. Put a marker at the end.
- cgnl := codegen[:numLiterals]
- for i := range cgnl {
-@@ -292,30 +329,54 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
- codegen[outIndex] = badCode
- }
-
--// dynamicSize returns the size of dynamically encoded data in bits.
--func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
-+func (w *huffmanBitWriter) codegens() int {
-+ numCodegens := len(w.codegenFreq)
-+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
-+ numCodegens--
-+ }
-+ return numCodegens
-+}
-+
-+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
- numCodegens = len(w.codegenFreq)
- for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
- numCodegens--
- }
-- header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
-+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
- w.codegenEncoding.bitLength(w.codegenFreq[:]) +
- int(w.codegenFreq[16])*2 +
- int(w.codegenFreq[17])*3 +
-- int(w.codegenFreq[18])*7
-+ int(w.codegenFreq[18])*7, numCodegens
-+}
-+
-+// dynamicSize returns the size of dynamically encoded data in bits.
-+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
-+ header, numCodegens := w.headerSize()
- size = header +
-- litEnc.bitLength(w.literalFreq) +
-- offEnc.bitLength(w.offsetFreq) +
-+ litEnc.bitLength(w.literalFreq[:]) +
-+ offEnc.bitLength(w.offsetFreq[:]) +
- extraBits
--
- return size, numCodegens
- }
-
-+// extraBitSize will return the number of bits that will be written
-+// as ""extra"" bits on matches.
-+func (w *huffmanBitWriter) extraBitSize() int {
-+ total := 0
-+ for i, n := range w.literalFreq[257:literalCount] {
-+ total += int(n) * int(lengthExtraBits[i&31])
-+ }
-+ for i, n := range w.offsetFreq[:offsetCodeCount] {
-+ total += int(n) * int(offsetExtraBits[i&31])
-+ }
-+ return total
-+}
-+
- // fixedSize returns the size of dynamically encoded data in bits.
- func (w *huffmanBitWriter) fixedSize(extraBits int) int {
- return 3 +
-- fixedLiteralEncoding.bitLength(w.literalFreq) +
-- fixedOffsetEncoding.bitLength(w.offsetFreq) +
-+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
-+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
- extraBits
- }
-
-@@ -333,30 +394,36 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
- }
-
- func (w *huffmanBitWriter) writeCode(c hcode) {
-+ // The function does not get inlined if we ""& 63"" the shift.
- w.bits |= uint64(c.code) << w.nbits
-- w.nbits += uint(c.len)
-+ w.nbits += c.len
- if w.nbits >= 48 {
-- bits := w.bits
-- w.bits >>= 48
-- w.nbits -= 48
-- n := w.nbytes
-- w.bytes[n] = byte(bits)
-- w.bytes[n+1] = byte(bits >> 8)
-- w.bytes[n+2] = byte(bits >> 16)
-- w.bytes[n+3] = byte(bits >> 24)
-- w.bytes[n+4] = byte(bits >> 32)
-- w.bytes[n+5] = byte(bits >> 40)
-- n += 6
-- if n >= bufferFlushSize {
-- if w.err != nil {
-- n = 0
-- return
-- }
-- w.write(w.bytes[:n])
-+ w.writeOutBits()
-+ }
-+}
-+
-+// writeOutBits will write bits to the buffer.
-+func (w *huffmanBitWriter) writeOutBits() {
-+ bits := w.bits
-+ w.bits >>= 48
-+ w.nbits -= 48
-+ n := w.nbytes
-+ w.bytes[n] = byte(bits)
-+ w.bytes[n+1] = byte(bits >> 8)
-+ w.bytes[n+2] = byte(bits >> 16)
-+ w.bytes[n+3] = byte(bits >> 24)
-+ w.bytes[n+4] = byte(bits >> 32)
-+ w.bytes[n+5] = byte(bits >> 40)
-+ n += 6
-+ if n >= bufferFlushSize {
-+ if w.err != nil {
- n = 0
-+ return
- }
-- w.nbytes = n
-+ w.write(w.bytes[:n])
-+ n = 0
- }
-+ w.nbytes = n
- }
-
- // Write the header of a dynamic Huffman block to the output stream.
-@@ -395,15 +462,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
- case 16:
- w.writeBits(int32(w.codegen[i]), 2)
- i++
-- break
- case 17:
- w.writeBits(int32(w.codegen[i]), 3)
- i++
-- break
- case 18:
- w.writeBits(int32(w.codegen[i]), 7)
- i++
-- break
- }
- }
- }
-@@ -412,6 +476,11 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
- if w.err != nil {
- return
- }
-+ if w.lastHeader > 0 {
-+ // We owe an EOB
-+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
-+ w.lastHeader = 0
-+ }
- var flag int32
- if isEof {
- flag = 1
-@@ -426,6 +495,12 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
- if w.err != nil {
- return
- }
-+ if w.lastHeader > 0 {
-+ // We owe an EOB
-+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
-+ w.lastHeader = 0
-+ }
-+
- // Indicate that we are a fixed Huffman block
- var value int32 = 2
- if isEof {
-@@ -439,29 +514,23 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
- // is larger than the original bytes, the data will be written as a
- // stored block.
- // If the input is nil, the tokens will always be Huffman encoded.
--func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
-+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
- if w.err != nil {
- return
- }
-
-- tokens = append(tokens, endBlockMarker)
-- numLiterals, numOffsets := w.indexTokens(tokens)
--
-+ tokens.AddEOB()
-+ if w.lastHeader > 0 {
-+ // We owe an EOB
-+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
-+ w.lastHeader = 0
-+ }
-+ numLiterals, numOffsets := w.indexTokens(tokens, false)
-+ w.generate(tokens)
- var extraBits int
- storedSize, storable := w.storedSize(input)
- if storable {
-- // We only bother calculating the costs of the extra bits required by
-- // the length of offset fields (which will be the same for both fixed
-- // and dynamic encoding), if we need to compare those two encodings
-- // against stored encoding.
-- for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
-- // First eight length codes have extra size = 0.
-- extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
-- }
-- for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
-- // First four offset codes have extra size = 0.
-- extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode&63])
-- }
-+ extraBits = w.extraBitSize()
- }
-
- // Figure out smallest code.
-@@ -500,7 +569,7 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
- }
-
- // Write the tokens.
-- w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
-+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
- }
-
- // writeBlockDynamic encodes a block using a dynamic Huffman table.
-@@ -508,72 +577,103 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
- // histogram distribution.
- // If input is supplied and the compression savings are below 1/16th of the
- // input size the block is stored.
--func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
-+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
- if w.err != nil {
- return
- }
-
-- tokens = append(tokens, endBlockMarker)
-- numLiterals, numOffsets := w.indexTokens(tokens)
-+ sync = sync || eof
-+ if sync {
-+ tokens.AddEOB()
-+ }
-
-- // Generate codegen and codegenFrequencies, which indicates how to encode
-- // the literalEncoding and the offsetEncoding.
-- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
-- w.codegenEncoding.generate(w.codegenFreq[:], 7)
-- size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
-+ // We cannot reuse pure huffman table.
-+ if w.lastHuffMan && w.lastHeader > 0 {
-+ // We will not try to reuse.
-+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
-+ w.lastHeader = 0
-+ w.lastHuffMan = false
-+ }
-+ if !sync {
-+ tokens.Fill()
-+ }
-+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
-
-- // Store bytes, if we don't get a reasonable improvement.
-- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
-- w.writeStoredHeader(len(input), eof)
-- w.writeBytes(input)
-- return
-+ var size int
-+ // Check if we should reuse.
-+ if w.lastHeader > 0 {
-+ // Estimate size for using a new table
-+ newSize := w.lastHeader + tokens.EstimatedBits()
-+
-+ // The estimated size is calculated as an optimal table.
-+ // We add a penalty to make it more realistic and re-use a bit more.
-+ newSize += newSize >> (w.logReusePenalty & 31)
-+ extra := w.extraBitSize()
-+ reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra)
-+
-+ // Check if a new table is better.
-+ if newSize < reuseSize {
-+ // Write the EOB we owe.
-+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
-+ size = newSize
-+ w.lastHeader = 0
-+ } else {
-+ size = reuseSize
-+ }
-+ // Check if we get a reasonable size decrease.
-+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
-+ w.writeStoredHeader(len(input), eof)
-+ w.writeBytes(input)
-+ w.lastHeader = 0
-+ return
-+ }
- }
-
-- // Write Huffman table.
-- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
-+ // We want a new block/table
-+ if w.lastHeader == 0 {
-+ w.generate(tokens)
-+ // Generate codegen and codegenFrequencies, which indicates how to encode
-+ // the literalEncoding and the offsetEncoding.
-+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
-+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
-+ var numCodegens int
-+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
-+ // Store bytes, if we don't get a reasonable improvement.
-+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
-+ w.writeStoredHeader(len(input), eof)
-+ w.writeBytes(input)
-+ w.lastHeader = 0
-+ return
-+ }
-+
-+ // Write Huffman table.
-+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
-+ w.lastHeader, _ = w.headerSize()
-+ w.lastHuffMan = false
-+ }
-
-+ if sync {
-+ w.lastHeader = 0
-+ }
- // Write the tokens.
-- w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
-+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
- }
-
- // indexTokens indexes a slice of tokens, and updates
- // literalFreq and offsetFreq, and generates literalEncoding
- // and offsetEncoding.
- // The number of literal and offset tokens is returned.
--func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
-- for i := range w.literalFreq {
-- w.literalFreq[i] = 0
-- }
-- for i := range w.offsetFreq {
-- w.offsetFreq[i] = 0
-- }
-+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
-+ copy(w.literalFreq[:], t.litHist[:])
-+ copy(w.literalFreq[256:], t.extraHist[:])
-+ copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
-
-- if len(tokens) == 0 {
-+ if t.n == 0 {
- return
- }
--
-- // Only last token should be endBlockMarker.
-- if tokens[len(tokens)-1] == endBlockMarker {
-- w.literalFreq[endBlockMarker]++
-- tokens = tokens[:len(tokens)-1]
-+ if filled {
-+ return maxNumLit, maxNumDist
- }
--
-- // Create slices up to the next power of two to avoid bounds checks.
-- lits := w.literalFreq[:256]
-- offs := w.offsetFreq[:32]
-- lengths := w.literalFreq[lengthCodesStart:]
-- lengths = lengths[:32]
-- for _, t := range tokens {
-- if t < endBlockMarker {
-- lits[t.literal()]++
-- continue
-- }
-- length := t.length()
-- offset := t.offset()
-- lengths[lengthCode(length)&31]++
-- offs[offsetCode(offset)&31]++
-- }
--
- // get the number of literals
- numLiterals = len(w.literalFreq)
- for w.literalFreq[numLiterals-1] == 0 {
-@@ -590,11 +690,14 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets
- w.offsetFreq[0] = 1
- numOffsets = 1
- }
-- w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15)
-- w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
- return
- }
-
-+func (w *huffmanBitWriter) generate(t *tokens) {
-+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
-+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
-+}
-+
- // writeTokens writes a slice of tokens to the output.
- // codes for literal and offset encoding must be supplied.
- func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
-@@ -626,8 +729,19 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
- // Write the length
- length := t.length()
- lengthCode := lengthCode(length)
-- w.writeCode(lengths[lengthCode&31])
-- extraLengthBits := uint(lengthExtraBits[lengthCode&31])
-+ if false {
-+ w.writeCode(lengths[lengthCode&31])
-+ } else {
-+ // inlined
-+ c := lengths[lengthCode&31]
-+ w.bits |= uint64(c.code) << (w.nbits & 63)
-+ w.nbits += c.len
-+ if w.nbits >= 48 {
-+ w.writeOutBits()
-+ }
-+ }
-+
-+ extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
- if extraLengthBits > 0 {
- extraLength := int32(length - lengthBase[lengthCode&31])
- w.writeBits(extraLength, extraLengthBits)
-@@ -635,8 +749,18 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
- // Write the offset
- offset := t.offset()
- offsetCode := offsetCode(offset)
-- w.writeCode(offs[offsetCode&31])
-- extraOffsetBits := uint(offsetExtraBits[offsetCode&63])
-+ if false {
-+ w.writeCode(offs[offsetCode&31])
-+ } else {
-+ // inlined
-+ c := offs[offsetCode&31]
-+ w.bits |= uint64(c.code) << (w.nbits & 63)
-+ w.nbits += c.len
-+ if w.nbits >= 48 {
-+ w.writeOutBits()
-+ }
-+ }
-+ extraOffsetBits := uint16(offsetExtraBits[offsetCode&63])
- if extraOffsetBits > 0 {
- extraOffset := int32(offset - offsetBase[offsetCode&63])
- w.writeBits(extraOffset, extraOffsetBits)
-@@ -661,75 +785,93 @@ func init() {
- // writeBlockHuff encodes a block of bytes as either
- // Huffman encoded literals or uncompressed bytes if the
- // results only gains very little from compression.
--func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
-+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
- if w.err != nil {
- return
- }
-
- // Clear histogram
-- for i := range w.literalFreq {
-+ for i := range w.literalFreq[:] {
- w.literalFreq[i] = 0
- }
-+ if !w.lastHuffMan {
-+ for i := range w.offsetFreq[:] {
-+ w.offsetFreq[i] = 0
-+ }
-+ }
-
- // Add everything as literals
-- histogram(input, w.literalFreq)
-+ estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15
-
-- w.literalFreq[endBlockMarker] = 1
-+ // Store bytes, if we don't get a reasonable improvement.
-+ ssize, storable := w.storedSize(input)
-+ if storable && ssize < (estBits+estBits>>4) {
-+ w.writeStoredHeader(len(input), eof)
-+ w.writeBytes(input)
-+ return
-+ }
-
-- const numLiterals = endBlockMarker + 1
-- const numOffsets = 1
-+ if w.lastHeader > 0 {
-+ size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader)
-+ estBits += estBits >> (w.logReusePenalty)
-
-- w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15)
-+ if estBits < size {
-+ // We owe an EOB
-+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
-+ w.lastHeader = 0
-+ }
-+ }
-
-- // Figure out smallest code.
-- // Always use dynamic Huffman or Store
-- var numCodegens int
-+ const numLiterals = endBlockMarker + 1
-+ const numOffsets = 1
-+ if w.lastHeader == 0 {
-+ w.literalFreq[endBlockMarker] = 1
-+ w.literalEncoding.generate(w.literalFreq[:numLiterals], 15)
-
-- // Generate codegen and codegenFrequencies, which indicates how to encode
-- // the literalEncoding and the offsetEncoding.
-- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
-- w.codegenEncoding.generate(w.codegenFreq[:], 7)
-- size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
-+ // Generate codegen and codegenFrequencies, which indicates how to encode
-+ // the literalEncoding and the offsetEncoding.
-+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
-+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
-+ numCodegens := w.codegens()
-
-- // Store bytes, if we don't get a reasonable improvement.
-- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
-- w.writeStoredHeader(len(input), eof)
-- w.writeBytes(input)
-- return
-+ // Huffman.
-+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
-+ w.lastHuffMan = true
-+ w.lastHeader, _ = w.headerSize()
- }
-
-- // Huffman.
-- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- encoding := w.literalEncoding.codes[:257]
-- n := w.nbytes
- for _, t := range input {
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
-- w.bits |= uint64(c.code) << w.nbits
-- w.nbits += uint(c.len)
-- if w.nbits < 48 {
-- continue
-- }
-- // Store 6 bytes
-- bits := w.bits
-- w.bits >>= 48
-- w.nbits -= 48
-- w.bytes[n] = byte(bits)
-- w.bytes[n+1] = byte(bits >> 8)
-- w.bytes[n+2] = byte(bits >> 16)
-- w.bytes[n+3] = byte(bits >> 24)
-- w.bytes[n+4] = byte(bits >> 32)
-- w.bytes[n+5] = byte(bits >> 40)
-- n += 6
-- if n < bufferFlushSize {
-- continue
-- }
-- w.write(w.bytes[:n])
-- if w.err != nil {
-- return // Return early in the event of write failures
-+ w.bits |= uint64(c.code) << ((w.nbits) & 63)
-+ w.nbits += c.len
-+ if w.nbits >= 48 {
-+ bits := w.bits
-+ w.bits >>= 48
-+ w.nbits -= 48
-+ n := w.nbytes
-+ w.bytes[n] = byte(bits)
-+ w.bytes[n+1] = byte(bits >> 8)
-+ w.bytes[n+2] = byte(bits >> 16)
-+ w.bytes[n+3] = byte(bits >> 24)
-+ w.bytes[n+4] = byte(bits >> 32)
-+ w.bytes[n+5] = byte(bits >> 40)
-+ n += 6
-+ if n >= bufferFlushSize {
-+ if w.err != nil {
-+ n = 0
-+ return
-+ }
-+ w.write(w.bytes[:n])
-+ n = 0
-+ }
-+ w.nbytes = n
- }
-- n = 0
- }
-- w.nbytes = n
-- w.writeCode(encoding[endBlockMarker])
-+ if eof || sync {
-+ w.writeCode(encoding[endBlockMarker])
-+ w.lastHeader = 0
-+ w.lastHuffMan = false
-+ }
- }
-diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
-index f65f793361480..1810c6898d0b6 100644
---- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
-+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
-@@ -10,6 +10,12 @@ import (
- ""sort""
- )
-
-+const (
-+ maxBitsLimit = 16
-+ // number of valid literals
-+ literalCount = 286
-+)
-+
- // hcode is a huffman code with a bit code and bit length.
- type hcode struct {
- code, len uint16
-@@ -25,7 +31,7 @@ type huffmanEncoder struct {
-
- type literalNode struct {
- literal uint16
-- freq int32
-+ freq uint16
- }
-
- // A levelInfo describes the state of the constructed tree for a given depth.
-@@ -54,7 +60,11 @@ func (h *hcode) set(code uint16, length uint16) {
- h.code = code
- }
-
--func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
-+func reverseBits(number uint16, bitLength byte) uint16 {
-+ return bits.Reverse16(number << ((16 - bitLength) & 15))
-+}
-+
-+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
-
- func newHuffmanEncoder(size int) *huffmanEncoder {
- // Make capacity to next power of two.
-@@ -64,10 +74,10 @@ func newHuffmanEncoder(size int) *huffmanEncoder {
-
- // Generates a HuffmanCode corresponding to the fixed literal table
- func generateFixedLiteralEncoding() *huffmanEncoder {
-- h := newHuffmanEncoder(maxNumLit)
-+ h := newHuffmanEncoder(literalCount)
- codes := h.codes
- var ch uint16
-- for ch = 0; ch < maxNumLit; ch++ {
-+ for ch = 0; ch < literalCount; ch++ {
- var bits uint16
- var size uint16
- switch {
-@@ -75,17 +85,14 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
- // size 8, 000110000 .. 10111111
- bits = ch + 48
- size = 8
-- break
- case ch < 256:
- // size 9, 110010000 .. 111111111
- bits = ch + 400 - 144
- size = 9
-- break
- case ch < 280:
- // size 7, 0000000 .. 0010111
- bits = ch - 256
- size = 7
-- break
- default:
- // size 8, 11000000 .. 11000111
- bits = ch + 192 - 280
-@@ -108,7 +115,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder {
- var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
- var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
-
--func (h *huffmanEncoder) bitLength(freq []int32) int {
-+func (h *huffmanEncoder) bitLength(freq []uint16) int {
- var total int
- for i, f := range freq {
- if f != 0 {
-@@ -118,8 +125,6 @@ func (h *huffmanEncoder) bitLength(freq []int32) int {
- return total
- }
-
--const maxBitsLimit = 16
--
- // Return the number of literals assigned to each bit size in the Huffman encoding
- //
- // This method is only called when list.length >= 3
-@@ -163,9 +168,9 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
- // We initialize the levels as if we had already figured this out.
- levels[level] = levelInfo{
- level: level,
-- lastFreq: list[1].freq,
-- nextCharFreq: list[2].freq,
-- nextPairFreq: list[0].freq + list[1].freq,
-+ lastFreq: int32(list[1].freq),
-+ nextCharFreq: int32(list[2].freq),
-+ nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
- }
- leafCounts[level][level] = 2
- if level == 1 {
-@@ -197,7 +202,12 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
- l.lastFreq = l.nextCharFreq
- // Lower leafCounts are the same of the previous node.
- leafCounts[level][level] = n
-- l.nextCharFreq = list[n].freq
-+ e := list[n]
-+ if e.literal < math.MaxUint16 {
-+ l.nextCharFreq = int32(e.freq)
-+ } else {
-+ l.nextCharFreq = math.MaxInt32
-+ }
- } else {
- // The next item on this row is a pair from the previous row.
- // nextPairFreq isn't valid until we generate two
-@@ -273,12 +283,12 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
- //
- // freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
- // maxBits The maximum number of bits to use for any literal.
--func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
-+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
- if h.freqcache == nil {
- // Allocate a reusable buffer with the longest possible frequency table.
-- // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
-- // The largest of these is maxNumLit, so we allocate for that case.
-- h.freqcache = make([]literalNode, maxNumLit+1)
-+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
-+ // The largest of these is literalCount, so we allocate for that case.
-+ h.freqcache = make([]literalNode, literalCount+1)
- }
- list := h.freqcache[:len(freq)+1]
- // Number of non-zero literals
-@@ -345,3 +355,27 @@ func (s byFreq) Less(i, j int) bool {
- }
-
- func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-+
-+// histogramSize accumulates a histogram of b in h.
-+// An estimated size in bits is returned.
-+// If fill is true, unassigned values are assigned '1' in the histogram.
-+// len(h) must be >= 256, and h's elements must be all zeroes.
-+func histogramSize(b []byte, h []uint16, fill bool) int {
-+ h = h[:256]
-+ for _, t := range b {
-+ h[t]++
-+ }
-+ invTotal := 1.0 / float64(len(b))
-+ shannon := 0.0
-+ single := math.Ceil(-math.Log2(invTotal))
-+ for i, v := range h[:] {
-+ if v > 0 {
-+ n := float64(v)
-+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
-+ } else if fill {
-+ shannon += single
-+ h[i] = 1
-+ }
-+ }
-+ return int(shannon + 0.99)
-+}
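-+
-+// Rough worked example for the estimate above (illustrative): a 256-byte block holding one
-+// repeated byte value has a single occupied bucket contributing ceil(-log2(256/256)*256) = 0
-+// bits; with fill set, each of the other 255 buckets adds ceil(-log2(1/256)) = 8 bits, giving
-+// 2040 bits before writeBlockHuff adds its fixed +15.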
-diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
-index 800d0ce9e5452..6dc5b5d06e303 100644
---- a/vendor/github.com/klauspost/compress/flate/inflate.go
-+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
-@@ -9,6 +9,7 @@ package flate
-
- import (
- ""bufio""
-+ ""fmt""
- ""io""
- ""math/bits""
- ""strconv""
-@@ -24,6 +25,8 @@ const (
- maxNumLit = 286
- maxNumDist = 30
- numCodes = 19 // number of codes in Huffman meta-code
-+
-+ debugDecode = false
- )
-
- // Initialize the fixedHuffmanDecoder only once upon first use.
-@@ -104,8 +107,8 @@ const (
-
- type huffmanDecoder struct {
- min int // the minimum code length
-- chunks *[huffmanNumChunks]uint32 // chunks as described above
-- links [][]uint32 // overflow links
-+ chunks *[huffmanNumChunks]uint16 // chunks as described above
-+ links [][]uint16 // overflow links
- linkMask uint32 // mask the width of the link table
- }
-
-@@ -121,7 +124,7 @@ func (h *huffmanDecoder) init(lengths []int) bool {
- const sanity = false
-
- if h.chunks == nil {
-- h.chunks = &[huffmanNumChunks]uint32{}
-+ h.chunks = &[huffmanNumChunks]uint16{}
- }
- if h.min != 0 {
- *h = huffmanDecoder{chunks: h.chunks, links: h.links}
-@@ -169,6 +172,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
- // accept degenerate single-code codings. See also
- // TestDegenerateHuffmanCoding.
- if code != 1<> 1
- if cap(h.links) < huffmanNumChunks-link {
-- h.links = make([][]uint32, huffmanNumChunks-link)
-+ h.links = make([][]uint16, huffmanNumChunks-link)
- } else {
- h.links = h.links[:huffmanNumChunks-link]
- }
-@@ -196,9 +202,9 @@ func (h *huffmanDecoder) init(lengths []int) bool {
- if sanity && h.chunks[reverse] != 0 {
- panic(""impossible: overwriting existing chunk"")
- }
-- h.chunks[reverse] = uint32(off<>= uint(16 - n)
- if n <= huffmanChunkBits {
-@@ -347,6 +353,9 @@ func (f *decompressor) nextBlock() {
- f.huffmanBlock()
- default:
- // 3 is reserved.
-+ if debugDecode {
-+ fmt.Println(""reserved data block encountered"")
-+ }
- f.err = CorruptInputError(f.roffset)
- }
- }
-@@ -425,11 +434,17 @@ func (f *decompressor) readHuffman() error {
- }
- nlit := int(f.b&0x1F) + 257
- if nlit > maxNumLit {
-+ if debugDecode {
-+ fmt.Println(""nlit > maxNumLit"", nlit)
-+ }
- return CorruptInputError(f.roffset)
- }
- f.b >>= 5
- ndist := int(f.b&0x1F) + 1
- if ndist > maxNumDist {
-+ if debugDecode {
-+ fmt.Println(""ndist > maxNumDist"", ndist)
-+ }
- return CorruptInputError(f.roffset)
- }
- f.b >>= 5
-@@ -453,6 +468,9 @@ func (f *decompressor) readHuffman() error {
- f.codebits[codeOrder[i]] = 0
- }
- if !f.h1.init(f.codebits[0:]) {
-+ if debugDecode {
-+ fmt.Println(""init codebits failed"")
-+ }
- return CorruptInputError(f.roffset)
- }
-
-@@ -480,6 +498,9 @@ func (f *decompressor) readHuffman() error {
- rep = 3
- nb = 2
- if i == 0 {
-+ if debugDecode {
-+ fmt.Println(""i==0"")
-+ }
- return CorruptInputError(f.roffset)
- }
- b = f.bits[i-1]
-@@ -494,6 +515,9 @@ func (f *decompressor) readHuffman() error {
- }
- for f.nb < nb {
- if err := f.moreBits(); err != nil {
-+ if debugDecode {
-+ fmt.Println(""morebits:"", err)
-+ }
- return err
- }
- }
-@@ -501,6 +525,9 @@ func (f *decompressor) readHuffman() error {
- f.b >>= nb
- f.nb -= nb
- if i+rep > n {
-+ if debugDecode {
-+ fmt.Println(""i+rep > n"", i, rep, n)
-+ }
- return CorruptInputError(f.roffset)
- }
- for j := 0; j < rep; j++ {
-@@ -510,6 +537,9 @@ func (f *decompressor) readHuffman() error {
- }
-
- if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
-+ if debugDecode {
-+ fmt.Println(""init2 failed"")
-+ }
- return CorruptInputError(f.roffset)
- }
-
-@@ -587,12 +617,18 @@ readLiteral:
- length = 258
- n = 0
- default:
-+ if debugDecode {
-+ fmt.Println(v, "">= maxNumLit"")
-+ }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = f.moreBits(); err != nil {
-+ if debugDecode {
-+ fmt.Println(""morebits n>0:"", err)
-+ }
- f.err = err
- return
- }
-@@ -606,6 +642,9 @@ readLiteral:
- if f.hd == nil {
- for f.nb < 5 {
- if err = f.moreBits(); err != nil {
-+ if debugDecode {
-+ fmt.Println(""morebits f.nb<5:"", err)
-+ }
- f.err = err
- return
- }
-@@ -615,6 +654,9 @@ readLiteral:
- f.nb -= 5
- } else {
- if dist, err = f.huffSym(f.hd); err != nil {
-+ if debugDecode {
-+ fmt.Println(""huffsym:"", err)
-+ }
- f.err = err
- return
- }
-@@ -629,6 +671,9 @@ readLiteral:
- extra := (dist & 1) << nb
- for f.nb < nb {
- if err = f.moreBits(); err != nil {
-+ if debugDecode {
-+ fmt.Println(""morebits f.nb f.dict.histSize() {
-+ if debugDecode {
-+ fmt.Println(""dist > f.dict.histSize():"", dist, f.dict.histSize())
-+ }
- f.err = CorruptInputError(f.roffset)
- return
- }
-@@ -688,6 +739,9 @@ func (f *decompressor) dataBlock() {
- n := int(f.buf[0]) | int(f.buf[1])<<8
- nn := int(f.buf[2]) | int(f.buf[3])<<8
- if uint16(nn) != uint16(^n) {
-+ if debugDecode {
-+ fmt.Println(""uint16(nn) != uint16(^n)"", nn, ^n)
-+ }
- f.err = CorruptInputError(f.roffset)
- return
- }
-@@ -789,6 +843,9 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
- if n == 0 {
- f.b = b
- f.nb = nb
-+ if debugDecode {
-+ fmt.Println(""huffsym: n==0"")
-+ }
- f.err = CorruptInputError(f.roffset)
- return 0, f.err
- }
-diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
-new file mode 100644
-index 0000000000000..20de8f11f4f37
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/level1.go
-@@ -0,0 +1,174 @@
-+package flate
-+
-+// fastEncL1 is the level 1 encoder. It embeds fastGen and keeps a
-+// single hash table of recently seen 4-byte values.
-+type fastEncL1 struct {
-+ fastGen
-+ table [tableSize]tableEntry
-+}
-+
-+// Encode uses a Snappy-like algorithm to produce the level 1 token stream.
-+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 12 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ // Protect against e.cur wraparound.
-+ for e.cur >= bufferReset {
-+ if len(e.hist) == 0 {
-+ for i := range e.table[:] {
-+ e.table[i] = tableEntry{}
-+ }
-+ e.cur = maxMatchOffset
-+ break
-+ }
-+ // Shift down everything in the table that isn't already too far away.
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-+ for i := range e.table[:] {
-+ v := e.table[i].offset
-+ if v <= minOff {
-+ v = 0
-+ } else {
-+ v = v - e.cur + maxMatchOffset
-+ }
-+ e.table[i].offset = v
-+ }
-+ e.cur = maxMatchOffset
-+ }
-+
-+ s := e.addBlock(src)
-+
-+ // This check isn't in the Snappy implementation, but there, the caller
-+ // instead of the callee handles this case.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ // Override src
-+ src = e.hist
-+ nextEmit := s
-+
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int32(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load3232(src, s)
-+
-+ for {
-+ const skipLog = 5
-+ const doEvery = 2
-+
-+ nextS := s
-+ var candidate tableEntry
-+ for {
-+ nextHash := hash(cv)
-+ candidate = e.table[nextHash]
-+ nextS = s + doEvery + (s-nextEmit)>>skipLog
-+ if nextS > sLimit {
-+ goto emitRemainder
-+ }
-+
-+ now := load6432(src, nextS)
-+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
-+ nextHash = hash(uint32(now))
-+
-+ offset := s - (candidate.offset - e.cur)
-+ if offset < maxMatchOffset && cv == candidate.val {
-+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)}
-+ break
-+ }
-+
-+ // Do one right away...
-+ cv = uint32(now)
-+ s = nextS
-+ nextS++
-+ candidate = e.table[nextHash]
-+ now >>= 8
-+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
-+
-+ offset = s - (candidate.offset - e.cur)
-+ if offset < maxMatchOffset && cv == candidate.val {
-+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)}
-+ break
-+ }
-+ cv = uint32(now)
-+ s = nextS
-+ }
-+
-+ // A 4-byte match has been found. We'll later see if more than 4 bytes
-+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-+ // them as literal bytes.
-+ for {
-+ // Invariant: we have a 4-byte match at s, and no need to emit any
-+ // literal bytes prior to s.
-+
-+ // Extend the 4-byte match as long as possible.
-+ t := candidate.offset - e.cur
-+ l := e.matchlenLong(s+4, t+4, src) + 4
-+
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+
-+ // Save the match found
-+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+ if s >= sLimit {
-+ // Index first pair after match end.
-+ if int(s+l+4) < len(src) {
-+ cv := load3232(src, s)
-+ e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv}
-+ }
-+ goto emitRemainder
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-2 and at s. If
-+ // another emitCopy is not our next move, also calculate nextHash
-+ // at s+1. At least on GOARCH=amd64, these three hash calculations
-+ // are faster as one load64 call (with some shifts) instead of
-+ // three load32 calls.
-+ x := load6432(src, s-2)
-+ o := e.cur + s - 2
-+ prevHash := hash(uint32(x))
-+ e.table[prevHash] = tableEntry{offset: o, val: uint32(x)}
-+ x >>= 16
-+ currHash := hash(uint32(x))
-+ candidate = e.table[currHash]
-+ e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)}
-+
-+ offset := s - (candidate.offset - e.cur)
-+ if offset > maxMatchOffset || uint32(x) != candidate.val {
-+ cv = uint32(x >> 8)
-+ s++
-+ break
-+ }
-+ }
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
-diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
-new file mode 100644
-index 0000000000000..7c824431e6477
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/level2.go
-@@ -0,0 +1,199 @@
-+package flate
-+
-+// fastEncL2 is the level 2 encoder. It embeds fastGen and uses a
-+// larger hash table than level 1.
-+type fastEncL2 struct {
-+ fastGen
-+ table [bTableSize]tableEntry
-+}
-+
-+// Encode uses an algorithm similar to level 1, but is capable
-+// of matching across blocks, giving better compression at a small slowdown.
-+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 12 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ // Protect against e.cur wraparound.
-+ for e.cur >= bufferReset {
-+ if len(e.hist) == 0 {
-+ for i := range e.table[:] {
-+ e.table[i] = tableEntry{}
-+ }
-+ e.cur = maxMatchOffset
-+ break
-+ }
-+ // Shift down everything in the table that isn't already too far away.
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-+ for i := range e.table[:] {
-+ v := e.table[i].offset
-+ if v <= minOff {
-+ v = 0
-+ } else {
-+ v = v - e.cur + maxMatchOffset
-+ }
-+ e.table[i].offset = v
-+ }
-+ e.cur = maxMatchOffset
-+ }
-+
-+ s := e.addBlock(src)
-+
-+ // This check isn't in the Snappy implementation, but there, the caller
-+ // instead of the callee handles this case.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ // Override src
-+ src = e.hist
-+ nextEmit := s
-+
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int32(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load3232(src, s)
-+ for {
-+ // When should we start skipping if we haven't found matches in a long while.
-+ const skipLog = 5
-+ const doEvery = 2
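-+ // With these values the probe step below is 2 + (s-nextEmit)>>5: after 32 bytes without a
-+ // match candidates are checked every 3 bytes, after 64 bytes every 4, and so on.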
-+
-+ nextS := s
-+ var candidate tableEntry
-+ for {
-+ nextHash := hash4u(cv, bTableBits)
-+ s = nextS
-+ nextS = s + doEvery + (s-nextEmit)>>skipLog
-+ if nextS > sLimit {
-+ goto emitRemainder
-+ }
-+ candidate = e.table[nextHash]
-+ now := load6432(src, nextS)
-+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
-+ nextHash = hash4u(uint32(now), bTableBits)
-+
-+ offset := s - (candidate.offset - e.cur)
-+ if offset < maxMatchOffset && cv == candidate.val {
-+ e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)}
-+ break
-+ }
-+
-+ // Do one right away...
-+ cv = uint32(now)
-+ s = nextS
-+ nextS++
-+ candidate = e.table[nextHash]
-+ now >>= 8
-+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv}
-+
-+ offset = s - (candidate.offset - e.cur)
-+ if offset < maxMatchOffset && cv == candidate.val {
-+ break
-+ }
-+ cv = uint32(now)
-+ }
-+
-+ // A 4-byte match has been found. We'll later see if more than 4 bytes
-+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-+ // them as literal bytes.
-+
-+ // Call emitCopy, and then see if another emitCopy could be our next
-+ // move. Repeat until we find no match for the input immediately after
-+ // what was consumed by the last emitCopy call.
-+ //
-+ // If we exit this loop normally then we need to call emitLiteral next,
-+ // though we don't yet know how big the literal will be. We handle that
-+ // by proceeding to the next iteration of the main loop. We also can
-+ // exit this loop via goto if we get close to exhausting the input.
-+ for {
-+ // Invariant: we have a 4-byte match at s, and no need to emit any
-+ // literal bytes prior to s.
-+
-+ // Extend the 4-byte match as long as possible.
-+ t := candidate.offset - e.cur
-+ l := e.matchlenLong(s+4, t+4, src) + 4
-+
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+
-+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+
-+ if s >= sLimit {
-+ // Index first pair after match end.
-+ if int(s+l+4) < len(src) {
-+ cv := load3232(src, s)
-+ e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv}
-+ }
-+ goto emitRemainder
-+ }
-+
-+ // Store every second hash in-between, but offset by 1.
-+ for i := s - l + 2; i < s-5; i += 7 {
-+ x := load6432(src, int32(i))
-+ nextHash := hash4u(uint32(x), bTableBits)
-+ e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)}
-+ // Skip one
-+ x >>= 16
-+ nextHash = hash4u(uint32(x), bTableBits)
-+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)}
-+ // Skip one
-+ x >>= 16
-+ nextHash = hash4u(uint32(x), bTableBits)
-+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)}
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-2 to s. If
-+ // another emitCopy is not our next move, also calculate nextHash
-+ // at s+1. At least on GOARCH=amd64, these three hash calculations
-+ // are faster as one load64 call (with some shifts) instead of
-+ // three load32 calls.
-+ x := load6432(src, s-2)
-+ o := e.cur + s - 2
-+ prevHash := hash4u(uint32(x), bTableBits)
-+ prevHash2 := hash4u(uint32(x>>8), bTableBits)
-+ e.table[prevHash] = tableEntry{offset: o, val: uint32(x)}
-+ e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)}
-+ currHash := hash4u(uint32(x>>16), bTableBits)
-+ candidate = e.table[currHash]
-+ e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)}
-+
-+ offset := s - (candidate.offset - e.cur)
-+ if offset > maxMatchOffset || uint32(x>>16) != candidate.val {
-+ cv = uint32(x >> 24)
-+ s++
-+ break
-+ }
-+ }
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
-diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
-new file mode 100644
-index 0000000000000..4153d24c95fa4
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/level3.go
-@@ -0,0 +1,225 @@
-+package flate
-+
-+// fastEncL3
-+type fastEncL3 struct {
-+ fastGen
-+ table [tableSize]tableEntryPrev
-+}
-+
-+// Encode uses a similar algorithm to level 2, but will check up to two candidates.
-+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 8 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ // Protect against e.cur wraparound.
-+ for e.cur >= bufferReset {
-+ if len(e.hist) == 0 {
-+ for i := range e.table[:] {
-+ e.table[i] = tableEntryPrev{}
-+ }
-+ e.cur = maxMatchOffset
-+ break
-+ }
-+ // Shift down everything in the table that isn't already too far away.
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-+ for i := range e.table[:] {
-+ v := e.table[i]
-+ if v.Cur.offset <= minOff {
-+ v.Cur.offset = 0
-+ } else {
-+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
-+ }
-+ if v.Prev.offset <= minOff {
-+ v.Prev.offset = 0
-+ } else {
-+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
-+ }
-+ e.table[i] = v
-+ }
-+ e.cur = maxMatchOffset
-+ }
-+
-+ s := e.addBlock(src)
-+
-+ // Skip if too small.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ // Override src
-+ src = e.hist
-+ nextEmit := s
-+
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int32(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load3232(src, s)
-+ for {
-+ const skipLog = 6
-+ nextS := s
-+ var candidate tableEntry
-+ for {
-+ nextHash := hash(cv)
-+ s = nextS
-+ nextS = s + 1 + (s-nextEmit)>>skipLog
-+ if nextS > sLimit {
-+ goto emitRemainder
-+ }
-+ candidates := e.table[nextHash]
-+ now := load3232(src, nextS)
-+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
-+
-+ // Check both candidates
-+ candidate = candidates.Cur
-+ offset := s - (candidate.offset - e.cur)
-+ if cv == candidate.val {
-+ if offset > maxMatchOffset {
-+ cv = now
-+ // Previous will also be invalid, we have nothing.
-+ continue
-+ }
-+ o2 := s - (candidates.Prev.offset - e.cur)
-+ if cv != candidates.Prev.val || o2 > maxMatchOffset {
-+ break
-+ }
-+ // Both match and are valid, pick longest.
-+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
-+ if l2 > l1 {
-+ candidate = candidates.Prev
-+ }
-+ break
-+ } else {
-+ // We only check if value mismatches.
-+ // Offset will always be invalid in other cases.
-+ candidate = candidates.Prev
-+ if cv == candidate.val {
-+ offset := s - (candidate.offset - e.cur)
-+ if offset <= maxMatchOffset {
-+ break
-+ }
-+ }
-+ }
-+ cv = now
-+ }
-+
-+ // Call emitCopy, and then see if another emitCopy could be our next
-+ // move. Repeat until we find no match for the input immediately after
-+ // what was consumed by the last emitCopy call.
-+ //
-+ // If we exit this loop normally then we need to call emitLiteral next,
-+ // though we don't yet know how big the literal will be. We handle that
-+ // by proceeding to the next iteration of the main loop. We also can
-+ // exit this loop via goto if we get close to exhausting the input.
-+ for {
-+ // Invariant: we have a 4-byte match at s, and no need to emit any
-+ // literal bytes prior to s.
-+
-+ // Extend the 4-byte match as long as possible.
-+ //
-+ t := candidate.offset - e.cur
-+ l := e.matchlenLong(s+4, t+4, src) + 4
-+
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+
-+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+
-+ if s >= sLimit {
-+ t += l
-+ // Index first pair after match end.
-+ if int(t+4) < len(src) && t > 0 {
-+ cv := load3232(src, t)
-+ nextHash := hash(cv)
-+ e.table[nextHash] = tableEntryPrev{
-+ Prev: e.table[nextHash].Cur,
-+ Cur: tableEntry{offset: e.cur + t, val: cv},
-+ }
-+ }
-+ goto emitRemainder
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-3 to s.
-+ x := load6432(src, s-3)
-+ prevHash := hash(uint32(x))
-+ e.table[prevHash] = tableEntryPrev{
-+ Prev: e.table[prevHash].Cur,
-+ Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
-+ }
-+ x >>= 8
-+ prevHash = hash(uint32(x))
-+
-+ e.table[prevHash] = tableEntryPrev{
-+ Prev: e.table[prevHash].Cur,
-+ Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
-+ }
-+ x >>= 8
-+ prevHash = hash(uint32(x))
-+
-+ e.table[prevHash] = tableEntryPrev{
-+ Prev: e.table[prevHash].Cur,
-+ Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
-+ }
-+ x >>= 8
-+ currHash := hash(uint32(x))
-+ candidates := e.table[currHash]
-+ cv = uint32(x)
-+ e.table[currHash] = tableEntryPrev{
-+ Prev: candidates.Cur,
-+ Cur: tableEntry{offset: s + e.cur, val: cv},
-+ }
-+
-+ // Check both candidates
-+ candidate = candidates.Cur
-+ if cv == candidate.val {
-+ offset := s - (candidate.offset - e.cur)
-+ if offset <= maxMatchOffset {
-+ continue
-+ }
-+ } else {
-+ // We only check if value mismatches.
-+ // Offset will always be invalid in other cases.
-+ candidate = candidates.Prev
-+ if cv == candidate.val {
-+ offset := s - (candidate.offset - e.cur)
-+ if offset <= maxMatchOffset {
-+ continue
-+ }
-+ }
-+ }
-+ cv = uint32(x >> 8)
-+ s++
-+ break
-+ }
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
-diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
-new file mode 100644
-index 0000000000000..c689ac771b823
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/level4.go
-@@ -0,0 +1,210 @@
-+package flate
-+
-+import ""fmt""
-+
-+type fastEncL4 struct {
-+ fastGen
-+ table [tableSize]tableEntry
-+ bTable [tableSize]tableEntry
-+}
-+
-+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 12 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ // Protect against e.cur wraparound.
-+ for e.cur >= bufferReset {
-+ if len(e.hist) == 0 {
-+ for i := range e.table[:] {
-+ e.table[i] = tableEntry{}
-+ }
-+ for i := range e.bTable[:] {
-+ e.bTable[i] = tableEntry{}
-+ }
-+ e.cur = maxMatchOffset
-+ break
-+ }
-+ // Shift down everything in the table that isn't already too far away.
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-+ for i := range e.table[:] {
-+ v := e.table[i].offset
-+ if v <= minOff {
-+ v = 0
-+ } else {
-+ v = v - e.cur + maxMatchOffset
-+ }
-+ e.table[i].offset = v
-+ }
-+ for i := range e.bTable[:] {
-+ v := e.bTable[i].offset
-+ if v <= minOff {
-+ v = 0
-+ } else {
-+ v = v - e.cur + maxMatchOffset
-+ }
-+ e.bTable[i].offset = v
-+ }
-+ e.cur = maxMatchOffset
-+ }
-+
-+ s := e.addBlock(src)
-+
-+ // This check isn't in the Snappy implementation, but there, the caller
-+ // instead of the callee handles this case.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ // Override src
-+ src = e.hist
-+ nextEmit := s
-+
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int32(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load6432(src, s)
-+ for {
-+ const skipLog = 6
-+ const doEvery = 1
-+
-+ nextS := s
-+ var t int32
-+ for {
-+ nextHashS := hash4x64(cv, tableBits)
-+ nextHashL := hash7(cv, tableBits)
-+
-+ s = nextS
-+ nextS = s + doEvery + (s-nextEmit)>>skipLog
-+ if nextS > sLimit {
-+ goto emitRemainder
-+ }
-+ // Fetch a short+long candidate
-+ sCandidate := e.table[nextHashS]
-+ lCandidate := e.bTable[nextHashL]
-+ next := load6432(src, nextS)
-+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
-+ e.table[nextHashS] = entry
-+ e.bTable[nextHashL] = entry
-+
-+ t = lCandidate.offset - e.cur
-+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.val {
-+ // We got a long match. Use that.
-+ break
-+ }
-+
-+ t = sCandidate.offset - e.cur
-+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val {
-+ // Found a 4-byte match...
-+ lCandidate = e.bTable[hash7(next, tableBits)]
-+
-+ // If the next long is a candidate, check if we should use that instead...
-+ lOff := nextS - (lCandidate.offset - e.cur)
-+ if lOff < maxMatchOffset && lCandidate.val == uint32(next) {
-+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
-+ if l2 > l1 {
-+ s = nextS
-+ t = lCandidate.offset - e.cur
-+ }
-+ }
-+ break
-+ }
-+ cv = next
-+ }
-+
-+ // A 4-byte match has been found. We'll later see if more than 4 bytes
-+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-+ // them as literal bytes.
-+
-+ // Extend the 4-byte match as long as possible.
-+ l := e.matchlenLong(s+4, t+4, src) + 4
-+
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+ if false {
-+ if t >= s {
-+ panic(""s-t"")
-+ }
-+ if (s - t) > maxMatchOffset {
-+ panic(fmt.Sprintln(""mmo"", t))
-+ }
-+ if l < baseMatchLength {
-+ panic(""bml"")
-+ }
-+ }
-+
-+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+
-+ if s >= sLimit {
-+ // Index first pair after match end.
-+ if int(s+8) < len(src) {
-+ cv := load6432(src, s)
-+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)}
-+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)}
-+ }
-+ goto emitRemainder
-+ }
-+
-+ // Store every 3rd hash in-between
-+ if true {
-+ i := nextS
-+ if i < s-1 {
-+ cv := load6432(src, i)
-+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
-+ t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1}
-+ e.bTable[hash7(cv, tableBits)] = t
-+ e.bTable[hash7(cv>>8, tableBits)] = t2
-+ e.table[hash4u(t2.val, tableBits)] = t2
-+
-+ i += 3
-+ for ; i < s-1; i += 3 {
-+ cv := load6432(src, i)
-+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
-+ t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1}
-+ e.bTable[hash7(cv, tableBits)] = t
-+ e.bTable[hash7(cv>>8, tableBits)] = t2
-+ e.table[hash4u(t2.val, tableBits)] = t2
-+ }
-+ }
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-1 and at s.
-+ x := load6432(src, s-1)
-+ o := e.cur + s - 1
-+ prevHashS := hash4x64(x, tableBits)
-+ prevHashL := hash7(x, tableBits)
-+ e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)}
-+ e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)}
-+ cv = x >> 8
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
-diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
-new file mode 100644
-index 0000000000000..14a2356126aad
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/level5.go
-@@ -0,0 +1,276 @@
-+package flate
-+
-+import ""fmt""
-+
-+type fastEncL5 struct {
-+ fastGen
-+ table [tableSize]tableEntry
-+ bTable [tableSize]tableEntryPrev
-+}
-+
-+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 12 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ // Protect against e.cur wraparound.
-+ for e.cur >= bufferReset {
-+ if len(e.hist) == 0 {
-+ for i := range e.table[:] {
-+ e.table[i] = tableEntry{}
-+ }
-+ for i := range e.bTable[:] {
-+ e.bTable[i] = tableEntryPrev{}
-+ }
-+ e.cur = maxMatchOffset
-+ break
-+ }
-+ // Shift down everything in the table that isn't already too far away.
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-+ for i := range e.table[:] {
-+ v := e.table[i].offset
-+ if v <= minOff {
-+ v = 0
-+ } else {
-+ v = v - e.cur + maxMatchOffset
-+ }
-+ e.table[i].offset = v
-+ }
-+ for i := range e.bTable[:] {
-+ v := e.bTable[i]
-+ if v.Cur.offset <= minOff {
-+ v.Cur.offset = 0
-+ v.Prev.offset = 0
-+ } else {
-+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
-+ if v.Prev.offset <= minOff {
-+ v.Prev.offset = 0
-+ } else {
-+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
-+ }
-+ }
-+ e.bTable[i] = v
-+ }
-+ e.cur = maxMatchOffset
-+ }
-+
-+ s := e.addBlock(src)
-+
-+ // This check isn't in the Snappy implementation, but there, the caller
-+ // instead of the callee handles this case.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ // Override src
-+ src = e.hist
-+ nextEmit := s
-+
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int32(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load6432(src, s)
-+ for {
-+ const skipLog = 6
-+ const doEvery = 1
-+
-+ nextS := s
-+ var l int32
-+ var t int32
-+ for {
-+ nextHashS := hash4x64(cv, tableBits)
-+ nextHashL := hash7(cv, tableBits)
-+
-+ s = nextS
-+ nextS = s + doEvery + (s-nextEmit)>>skipLog
-+ if nextS > sLimit {
-+ goto emitRemainder
-+ }
-+ // Fetch a short+long candidate
-+ sCandidate := e.table[nextHashS]
-+ lCandidate := e.bTable[nextHashL]
-+ next := load6432(src, nextS)
-+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
-+ e.table[nextHashS] = entry
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = entry, eLong.Cur
-+
-+ nextHashS = hash4x64(next, tableBits)
-+ nextHashL = hash7(next, tableBits)
-+
-+ t = lCandidate.Cur.offset - e.cur
-+ if s-t < maxMatchOffset {
-+ if uint32(cv) == lCandidate.Cur.val {
-+ // Store the next match
-+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
-+
-+ t2 := lCandidate.Prev.offset - e.cur
-+ if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
-+ l = e.matchlen(s+4, t+4, src) + 4
-+ ml1 := e.matchlen(s+4, t2+4, src) + 4
-+ if ml1 > l {
-+ t = t2
-+ l = ml1
-+ break
-+ }
-+ }
-+ break
-+ }
-+ t = lCandidate.Prev.offset - e.cur
-+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
-+ // Store the next match
-+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
-+ break
-+ }
-+ }
-+
-+ t = sCandidate.offset - e.cur
-+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val {
-+ // Found a 4-byte match...
-+ l = e.matchlen(s+4, t+4, src) + 4
-+ lCandidate = e.bTable[nextHashL]
-+ // Store the next match
-+
-+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
-+
-+ // If the next long is a candidate, use that...
-+ t2 := lCandidate.Cur.offset - e.cur
-+ if nextS-t2 < maxMatchOffset {
-+ if lCandidate.Cur.val == uint32(next) {
-+ ml := e.matchlen(nextS+4, t2+4, src) + 4
-+ if ml > l {
-+ t = t2
-+ s = nextS
-+ l = ml
-+ break
-+ }
-+ }
-+ // If the previous long is a candidate, use that...
-+ t2 = lCandidate.Prev.offset - e.cur
-+ if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) {
-+ ml := e.matchlen(nextS+4, t2+4, src) + 4
-+ if ml > l {
-+ t = t2
-+ s = nextS
-+ l = ml
-+ break
-+ }
-+ }
-+ }
-+ break
-+ }
-+ cv = next
-+ }
-+
-+ // A 4-byte match has been found. We'll later see if more than 4 bytes
-+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-+ // them as literal bytes.
-+
-+ // Extend the 4-byte match as long as possible.
-+ if l == 0 {
-+ l = e.matchlenLong(s+4, t+4, src) + 4
-+ } else if l == maxMatchLength {
-+ l += e.matchlenLong(s+l, t+l, src)
-+ }
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+ if false {
-+ if t >= s {
-+ panic(fmt.Sprintln(""s-t"", s, t))
-+ }
-+ if (s - t) > maxMatchOffset {
-+ panic(fmt.Sprintln(""mmo"", s-t))
-+ }
-+ if l < baseMatchLength {
-+ panic(""bml"")
-+ }
-+ }
-+
-+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+
-+ if s >= sLimit {
-+ goto emitRemainder
-+ }
-+
-+ // Store every 3rd hash in-between.
-+ if true {
-+ const hashEvery = 3
-+ i := s - l + 1
-+ if i < s-1 {
-+ cv := load6432(src, i)
-+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
-+ e.table[hash4x64(cv, tableBits)] = t
-+ eLong := &e.bTable[hash7(cv, tableBits)]
-+ eLong.Cur, eLong.Prev = t, eLong.Cur
-+
-+ // Do a long at i+1
-+ cv >>= 8
-+ t = tableEntry{offset: t.offset + 1, val: uint32(cv)}
-+ eLong = &e.bTable[hash7(cv, tableBits)]
-+ eLong.Cur, eLong.Prev = t, eLong.Cur
-+
-+ // We only have enough bits for a short entry at i+2
-+ cv >>= 8
-+ t = tableEntry{offset: t.offset + 1, val: uint32(cv)}
-+ e.table[hash4x64(cv, tableBits)] = t
-+
-+ // Skip one - otherwise we risk hitting 's'
-+ i += 4
-+ for ; i < s-1; i += hashEvery {
-+ cv := load6432(src, i)
-+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
-+ t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)}
-+ eLong := &e.bTable[hash7(cv, tableBits)]
-+ eLong.Cur, eLong.Prev = t, eLong.Cur
-+ e.table[hash4u(t2.val, tableBits)] = t2
-+ }
-+ }
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-1 and at s.
-+ x := load6432(src, s-1)
-+ o := e.cur + s - 1
-+ prevHashS := hash4x64(x, tableBits)
-+ prevHashL := hash7(x, tableBits)
-+ e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)}
-+ eLong := &e.bTable[prevHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur
-+ cv = x >> 8
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
-diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
-new file mode 100644
-index 0000000000000..cad0c7df7fc3c
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/level6.go
-@@ -0,0 +1,279 @@
-+package flate
-+
-+import ""fmt""
-+
-+type fastEncL6 struct {
-+ fastGen
-+ table [tableSize]tableEntry
-+ bTable [tableSize]tableEntryPrev
-+}
-+
-+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 12 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ // Protect against e.cur wraparound.
-+ for e.cur >= bufferReset {
-+ if len(e.hist) == 0 {
-+ for i := range e.table[:] {
-+ e.table[i] = tableEntry{}
-+ }
-+ for i := range e.bTable[:] {
-+ e.bTable[i] = tableEntryPrev{}
-+ }
-+ e.cur = maxMatchOffset
-+ break
-+ }
-+ // Shift down everything in the table that isn't already too far away.
-+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-+ for i := range e.table[:] {
-+ v := e.table[i].offset
-+ if v <= minOff {
-+ v = 0
-+ } else {
-+ v = v - e.cur + maxMatchOffset
-+ }
-+ e.table[i].offset = v
-+ }
-+ for i := range e.bTable[:] {
-+ v := e.bTable[i]
-+ if v.Cur.offset <= minOff {
-+ v.Cur.offset = 0
-+ v.Prev.offset = 0
-+ } else {
-+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
-+ if v.Prev.offset <= minOff {
-+ v.Prev.offset = 0
-+ } else {
-+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
-+ }
-+ }
-+ e.bTable[i] = v
-+ }
-+ e.cur = maxMatchOffset
-+ }
-+
-+ s := e.addBlock(src)
-+
-+ // This check isn't in the Snappy implementation, but there, the caller
-+ // instead of the callee handles this case.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ // Override src
-+ src = e.hist
-+ nextEmit := s
-+
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int32(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load6432(src, s)
-+ // Repeat MUST be > 1 and within range
-+ repeat := int32(1)
-+ for {
-+ const skipLog = 7
-+ const doEvery = 1
-+
-+ nextS := s
-+ var l int32
-+ var t int32
-+ for {
-+ nextHashS := hash4x64(cv, tableBits)
-+ nextHashL := hash7(cv, tableBits)
-+ s = nextS
-+ nextS = s + doEvery + (s-nextEmit)>>skipLog
-+ if nextS > sLimit {
-+ goto emitRemainder
-+ }
-+ // Fetch a short+long candidate
-+ sCandidate := e.table[nextHashS]
-+ lCandidate := e.bTable[nextHashL]
-+ next := load6432(src, nextS)
-+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
-+ e.table[nextHashS] = entry
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = entry, eLong.Cur
-+
-+ // Calculate hashes of 'next'
-+ nextHashS = hash4x64(next, tableBits)
-+ nextHashL = hash7(next, tableBits)
-+
-+ t = lCandidate.Cur.offset - e.cur
-+ if s-t < maxMatchOffset {
-+ if uint32(cv) == lCandidate.Cur.val {
-+ // Long candidate matches at least 4 bytes.
-+
-+ // Store the next match
-+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
-+
-+ // Check the previous long candidate as well.
-+ t2 := lCandidate.Prev.offset - e.cur
-+ if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
-+ l = e.matchlen(s+4, t+4, src) + 4
-+ ml1 := e.matchlen(s+4, t2+4, src) + 4
-+ if ml1 > l {
-+ t = t2
-+ l = ml1
-+ break
-+ }
-+ }
-+ break
-+ }
-+ // Current value did not match, but check if previous long value does.
-+ t = lCandidate.Prev.offset - e.cur
-+ if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val {
-+ // Store the next match
-+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
-+ break
-+ }
-+ }
-+
-+ t = sCandidate.offset - e.cur
-+ if s-t < maxMatchOffset && uint32(cv) == sCandidate.val {
-+ // Found a 4-byte match...
-+ l = e.matchlen(s+4, t+4, src) + 4
-+
-+ // Look up next long candidate (at nextS)
-+ lCandidate = e.bTable[nextHashL]
-+
-+ // Store the next match
-+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)}
-+ eLong := &e.bTable[nextHashL]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur
-+
-+ // Check repeat at s + repOff
-+ const repOff = 1
-+ t2 := s - repeat + repOff
-+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
-+ ml := e.matchlen(s+4+repOff, t2+4, src) + 4
-+ if ml > l {
-+ t = t2
-+ l = ml
-+ s += repOff
-+ // Not worth checking more.
-+ break
-+ }
-+ }
-+
-+ // If the next long is a candidate, use that...
-+ t2 = lCandidate.Cur.offset - e.cur
-+ if nextS-t2 < maxMatchOffset {
-+ if lCandidate.Cur.val == uint32(next) {
-+ ml := e.matchlen(nextS+4, t2+4, src) + 4
-+ if ml > l {
-+ t = t2
-+ s = nextS
-+ l = ml
-+ // This is ok, but check previous as well.
-+ }
-+ }
-+ // If the previous long is a candidate, use that...
-+ t2 = lCandidate.Prev.offset - e.cur
-+ if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) {
-+ ml := e.matchlen(nextS+4, t2+4, src) + 4
-+ if ml > l {
-+ t = t2
-+ s = nextS
-+ l = ml
-+ break
-+ }
-+ }
-+ }
-+ break
-+ }
-+ cv = next
-+ }
-+
-+ // A 4-byte match has been found. We'll later see if more than 4 bytes
-+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-+ // them as literal bytes.
-+
-+ // Extend the 4-byte match as long as possible.
-+ if l == 0 {
-+ l = e.matchlenLong(s+4, t+4, src) + 4
-+ } else if l == maxMatchLength {
-+ l += e.matchlenLong(s+l, t+l, src)
-+ }
-+
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+ if false {
-+ if t >= s {
-+ panic(fmt.Sprintln(""s-t"", s, t))
-+ }
-+ if (s - t) > maxMatchOffset {
-+ panic(fmt.Sprintln(""mmo"", s-t))
-+ }
-+ if l < baseMatchLength {
-+ panic(""bml"")
-+ }
-+ }
-+
-+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-+ repeat = s - t
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+
-+ if s >= sLimit {
-+ // Index after match end.
-+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
-+ cv := load6432(src, i)
-+ e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)}
-+ eLong := &e.bTable[hash7(cv, tableBits)]
-+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur
-+ }
-+ goto emitRemainder
-+ }
-+
-+ // Store every long hash in-between and every second short.
-+ if true {
-+ for i := nextS + 1; i < s-1; i += 2 {
-+ cv := load6432(src, i)
-+ t := tableEntry{offset: i + e.cur, val: uint32(cv)}
-+ t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)}
-+ eLong := &e.bTable[hash7(cv, tableBits)]
-+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
-+ e.table[hash4x64(cv, tableBits)] = t
-+ eLong.Cur, eLong.Prev = t, eLong.Cur
-+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
-+ }
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-1 and at s.
-+ cv = load6432(src, s)
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
-diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go
-deleted file mode 100644
-index c1a02720d1a9b..0000000000000
---- a/vendor/github.com/klauspost/compress/flate/reverse_bits.go
-+++ /dev/null
-@@ -1,48 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package flate
--
--var reverseByte = [256]byte{
-- 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
-- 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
-- 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
-- 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
-- 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
-- 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
-- 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
-- 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
-- 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
-- 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
-- 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
-- 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
-- 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
-- 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
-- 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
-- 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
-- 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
-- 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
-- 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
-- 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
-- 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
-- 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
-- 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
-- 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
-- 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
-- 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
-- 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
-- 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
-- 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
-- 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
-- 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
-- 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
--}
--
--func reverseUint16(v uint16) uint16 {
-- return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
--}
--
--func reverseBits(number uint16, bitLength byte) uint16 {
-- return reverseUint16(number << uint8(16-bitLength))
--}
-diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go
-deleted file mode 100644
-index aebebd5248f91..0000000000000
---- a/vendor/github.com/klauspost/compress/flate/snappy.go
-+++ /dev/null
-@@ -1,900 +0,0 @@
--// Copyright 2011 The Snappy-Go Authors. All rights reserved.
--// Modified for deflate by Klaus Post (c) 2015.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package flate
--
--// emitLiteral writes a literal chunk and returns the number of bytes written.
--func emitLiteral(dst *tokens, lit []byte) {
-- ol := int(dst.n)
-- for i, v := range lit {
-- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
-- }
-- dst.n += uint16(len(lit))
--}
--
--// emitCopy writes a copy chunk and returns the number of bytes written.
--func emitCopy(dst *tokens, offset, length int) {
-- dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
-- dst.n++
--}
--
--type fastEnc interface {
-- Encode(dst *tokens, src []byte)
-- Reset()
--}
--
--func newFastEnc(level int) fastEnc {
-- switch level {
-- case 1:
-- return &snappyL1{}
-- case 2:
-- return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
-- case 3:
-- return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
-- case 4:
-- return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
-- default:
-- panic(""invalid level specified"")
-- }
--}
--
--const (
-- tableBits = 14 // Bits used in the table
-- tableSize = 1 << tableBits // Size of the table
-- tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
-- tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
-- baseMatchOffset = 1 // The smallest match offset
-- baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
-- maxMatchOffset = 1 << 15 // The largest match offset
--)
--
--func load32(b []byte, i int) uint32 {
-- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
--}
--
--func load64(b []byte, i int) uint64 {
-- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
--}
--
--func hash(u uint32) uint32 {
-- return (u * 0x1e35a7bd) >> tableShift
--}
--
--// snappyL1 encapsulates level 1 compression
--type snappyL1 struct{}
--
--func (e *snappyL1) Reset() {}
--
--func (e *snappyL1) Encode(dst *tokens, src []byte) {
-- const (
-- inputMargin = 16 - 1
-- minNonLiteralBlockSize = 1 + 1 + inputMargin
-- )
--
-- // This check isn't in the Snappy implementation, but there, the caller
-- // instead of the callee handles this case.
-- if len(src) < minNonLiteralBlockSize {
-- // We do not fill the token table.
-- // This will be picked up by caller.
-- dst.n = uint16(len(src))
-- return
-- }
--
-- // Initialize the hash table.
-- //
-- // The table element type is uint16, as s < sLimit and sLimit < len(src)
-- // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
-- var table [tableSize]uint16
--
-- // sLimit is when to stop looking for offset/length copies. The inputMargin
-- // lets us use a fast path for emitLiteral in the main loop, while we are
-- // looking for copies.
-- sLimit := len(src) - inputMargin
--
-- // nextEmit is where in src the next emitLiteral should start from.
-- nextEmit := 0
--
-- // The encoded form must start with a literal, as there are no previous
-- // bytes to copy, so we start looking for hash matches at s == 1.
-- s := 1
-- nextHash := hash(load32(src, s))
--
-- for {
-- // Copied from the C++ snappy implementation:
-- //
-- // Heuristic match skipping: If 32 bytes are scanned with no matches
-- // found, start looking only at every other byte. If 32 more bytes are
-- // scanned (or skipped), look at every third byte, etc.. When a match
-- // is found, immediately go back to looking at every byte. This is a
-- // small loss (~5% performance, ~0.1% density) for compressible data
-- // due to more bookkeeping, but for non-compressible data (such as
-- // JPEG) it's a huge win since the compressor quickly ""realizes"" the
-- // data is incompressible and doesn't bother looking for matches
-- // everywhere.
-- //
-- // The ""skip"" variable keeps track of how many bytes there are since
-- // the last match; dividing it by 32 (ie. right-shifting by five) gives
-- // the number of bytes to move ahead for each iteration.
-- skip := 32
--
-- nextS := s
-- candidate := 0
-- for {
-- s = nextS
-- bytesBetweenHashLookups := skip >> 5
-- nextS = s + bytesBetweenHashLookups
-- skip += bytesBetweenHashLookups
-- if nextS > sLimit {
-- goto emitRemainder
-- }
-- candidate = int(table[nextHash&tableMask])
-- table[nextHash&tableMask] = uint16(s)
-- nextHash = hash(load32(src, nextS))
-- if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
-- break
-- }
-- }
--
-- // A 4-byte match has been found. We'll later see if more than 4 bytes
-- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-- // them as literal bytes.
-- emitLiteral(dst, src[nextEmit:s])
--
-- // Call emitCopy, and then see if another emitCopy could be our next
-- // move. Repeat until we find no match for the input immediately after
-- // what was consumed by the last emitCopy call.
-- //
-- // If we exit this loop normally then we need to call emitLiteral next,
-- // though we don't yet know how big the literal will be. We handle that
-- // by proceeding to the next iteration of the main loop. We also can
-- // exit this loop via goto if we get close to exhausting the input.
-- for {
-- // Invariant: we have a 4-byte match at s, and no need to emit any
-- // literal bytes prior to s.
-- base := s
--
-- // Extend the 4-byte match as long as possible.
-- //
-- // This is an inlined version of Snappy's:
-- // s = extendMatch(src, candidate+4, s+4)
-- s += 4
-- s1 := base + maxMatchLength
-- if s1 > len(src) {
-- s1 = len(src)
-- }
-- a := src[s:s1]
-- b := src[candidate+4:]
-- b = b[:len(a)]
-- l := len(a)
-- for i := range a {
-- if a[i] != b[i] {
-- l = i
-- break
-- }
-- }
-- s += l
--
-- // matchToken is flate's equivalent of Snappy's emitCopy.
-- dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
-- dst.n++
-- nextEmit = s
-- if s >= sLimit {
-- goto emitRemainder
-- }
--
-- // We could immediately start working at s now, but to improve
-- // compression we first update the hash table at s-1 and at s. If
-- // another emitCopy is not our next move, also calculate nextHash
-- // at s+1. At least on GOARCH=amd64, these three hash calculations
-- // are faster as one load64 call (with some shifts) instead of
-- // three load32 calls.
-- x := load64(src, s-1)
-- prevHash := hash(uint32(x >> 0))
-- table[prevHash&tableMask] = uint16(s - 1)
-- currHash := hash(uint32(x >> 8))
-- candidate = int(table[currHash&tableMask])
-- table[currHash&tableMask] = uint16(s)
-- if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
-- nextHash = hash(uint32(x >> 16))
-- s++
-- break
-- }
-- }
-- }
--
--emitRemainder:
-- if nextEmit < len(src) {
-- emitLiteral(dst, src[nextEmit:])
-- }
--}
--
--type tableEntry struct {
-- val uint32
-- offset int32
--}
--
--func load3232(b []byte, i int32) uint32 {
-- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
--}
--
--func load6432(b []byte, i int32) uint64 {
-- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
--}
--
--// snappyGen maintains the table for matches,
--// and the previous byte block for level 2.
--// This is the generic implementation.
--type snappyGen struct {
-- prev []byte
-- cur int32
--}
--
--// snappyGen maintains the table for matches,
--// and the previous byte block for level 2.
--// This is the generic implementation.
--type snappyL2 struct {
-- snappyGen
-- table [tableSize]tableEntry
--}
--
--// EncodeL2 uses a similar algorithm to level 1, but is capable
--// of matching across blocks giving better compression at a small slowdown.
--func (e *snappyL2) Encode(dst *tokens, src []byte) {
-- const (
-- inputMargin = 8 - 1
-- minNonLiteralBlockSize = 1 + 1 + inputMargin
-- )
--
-- // Protect against e.cur wraparound.
-- if e.cur > 1<<30 {
-- for i := range e.table[:] {
-- e.table[i] = tableEntry{}
-- }
-- e.cur = maxStoreBlockSize
-- }
--
-- // This check isn't in the Snappy implementation, but there, the caller
-- // instead of the callee handles this case.
-- if len(src) < minNonLiteralBlockSize {
-- // We do not fill the token table.
-- // This will be picked up by caller.
-- dst.n = uint16(len(src))
-- e.cur += maxStoreBlockSize
-- e.prev = e.prev[:0]
-- return
-- }
--
-- // sLimit is when to stop looking for offset/length copies. The inputMargin
-- // lets us use a fast path for emitLiteral in the main loop, while we are
-- // looking for copies.
-- sLimit := int32(len(src) - inputMargin)
--
-- // nextEmit is where in src the next emitLiteral should start from.
-- nextEmit := int32(0)
-- s := int32(0)
-- cv := load3232(src, s)
-- nextHash := hash(cv)
--
-- for {
-- // Copied from the C++ snappy implementation:
-- //
-- // Heuristic match skipping: If 32 bytes are scanned with no matches
-- // found, start looking only at every other byte. If 32 more bytes are
-- // scanned (or skipped), look at every third byte, etc.. When a match
-- // is found, immediately go back to looking at every byte. This is a
-- // small loss (~5% performance, ~0.1% density) for compressible data
-- // due to more bookkeeping, but for non-compressible data (such as
-- // JPEG) it's a huge win since the compressor quickly ""realizes"" the
-- // data is incompressible and doesn't bother looking for matches
-- // everywhere.
-- //
-- // The ""skip"" variable keeps track of how many bytes there are since
-- // the last match; dividing it by 32 (ie. right-shifting by five) gives
-- // the number of bytes to move ahead for each iteration.
-- skip := int32(32)
--
-- nextS := s
-- var candidate tableEntry
-- for {
-- s = nextS
-- bytesBetweenHashLookups := skip >> 5
-- nextS = s + bytesBetweenHashLookups
-- skip += bytesBetweenHashLookups
-- if nextS > sLimit {
-- goto emitRemainder
-- }
-- candidate = e.table[nextHash&tableMask]
-- now := load3232(src, nextS)
-- e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
-- nextHash = hash(now)
--
-- offset := s - (candidate.offset - e.cur)
-- if offset > maxMatchOffset || cv != candidate.val {
-- // Out of range or not matched.
-- cv = now
-- continue
-- }
-- break
-- }
--
-- // A 4-byte match has been found. We'll later see if more than 4 bytes
-- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-- // them as literal bytes.
-- emitLiteral(dst, src[nextEmit:s])
--
-- // Call emitCopy, and then see if another emitCopy could be our next
-- // move. Repeat until we find no match for the input immediately after
-- // what was consumed by the last emitCopy call.
-- //
-- // If we exit this loop normally then we need to call emitLiteral next,
-- // though we don't yet know how big the literal will be. We handle that
-- // by proceeding to the next iteration of the main loop. We also can
-- // exit this loop via goto if we get close to exhausting the input.
-- for {
-- // Invariant: we have a 4-byte match at s, and no need to emit any
-- // literal bytes prior to s.
--
-- // Extend the 4-byte match as long as possible.
-- //
-- s += 4
-- t := candidate.offset - e.cur + 4
-- l := e.matchlen(s, t, src)
--
-- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
-- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
-- dst.n++
-- s += l
-- nextEmit = s
-- if s >= sLimit {
-- t += l
-- // Index first pair after match end.
-- if int(t+4) < len(src) && t > 0 {
-- cv := load3232(src, t)
-- e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
-- }
-- goto emitRemainder
-- }
--
-- // We could immediately start working at s now, but to improve
-- // compression we first update the hash table at s-1 and at s. If
-- // another emitCopy is not our next move, also calculate nextHash
-- // at s+1. At least on GOARCH=amd64, these three hash calculations
-- // are faster as one load64 call (with some shifts) instead of
-- // three load32 calls.
-- x := load6432(src, s-1)
-- prevHash := hash(uint32(x))
-- e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
-- x >>= 8
-- currHash := hash(uint32(x))
-- candidate = e.table[currHash&tableMask]
-- e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
--
-- offset := s - (candidate.offset - e.cur)
-- if offset > maxMatchOffset || uint32(x) != candidate.val {
-- cv = uint32(x >> 8)
-- nextHash = hash(cv)
-- s++
-- break
-- }
-- }
-- }
--
--emitRemainder:
-- if int(nextEmit) < len(src) {
-- emitLiteral(dst, src[nextEmit:])
-- }
-- e.cur += int32(len(src))
-- e.prev = e.prev[:len(src)]
-- copy(e.prev, src)
--}
--
--type tableEntryPrev struct {
-- Cur tableEntry
-- Prev tableEntry
--}
--
--// snappyL3
--type snappyL3 struct {
-- snappyGen
-- table [tableSize]tableEntryPrev
--}
--
--// Encode uses a similar algorithm to level 2, will check up to two candidates.
--func (e *snappyL3) Encode(dst *tokens, src []byte) {
-- const (
-- inputMargin = 8 - 1
-- minNonLiteralBlockSize = 1 + 1 + inputMargin
-- )
--
-- // Protect against e.cur wraparound.
-- if e.cur > 1<<30 {
-- for i := range e.table[:] {
-- e.table[i] = tableEntryPrev{}
-- }
-- e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
-- }
--
-- // This check isn't in the Snappy implementation, but there, the caller
-- // instead of the callee handles this case.
-- if len(src) < minNonLiteralBlockSize {
-- // We do not fill the token table.
-- // This will be picked up by caller.
-- dst.n = uint16(len(src))
-- e.cur += maxStoreBlockSize
-- e.prev = e.prev[:0]
-- return
-- }
--
-- // sLimit is when to stop looking for offset/length copies. The inputMargin
-- // lets us use a fast path for emitLiteral in the main loop, while we are
-- // looking for copies.
-- sLimit := int32(len(src) - inputMargin)
--
-- // nextEmit is where in src the next emitLiteral should start from.
-- nextEmit := int32(0)
-- s := int32(0)
-- cv := load3232(src, s)
-- nextHash := hash(cv)
--
-- for {
-- // Copied from the C++ snappy implementation:
-- //
-- // Heuristic match skipping: If 32 bytes are scanned with no matches
-- // found, start looking only at every other byte. If 32 more bytes are
-- // scanned (or skipped), look at every third byte, etc.. When a match
-- // is found, immediately go back to looking at every byte. This is a
-- // small loss (~5% performance, ~0.1% density) for compressible data
-- // due to more bookkeeping, but for non-compressible data (such as
-- // JPEG) it's a huge win since the compressor quickly ""realizes"" the
-- // data is incompressible and doesn't bother looking for matches
-- // everywhere.
-- //
-- // The ""skip"" variable keeps track of how many bytes there are since
-- // the last match; dividing it by 32 (ie. right-shifting by five) gives
-- // the number of bytes to move ahead for each iteration.
-- skip := int32(32)
--
-- nextS := s
-- var candidate tableEntry
-- for {
-- s = nextS
-- bytesBetweenHashLookups := skip >> 5
-- nextS = s + bytesBetweenHashLookups
-- skip += bytesBetweenHashLookups
-- if nextS > sLimit {
-- goto emitRemainder
-- }
-- candidates := e.table[nextHash&tableMask]
-- now := load3232(src, nextS)
-- e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
-- nextHash = hash(now)
--
-- // Check both candidates
-- candidate = candidates.Cur
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset <= maxMatchOffset {
-- break
-- }
-- } else {
-- // We only check if value mismatches.
-- // Offset will always be invalid in other cases.
-- candidate = candidates.Prev
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset <= maxMatchOffset {
-- break
-- }
-- }
-- }
-- cv = now
-- }
--
-- // A 4-byte match has been found. We'll later see if more than 4 bytes
-- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-- // them as literal bytes.
-- emitLiteral(dst, src[nextEmit:s])
--
-- // Call emitCopy, and then see if another emitCopy could be our next
-- // move. Repeat until we find no match for the input immediately after
-- // what was consumed by the last emitCopy call.
-- //
-- // If we exit this loop normally then we need to call emitLiteral next,
-- // though we don't yet know how big the literal will be. We handle that
-- // by proceeding to the next iteration of the main loop. We also can
-- // exit this loop via goto if we get close to exhausting the input.
-- for {
-- // Invariant: we have a 4-byte match at s, and no need to emit any
-- // literal bytes prior to s.
--
-- // Extend the 4-byte match as long as possible.
-- //
-- s += 4
-- t := candidate.offset - e.cur + 4
-- l := e.matchlen(s, t, src)
--
-- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
-- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
-- dst.n++
-- s += l
-- nextEmit = s
-- if s >= sLimit {
-- t += l
-- // Index first pair after match end.
-- if int(t+4) < len(src) && t > 0 {
-- cv := load3232(src, t)
-- nextHash = hash(cv)
-- e.table[nextHash&tableMask] = tableEntryPrev{
-- Prev: e.table[nextHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + t, val: cv},
-- }
-- }
-- goto emitRemainder
-- }
--
-- // We could immediately start working at s now, but to improve
-- // compression we first update the hash table at s-3 to s. If
-- // another emitCopy is not our next move, also calculate nextHash
-- // at s+1. At least on GOARCH=amd64, these three hash calculations
-- // are faster as one load64 call (with some shifts) instead of
-- // three load32 calls.
-- x := load6432(src, s-3)
-- prevHash := hash(uint32(x))
-- e.table[prevHash&tableMask] = tableEntryPrev{
-- Prev: e.table[prevHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
-- }
-- x >>= 8
-- prevHash = hash(uint32(x))
--
-- e.table[prevHash&tableMask] = tableEntryPrev{
-- Prev: e.table[prevHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
-- }
-- x >>= 8
-- prevHash = hash(uint32(x))
--
-- e.table[prevHash&tableMask] = tableEntryPrev{
-- Prev: e.table[prevHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
-- }
-- x >>= 8
-- currHash := hash(uint32(x))
-- candidates := e.table[currHash&tableMask]
-- cv = uint32(x)
-- e.table[currHash&tableMask] = tableEntryPrev{
-- Prev: candidates.Cur,
-- Cur: tableEntry{offset: s + e.cur, val: cv},
-- }
--
-- // Check both candidates
-- candidate = candidates.Cur
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset <= maxMatchOffset {
-- continue
-- }
-- } else {
-- // We only check if value mismatches.
-- // Offset will always be invalid in other cases.
-- candidate = candidates.Prev
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset <= maxMatchOffset {
-- continue
-- }
-- }
-- }
-- cv = uint32(x >> 8)
-- nextHash = hash(cv)
-- s++
-- break
-- }
-- }
--
--emitRemainder:
-- if int(nextEmit) < len(src) {
-- emitLiteral(dst, src[nextEmit:])
-- }
-- e.cur += int32(len(src))
-- e.prev = e.prev[:len(src)]
-- copy(e.prev, src)
--}
--
--// snappyL4
--type snappyL4 struct {
-- snappyL3
--}
--
--// Encode uses a similar algorithm to level 3,
--// but will check up to two candidates if first isn't long enough.
--func (e *snappyL4) Encode(dst *tokens, src []byte) {
-- const (
-- inputMargin = 8 - 3
-- minNonLiteralBlockSize = 1 + 1 + inputMargin
-- matchLenGood = 12
-- )
--
-- // Protect against e.cur wraparound.
-- if e.cur > 1<<30 {
-- for i := range e.table[:] {
-- e.table[i] = tableEntryPrev{}
-- }
-- e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
-- }
--
-- // This check isn't in the Snappy implementation, but there, the caller
-- // instead of the callee handles this case.
-- if len(src) < minNonLiteralBlockSize {
-- // We do not fill the token table.
-- // This will be picked up by caller.
-- dst.n = uint16(len(src))
-- e.cur += maxStoreBlockSize
-- e.prev = e.prev[:0]
-- return
-- }
--
-- // sLimit is when to stop looking for offset/length copies. The inputMargin
-- // lets us use a fast path for emitLiteral in the main loop, while we are
-- // looking for copies.
-- sLimit := int32(len(src) - inputMargin)
--
-- // nextEmit is where in src the next emitLiteral should start from.
-- nextEmit := int32(0)
-- s := int32(0)
-- cv := load3232(src, s)
-- nextHash := hash(cv)
--
-- for {
-- // Copied from the C++ snappy implementation:
-- //
-- // Heuristic match skipping: If 32 bytes are scanned with no matches
-- // found, start looking only at every other byte. If 32 more bytes are
-- // scanned (or skipped), look at every third byte, etc.. When a match
-- // is found, immediately go back to looking at every byte. This is a
-- // small loss (~5% performance, ~0.1% density) for compressible data
-- // due to more bookkeeping, but for non-compressible data (such as
-- // JPEG) it's a huge win since the compressor quickly ""realizes"" the
-- // data is incompressible and doesn't bother looking for matches
-- // everywhere.
-- //
-- // The ""skip"" variable keeps track of how many bytes there are since
-- // the last match; dividing it by 32 (ie. right-shifting by five) gives
-- // the number of bytes to move ahead for each iteration.
-- skip := int32(32)
--
-- nextS := s
-- var candidate tableEntry
-- var candidateAlt tableEntry
-- for {
-- s = nextS
-- bytesBetweenHashLookups := skip >> 5
-- nextS = s + bytesBetweenHashLookups
-- skip += bytesBetweenHashLookups
-- if nextS > sLimit {
-- goto emitRemainder
-- }
-- candidates := e.table[nextHash&tableMask]
-- now := load3232(src, nextS)
-- e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
-- nextHash = hash(now)
--
-- // Check both candidates
-- candidate = candidates.Cur
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset < maxMatchOffset {
-- offset = s - (candidates.Prev.offset - e.cur)
-- if cv == candidates.Prev.val && offset < maxMatchOffset {
-- candidateAlt = candidates.Prev
-- }
-- break
-- }
-- } else {
-- // We only check if value mismatches.
-- // Offset will always be invalid in other cases.
-- candidate = candidates.Prev
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset < maxMatchOffset {
-- break
-- }
-- }
-- }
-- cv = now
-- }
--
-- // A 4-byte match has been found. We'll later see if more than 4 bytes
-- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-- // them as literal bytes.
-- emitLiteral(dst, src[nextEmit:s])
--
-- // Call emitCopy, and then see if another emitCopy could be our next
-- // move. Repeat until we find no match for the input immediately after
-- // what was consumed by the last emitCopy call.
-- //
-- // If we exit this loop normally then we need to call emitLiteral next,
-- // though we don't yet know how big the literal will be. We handle that
-- // by proceeding to the next iteration of the main loop. We also can
-- // exit this loop via goto if we get close to exhausting the input.
-- for {
-- // Invariant: we have a 4-byte match at s, and no need to emit any
-- // literal bytes prior to s.
--
-- // Extend the 4-byte match as long as possible.
-- //
-- s += 4
-- t := candidate.offset - e.cur + 4
-- l := e.matchlen(s, t, src)
-- // Try alternative candidate if match length < matchLenGood.
-- if l < matchLenGood-4 && candidateAlt.offset != 0 {
-- t2 := candidateAlt.offset - e.cur + 4
-- l2 := e.matchlen(s, t2, src)
-- if l2 > l {
-- l = l2
-- t = t2
-- }
-- }
-- // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
-- dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
-- dst.n++
-- s += l
-- nextEmit = s
-- if s >= sLimit {
-- t += l
-- // Index first pair after match end.
-- if int(t+4) < len(src) && t > 0 {
-- cv := load3232(src, t)
-- nextHash = hash(cv)
-- e.table[nextHash&tableMask] = tableEntryPrev{
-- Prev: e.table[nextHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + t, val: cv},
-- }
-- }
-- goto emitRemainder
-- }
--
-- // We could immediately start working at s now, but to improve
-- // compression we first update the hash table at s-3 to s. If
-- // another emitCopy is not our next move, also calculate nextHash
-- // at s+1. At least on GOARCH=amd64, these three hash calculations
-- // are faster as one load64 call (with some shifts) instead of
-- // three load32 calls.
-- x := load6432(src, s-3)
-- prevHash := hash(uint32(x))
-- e.table[prevHash&tableMask] = tableEntryPrev{
-- Prev: e.table[prevHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
-- }
-- x >>= 8
-- prevHash = hash(uint32(x))
--
-- e.table[prevHash&tableMask] = tableEntryPrev{
-- Prev: e.table[prevHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
-- }
-- x >>= 8
-- prevHash = hash(uint32(x))
--
-- e.table[prevHash&tableMask] = tableEntryPrev{
-- Prev: e.table[prevHash&tableMask].Cur,
-- Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
-- }
-- x >>= 8
-- currHash := hash(uint32(x))
-- candidates := e.table[currHash&tableMask]
-- cv = uint32(x)
-- e.table[currHash&tableMask] = tableEntryPrev{
-- Prev: candidates.Cur,
-- Cur: tableEntry{offset: s + e.cur, val: cv},
-- }
--
-- // Check both candidates
-- candidate = candidates.Cur
-- candidateAlt = tableEntry{}
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset <= maxMatchOffset {
-- offset = s - (candidates.Prev.offset - e.cur)
-- if cv == candidates.Prev.val && offset <= maxMatchOffset {
-- candidateAlt = candidates.Prev
-- }
-- continue
-- }
-- } else {
-- // We only check if value mismatches.
-- // Offset will always be invalid in other cases.
-- candidate = candidates.Prev
-- if cv == candidate.val {
-- offset := s - (candidate.offset - e.cur)
-- if offset <= maxMatchOffset {
-- continue
-- }
-- }
-- }
-- cv = uint32(x >> 8)
-- nextHash = hash(cv)
-- s++
-- break
-- }
-- }
--
--emitRemainder:
-- if int(nextEmit) < len(src) {
-- emitLiteral(dst, src[nextEmit:])
-- }
-- e.cur += int32(len(src))
-- e.prev = e.prev[:len(src)]
-- copy(e.prev, src)
--}
--
--func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
-- s1 := int(s) + maxMatchLength - 4
-- if s1 > len(src) {
-- s1 = len(src)
-- }
--
-- // If we are inside the current block
-- if t >= 0 {
-- b := src[t:]
-- a := src[s:s1]
-- b = b[:len(a)]
-- // Extend the match to be as long as possible.
-- for i := range a {
-- if a[i] != b[i] {
-- return int32(i)
-- }
-- }
-- return int32(len(a))
-- }
--
-- // We found a match in the previous block.
-- tp := int32(len(e.prev)) + t
-- if tp < 0 {
-- return 0
-- }
--
-- // Extend the match to be as long as possible.
-- a := src[s:s1]
-- b := e.prev[tp:]
-- if len(b) > len(a) {
-- b = b[:len(a)]
-- }
-- a = a[:len(b)]
-- for i := range b {
-- if a[i] != b[i] {
-- return int32(i)
-- }
-- }
--
-- // If we reached our limit, we matched everything we are
-- // allowed to in the previous block and we return.
-- n := int32(len(b))
-- if int(s+n) == s1 {
-- return n
-- }
--
-- // Continue looking for more matches in the current block.
-- a = src[s+n : s1]
-- b = src[:len(a)]
-- for i := range a {
-- if a[i] != b[i] {
-- return int32(i) + n
-- }
-- }
-- return int32(len(a)) + n
--}
--
--// Reset the encoding table.
--func (e *snappyGen) Reset() {
-- e.prev = e.prev[:0]
-- e.cur += maxMatchOffset
--}
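The deleted snappyGen encoder above relies on the Snappy match-skipping heuristic spelled out in its comments: after 32 scanned bytes without a match the scan starts stepping over bytes, so incompressible input is abandoned quickly while compressible input keeps being probed byte by byte. A minimal, self-contained sketch of that idea follows; it is illustrative only, and the hash constant, table size, and helper names are assumptions rather than the vendored code.

```go
package main

import "fmt"

// hash4 maps a 4-byte little-endian value to a small table index.
// The multiplier and shift are illustrative, not the vendored constants.
func hash4(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> 20 // 12-bit table index
}

// load32 reads 4 bytes starting at i as a little-endian uint32.
func load32(b []byte, i int) uint32 {
	return uint32(b[i]) | uint32(b[i+1])<<8 | uint32(b[i+2])<<16 | uint32(b[i+3])<<24
}

// countMatches scans src with the Snappy-style skip heuristic: after every
// miss the skip counter grows, and the step size is skip>>5, so runs of
// non-matching data are crossed in ever larger strides.
func countMatches(src []byte) int {
	var table [1 << 12]int // last position seen for each hash bucket
	matches := 0
	skip := 32
	for s := 1; s+4 <= len(src); {
		cv := load32(src, s)
		h := hash4(cv)
		cand := table[h]
		table[h] = s
		if cand != 0 && load32(src, cand) == cv {
			matches++
			skip = 32 // a hit resets the scan to every byte
			s++
			continue
		}
		s += 1 + skip>>5 // no hit: step grows as misses accumulate
		skip++
	}
	return matches
}

func main() {
	repetitive := []byte("abcabcabcabcabcabcabcabcabcabcabcabc")
	random := []byte("q8e1zk72mw0varj5ncx9ghb3ytd6ofsl4upi")
	fmt.Println("matches, repetitive input:", countMatches(repetitive))
	fmt.Println("matches, random-ish input:", countMatches(random))
}
```

On the repetitive input the table keeps hitting, so the scan stays byte-by-byte; on the random-ish input the growing skip lets it give up on matching far sooner.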
-diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
-new file mode 100644
-index 0000000000000..a4705119757d7
---- /dev/null
-+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
-@@ -0,0 +1,266 @@
-+package flate
-+
-+import (
-+ ""io""
-+ ""math""
-+ ""sync""
-+)
-+
-+const (
-+ maxStatelessBlock = math.MaxInt16
-+
-+ slTableBits = 13
-+ slTableSize = 1 << slTableBits
-+ slTableShift = 32 - slTableBits
-+)
-+
-+type statelessWriter struct {
-+ dst io.Writer
-+ closed bool
-+}
-+
-+func (s *statelessWriter) Close() error {
-+ if s.closed {
-+ return nil
-+ }
-+ s.closed = true
-+ // Emit EOF block
-+ return StatelessDeflate(s.dst, nil, true)
-+}
-+
-+func (s *statelessWriter) Write(p []byte) (n int, err error) {
-+ err = StatelessDeflate(s.dst, p, false)
-+ if err != nil {
-+ return 0, err
-+ }
-+ return len(p), nil
-+}
-+
-+func (s *statelessWriter) Reset(w io.Writer) {
-+ s.dst = w
-+ s.closed = false
-+}
-+
-+// NewStatelessWriter will do compression but without maintaining any state
-+// between Write calls.
-+// There will be no memory kept between Write calls,
-+// but compression and speed will be suboptimal.
-+// Because of this, the size of actual Write calls will affect output size.
-+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
-+ return &statelessWriter{dst: dst}
-+}
-+
-+// bitWriterPool contains bit writers that can be reused.
-+var bitWriterPool = sync.Pool{
-+ New: func() interface{} {
-+ return newHuffmanBitWriter(nil)
-+ },
-+}
-+
-+// StatelessDeflate allows to compress directly to a Writer without retaining state.
-+// When returning everything will be flushed.
-+func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
-+ var dst tokens
-+ bw := bitWriterPool.Get().(*huffmanBitWriter)
-+ bw.reset(out)
-+ defer func() {
-+ // don't keep a reference to our output
-+ bw.reset(nil)
-+ bitWriterPool.Put(bw)
-+ }()
-+ if eof && len(in) == 0 {
-+ // Just write an EOF block.
-+ // Could be faster...
-+ bw.writeStoredHeader(0, true)
-+ bw.flush()
-+ return bw.err
-+ }
-+
-+ for len(in) > 0 {
-+ todo := in
-+ if len(todo) > maxStatelessBlock {
-+ todo = todo[:maxStatelessBlock]
-+ }
-+ in = in[len(todo):]
-+ // Compress
-+ statelessEnc(&dst, todo)
-+ isEof := eof && len(in) == 0
-+
-+ if dst.n == 0 {
-+ bw.writeStoredHeader(len(todo), isEof)
-+ if bw.err != nil {
-+ return bw.err
-+ }
-+ bw.writeBytes(todo)
-+ } else if int(dst.n) > len(todo)-len(todo)>>4 {
-+ // If we removed less than 1/16th, huffman compress the block.
-+ bw.writeBlockHuff(isEof, todo, false)
-+ } else {
-+ bw.writeBlockDynamic(&dst, isEof, todo, false)
-+ }
-+ if bw.err != nil {
-+ return bw.err
-+ }
-+ dst.Reset()
-+ }
-+ if !eof {
-+ // Align.
-+ bw.writeStoredHeader(0, false)
-+ }
-+ bw.flush()
-+ return bw.err
-+}
-+
-+func hashSL(u uint32) uint32 {
-+ return (u * 0x1e35a7bd) >> slTableShift
-+}
-+
-+func load3216(b []byte, i int16) uint32 {
-+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-+ b = b[i:]
-+ b = b[:4]
-+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-+}
-+
-+func load6416(b []byte, i int16) uint64 {
-+ // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-+ b = b[i:]
-+ b = b[:8]
-+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-+}
-+
-+func statelessEnc(dst *tokens, src []byte) {
-+ const (
-+ inputMargin = 12 - 1
-+ minNonLiteralBlockSize = 1 + 1 + inputMargin
-+ )
-+
-+ type tableEntry struct {
-+ offset int16
-+ }
-+
-+ var table [slTableSize]tableEntry
-+
-+ // This check isn't in the Snappy implementation, but there, the caller
-+ // instead of the callee handles this case.
-+ if len(src) < minNonLiteralBlockSize {
-+ // We do not fill the token table.
-+ // This will be picked up by caller.
-+ dst.n = uint16(len(src))
-+ return
-+ }
-+
-+ s := int16(1)
-+ nextEmit := int16(0)
-+ // sLimit is when to stop looking for offset/length copies. The inputMargin
-+ // lets us use a fast path for emitLiteral in the main loop, while we are
-+ // looking for copies.
-+ sLimit := int16(len(src) - inputMargin)
-+
-+ // nextEmit is where in src the next emitLiteral should start from.
-+ cv := load3216(src, s)
-+
-+ for {
-+ const skipLog = 5
-+ const doEvery = 2
-+
-+ nextS := s
-+ var candidate tableEntry
-+ for {
-+ nextHash := hashSL(cv)
-+ candidate = table[nextHash]
-+ nextS = s + doEvery + (s-nextEmit)>>skipLog
-+ if nextS > sLimit || nextS <= 0 {
-+ goto emitRemainder
-+ }
-+
-+ now := load6416(src, nextS)
-+ table[nextHash] = tableEntry{offset: s}
-+ nextHash = hashSL(uint32(now))
-+
-+ if cv == load3216(src, candidate.offset) {
-+ table[nextHash] = tableEntry{offset: nextS}
-+ break
-+ }
-+
-+ // Do one right away...
-+ cv = uint32(now)
-+ s = nextS
-+ nextS++
-+ candidate = table[nextHash]
-+ now >>= 8
-+ table[nextHash] = tableEntry{offset: s}
-+
-+ if cv == load3216(src, candidate.offset) {
-+ table[nextHash] = tableEntry{offset: nextS}
-+ break
-+ }
-+ cv = uint32(now)
-+ s = nextS
-+ }
-+
-+ // A 4-byte match has been found. We'll later see if more than 4 bytes
-+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-+ // them as literal bytes.
-+ for {
-+ // Invariant: we have a 4-byte match at s, and no need to emit any
-+ // literal bytes prior to s.
-+
-+ // Extend the 4-byte match as long as possible.
-+ t := candidate.offset
-+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
-+
-+ // Extend backwards
-+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-+ s--
-+ t--
-+ l++
-+ }
-+ if nextEmit < s {
-+ emitLiteral(dst, src[nextEmit:s])
-+ }
-+
-+ // Save the match found
-+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
-+ s += l
-+ nextEmit = s
-+ if nextS >= s {
-+ s = nextS + 1
-+ }
-+ if s >= sLimit {
-+ goto emitRemainder
-+ }
-+
-+ // We could immediately start working at s now, but to improve
-+ // compression we first update the hash table at s-2 and at s. If
-+ // another emitCopy is not our next move, also calculate nextHash
-+ // at s+1. At least on GOARCH=amd64, these three hash calculations
-+ // are faster as one load64 call (with some shifts) instead of
-+ // three load32 calls.
-+ x := load6416(src, s-2)
-+ o := s - 2
-+ prevHash := hashSL(uint32(x))
-+ table[prevHash] = tableEntry{offset: o}
-+ x >>= 16
-+ currHash := hashSL(uint32(x))
-+ candidate = table[currHash]
-+ table[currHash] = tableEntry{offset: o + 2}
-+
-+ if uint32(x) != load3216(src, candidate.offset) {
-+ cv = uint32(x >> 8)
-+ s++
-+ break
-+ }
-+ }
-+ }
-+
-+emitRemainder:
-+ if int(nextEmit) < len(src) {
-+ // If nothing was added, don't encode literals.
-+ if dst.n == 0 {
-+ return
-+ }
-+ emitLiteral(dst, src[nextEmit:])
-+ }
-+}
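The new stateless.go exposes two entry points, NewStatelessWriter and StatelessDeflate, which trade some compression ratio for keeping no state between Write calls. A usage sketch follows, assuming the package is imported at its canonical path github.com/klauspost/compress/flate rather than through this repo's vendor tree; the output is ordinary DEFLATE, so the standard library reader can verify the round trip.

```go
package main

import (
	stdflate "compress/flate"
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate" // the package added in stateless.go above
)

func main() {
	payload := bytes.Repeat([]byte("stateless deflate keeps no state between writes. "), 20)

	// Compress with the stateless writer; nothing is buffered between Write calls,
	// so the size of each Write directly affects the output size.
	var buf bytes.Buffer
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write(payload); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close emits the final EOF block
		panic(err)
	}

	// The stream is plain DEFLATE, so compress/flate from the standard library can read it.
	r := stdflate.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("in=%d bytes, compressed=%d bytes, roundtrip ok=%v\n",
		len(payload), buf.Len(), bytes.Equal(out, payload))
}
```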
-diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
-index 141299b973803..b3df0d8941e12 100644
---- a/vendor/github.com/klauspost/compress/flate/token.go
-+++ b/vendor/github.com/klauspost/compress/flate/token.go
-@@ -4,6 +4,14 @@
-
- package flate
-
-+import (
-+ ""bytes""
-+ ""encoding/binary""
-+ ""fmt""
-+ ""io""
-+ ""math""
-+)
-+
- const (
- // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
- // 8 bits: xlength = length - MIN_MATCH_LENGTH
-@@ -46,6 +54,36 @@ var lengthCodes = [256]uint8{
- 27, 27, 27, 27, 27, 28,
- }
-
-+// lengthCodes1 is length codes, but starting at 1.
-+var lengthCodes1 = [256]uint8{
-+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
-+ 10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
-+ 14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
-+ 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
-+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
-+ 19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
-+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
-+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
-+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
-+ 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
-+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
-+ 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
-+ 24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
-+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
-+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
-+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
-+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
-+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
-+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
-+ 26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
-+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-+ 27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
-+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
-+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
-+ 28, 28, 28, 28, 28, 29,
-+}
-+
- var offsetCodes = [256]uint32{
- 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
- 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
-@@ -65,19 +103,236 @@ var offsetCodes = [256]uint32{
- 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
- }
-
-+// offsetCodes14 are offsetCodes, but with 14 added.
-+var offsetCodes14 = [256]uint32{
-+ 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
-+ 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
-+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
-+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
-+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
-+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
-+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
-+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
-+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
-+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
-+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
-+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
-+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
-+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
-+}
-+
- type token uint32
-
- type tokens struct {
-- tokens [maxStoreBlockSize + 1]token
-- n uint16 // Must be able to contain maxStoreBlockSize
-+ nLits int
-+ extraHist [32]uint16 // codes 256->maxnumlit
-+ offHist [32]uint16 // offset codes
-+ litHist [256]uint16 // codes 0->255
-+ n uint16 // Must be able to contain maxStoreBlockSize
-+ tokens [maxStoreBlockSize + 1]token
-+}
-+
-+func (t *tokens) Reset() {
-+ if t.n == 0 {
-+ return
-+ }
-+ t.n = 0
-+ t.nLits = 0
-+ for i := range t.litHist[:] {
-+ t.litHist[i] = 0
-+ }
-+ for i := range t.extraHist[:] {
-+ t.extraHist[i] = 0
-+ }
-+ for i := range t.offHist[:] {
-+ t.offHist[i] = 0
-+ }
-+}
-+
-+func (t *tokens) Fill() {
-+ if t.n == 0 {
-+ return
-+ }
-+ for i, v := range t.litHist[:] {
-+ if v == 0 {
-+ t.litHist[i] = 1
-+ t.nLits++
-+ }
-+ }
-+ for i, v := range t.extraHist[:literalCount-256] {
-+ if v == 0 {
-+ t.nLits++
-+ t.extraHist[i] = 1
-+ }
-+ }
-+ for i, v := range t.offHist[:offsetCodeCount] {
-+ if v == 0 {
-+ t.offHist[i] = 1
-+ }
-+ }
-+}
-+
-+func indexTokens(in []token) tokens {
-+ var t tokens
-+ t.indexTokens(in)
-+ return t
-+}
-+
-+func (t *tokens) indexTokens(in []token) {
-+ t.Reset()
-+ for _, tok := range in {
-+ if tok < matchType {
-+ t.tokens[t.n] = tok
-+ t.litHist[tok]++
-+ t.n++
-+ continue
-+ }
-+ t.AddMatch(uint32(tok.length()), tok.offset())
-+ }
- }
-
--// Convert a literal into a literal token.
--func literalToken(literal uint32) token { return token(literalType + literal) }
-+// emitLiteral writes a literal chunk and returns the number of bytes written.
-+func emitLiteral(dst *tokens, lit []byte) {
-+ ol := int(dst.n)
-+ for i, v := range lit {
-+ dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
-+ dst.litHist[v]++
-+ }
-+ dst.n += uint16(len(lit))
-+ dst.nLits += len(lit)
-+}
-
--// Convert a < xlength, xoffset > pair into a match token.
--func matchToken(xlength uint32, xoffset uint32) token {
-- return token(matchType + xlength<<lengthShift + xoffset)
--}
-+func (t *tokens) EstimatedBits() int {
-+ shannon := float64(0)
-+ bits := int(0)
-+ nMatches := 0
-+ if t.nLits > 0 {
-+ invTotal := 1.0 / float64(t.nLits)
-+ for _, v := range t.litHist[:] {
-+ if v > 0 {
-+ n := float64(v)
-+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
-+ }
-+ }
-+ // Just add 15 for EOB
-+ shannon += 15
-+ for _, v := range t.extraHist[1 : literalCount-256] {
-+ if v > 0 {
-+ n := float64(v)
-+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
-+ bits += int(lengthExtraBits[v&31]) * int(v)
-+ nMatches += int(v)
-+ }
-+ }
-+ }
-+ if nMatches > 0 {
-+ invTotal := 1.0 / float64(nMatches)
-+ for _, v := range t.offHist[:offsetCodeCount] {
-+ if v > 0 {
-+ n := float64(v)
-+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
-+ bits += int(offsetExtraBits[v&31]) * int(n)
-+ }
-+ }
-+ }
-+
-+ return int(shannon) + bits
-+}
-+
-+// AddMatch adds a match to the tokens.
-+// This function is very sensitive to inlining and right on the border.
-+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
-+ if debugDecode {
-+ if xlength >= maxMatchLength+baseMatchLength {
-+ panic(fmt.Errorf(""invalid length: %v"", xlength))
-+ }
-+ if xoffset >= maxMatchOffset+baseMatchOffset {
-+ panic(fmt.Errorf(""invalid offset: %v"", xoffset))
-+ }
-+ }
-+ t.nLits++
-+ lengthCode := lengthCodes1[uint8(xlength)] & 31
-+ t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
-+ t.extraHist[lengthCode]++
-+ t.offHist[offsetCode(xoffset)&31]++
-+ t.n++
-+}
-+
-+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
-+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
-+ if debugDecode {
-+ if xoffset >= maxMatchOffset+baseMatchOffset {
-+ panic(fmt.Errorf(""invalid offset: %v"", xoffset))
-+ }
-+ }
-+ oc := offsetCode(xoffset) & 31
-+ for xlength > 0 {
-+ xl := xlength
-+ if xl > 258 {
-+ // We need to have at least baseMatchLength left over for next loop.
-+ xl = 258 - baseMatchLength
-+ }
-+ xlength -= xl
-+ xl -= 3
-+ t.nLits++
-+ lengthCode := lengthCodes1[uint8(xl)] & 31
-+ t.tokens[t.n] = token(matchType | uint32(xl)<>7 < uint32(len(offsetCodes)) {
-+ return offsetCodes[(off>>7)&255] + 14
-+ } else {
-+ return offsetCodes[(off>>14)&255] + 28
-+ }
-+ }
- if off < uint32(len(offsetCodes)) {
-- return offsetCodes[off&255]
-- } else if off>>7 < uint32(len(offsetCodes)) {
-- return offsetCodes[(off>>7)&255] + 14
-- } else {
-- return offsetCodes[(off>>14)&255] + 28
-+ return offsetCodes[uint8(off)]
- }
-+ return offsetCodes14[uint8(off>>7)]
- }
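The reworked tokens type above now carries literal, length, and offset histograms (litHist, extraHist, offHist), and those counts feed a Shannon-entropy style size estimate (the partly garbled EstimatedBits body above follows the math.Ceil(-math.Log2(...)) pattern). A standalone sketch of that estimate, illustrative rather than the vendored implementation:

```go
package main

import (
	"fmt"
	"math"
)

// estimatedBits returns a lower bound on the number of bits an optimal
// prefix code would need for the given symbol histogram: the Shannon
// information of each symbol, rounded up per occurrence.
func estimatedBits(hist []uint16) int {
	total := 0
	for _, v := range hist {
		total += int(v)
	}
	if total == 0 {
		return 0
	}
	invTotal := 1.0 / float64(total)
	bits := 0.0
	for _, v := range hist {
		if v > 0 {
			n := float64(v)
			bits += math.Ceil(-math.Log2(n*invTotal) * n)
		}
	}
	return int(bits)
}

func main() {
	// A skewed literal histogram compresses much better than a flat one.
	skewed := make([]uint16, 256)
	skewed['a'] = 900
	skewed['b'] = 100
	flat := make([]uint16, 256)
	for i := range flat {
		flat[i] = 4
	}
	fmt.Println("skewed histogram:", estimatedBits(skewed), "bits")
	fmt.Println("flat histogram:  ", estimatedBits(flat), "bits")
}
```

An estimate like this lets an encoder decide whether a dynamic Huffman block is worth emitting or whether a stored or fixed block would be cheaper.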
-diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go
-index 7da7ee7486eb8..ed0cc148f8c77 100644
---- a/vendor/github.com/klauspost/compress/gzip/gzip.go
-+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go
-@@ -22,6 +22,13 @@ const (
- DefaultCompression = flate.DefaultCompression
- ConstantCompression = flate.ConstantCompression
- HuffmanOnly = flate.HuffmanOnly
-+
-+ // StatelessCompression will do compression but without maintaining any state
-+ // between Write calls.
-+ // There will be no memory kept between Write calls,
-+ // but compression and speed will be suboptimal.
-+ // Because of this, the size of actual Write calls will affect output size.
-+ StatelessCompression = -3
- )
-
- // A Writer is an io.WriteCloser.
-@@ -59,7 +66,7 @@ func NewWriter(w io.Writer) *Writer {
- // integer value between BestSpeed and BestCompression inclusive. The error
- // returned will be nil if the level is valid.
- func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
-- if level < HuffmanOnly || level > BestCompression {
-+ if level < StatelessCompression || level > BestCompression {
- return nil, fmt.Errorf(""gzip: invalid compression level: %d"", level)
- }
- z := new(Writer)
-@@ -69,9 +76,12 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
-
- func (z *Writer) init(w io.Writer, level int) {
- compressor := z.compressor
-- if compressor != nil {
-- compressor.Reset(w)
-+ if level != StatelessCompression {
-+ if compressor != nil {
-+ compressor.Reset(w)
-+ }
- }
-+
- *z = Writer{
- Header: Header{
- OS: 255, // unknown
-@@ -189,12 +199,16 @@ func (z *Writer) Write(p []byte) (int, error) {
- return n, z.err
- }
- }
-- if z.compressor == nil {
-+
-+ if z.compressor == nil && z.level != StatelessCompression {
- z.compressor, _ = flate.NewWriter(z.w, z.level)
- }
- }
- z.size += uint32(len(p))
- z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
-+ if z.level == StatelessCompression {
-+ return len(p), flate.StatelessDeflate(z.w, p, false)
-+ }
- n, z.err = z.compressor.Write(p)
- return n, z.err
- }
-@@ -211,7 +225,7 @@ func (z *Writer) Flush() error {
- if z.err != nil {
- return z.err
- }
-- if z.closed {
-+ if z.closed || z.level == StatelessCompression {
- return nil
- }
- if !z.wroteHeader {
-@@ -240,7 +254,11 @@ func (z *Writer) Close() error {
- return z.err
- }
- }
-- z.err = z.compressor.Close()
-+ if z.level == StatelessCompression {
-+ z.err = flate.StatelessDeflate(z.w, nil, true)
-+ } else {
-+ z.err = z.compressor.Close()
-+ }
- if z.err != nil {
- return z.err
- }
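The gzip changes wire the stateless path in as a new level, StatelessCompression (-3): header, CRC, and size handling stay the same, but each Write is handed straight to flate.StatelessDeflate instead of a persistent compressor, and Close emits the final EOF block the same way. A usage sketch, again assuming the canonical import path rather than this repo's vendor directory:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip" // the package patched above
)

func main() {
	payload := bytes.Repeat([]byte("log line that repeats fairly often\n"), 50)

	var buf bytes.Buffer
	// StatelessCompression compresses every Write independently,
	// so no memory is retained between calls.
	zw, err := gzip.NewWriterLevel(&buf, gzip.StatelessCompression)
	if err != nil {
		panic(err)
	}
	if _, err := zw.Write(payload); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// The result is a normal gzip stream and reads back with the usual reader.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d -> %d bytes, roundtrip ok=%v\n",
		len(payload), buf.Len(), bytes.Equal(out, payload))
}
```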
-diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore
-deleted file mode 100644
-index daf913b1b347a..0000000000000
---- a/vendor/github.com/klauspost/cpuid/.gitignore
-+++ /dev/null
-@@ -1,24 +0,0 @@
--# Compiled Object files, Static and Dynamic libs (Shared Objects)
--*.o
--*.a
--*.so
--
--# Folders
--_obj
--_test
--
--# Architecture specific extensions/prefixes
--*.[568vq]
--[568vq].out
--
--*.cgo1.go
--*.cgo2.c
--_cgo_defun.c
--_cgo_gotypes.go
--_cgo_export.*
--
--_testmain.go
--
--*.exe
--*.test
--*.prof
-diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml
-deleted file mode 100644
-index 630192d597b2e..0000000000000
---- a/vendor/github.com/klauspost/cpuid/.travis.yml
-+++ /dev/null
-@@ -1,23 +0,0 @@
--language: go
--
--sudo: false
--
--os:
-- - linux
-- - osx
--go:
-- - 1.8.x
-- - 1.9.x
-- - 1.10.x
-- - master
--
--script:
-- - go vet ./...
-- - go test -v ./...
-- - go test -race ./...
-- - diff <(gofmt -d .) <("""")
--
--matrix:
-- allow_failures:
-- - go: 'master'
-- fast_finish: true
-diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
-deleted file mode 100644
-index 2ef4714f7165b..0000000000000
---- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
-+++ /dev/null
-@@ -1,35 +0,0 @@
--Developer Certificate of Origin
--Version 1.1
--
--Copyright (C) 2015- Klaus Post & Contributors.
--Email: klauspost@gmail.com
--
--Everyone is permitted to copy and distribute verbatim copies of this
--license document, but changing it is not allowed.
--
--
--Developer's Certificate of Origin 1.1
--
--By making a contribution to this project, I certify that:
--
--(a) The contribution was created in whole or in part by me and I
-- have the right to submit it under the open source license
-- indicated in the file; or
--
--(b) The contribution is based upon previous work that, to the best
-- of my knowledge, is covered under an appropriate open source
-- license and I have the right under that license to submit that
-- work with modifications, whether created in whole or in part
-- by me, under the same open source license (unless I am
-- permitted to submit under a different license), as indicated
-- in the file; or
--
--(c) The contribution was provided directly to me by some other
-- person who certified (a), (b) or (c) and I have not modified
-- it.
--
--(d) I understand and agree that this project and the contribution
-- are public and that a record of the contribution (including all
-- personal information I submit with it, including my sign-off) is
-- maintained indefinitely and may be redistributed consistent with
-- this project or the open source license(s) involved.
-diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE
-deleted file mode 100644
-index 5cec7ee949b10..0000000000000
---- a/vendor/github.com/klauspost/cpuid/LICENSE
-+++ /dev/null
-@@ -1,22 +0,0 @@
--The MIT License (MIT)
--
--Copyright (c) 2015 Klaus Post
--
--Permission is hereby granted, free of charge, to any person obtaining a copy
--of this software and associated documentation files (the ""Software""), to deal
--in the Software without restriction, including without limitation the rights
--to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
--copies of the Software, and to permit persons to whom the Software is
--furnished to do so, subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in all
--copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED ""AS IS"", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
--AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
--OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
--SOFTWARE.
--
-diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md
-deleted file mode 100644
-index a7fb41fbecbc3..0000000000000
---- a/vendor/github.com/klauspost/cpuid/README.md
-+++ /dev/null
-@@ -1,147 +0,0 @@
--# cpuid
--Package cpuid provides information about the CPU running the current program.
--
--CPU features are detected on startup, and kept for fast access through the life of the application.
--Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
--
--You can access the CPU information by accessing the shared CPU variable of the cpuid library.
--
--Package home: https://github.com/klauspost/cpuid
--
--[![GoDoc][1]][2] [![Build Status][3]][4]
--
--[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
--[2]: https://godoc.org/github.com/klauspost/cpuid
--[3]: https://travis-ci.org/klauspost/cpuid.svg
--[4]: https://travis-ci.org/klauspost/cpuid
--
--# features
--## CPU Instructions
--* **CMOV** (i686 CMOV)
--* **NX** (NX (No-Execute) bit)
--* **AMD3DNOW** (AMD 3DNOW)
--* **AMD3DNOWEXT** (AMD 3DNowExt)
--* **MMX** (standard MMX)
--* **MMXEXT** (SSE integer functions or AMD MMX ext)
--* **SSE** (SSE functions)
--* **SSE2** (P4 SSE functions)
--* **SSE3** (Prescott SSE3 functions)
--* **SSSE3** (Conroe SSSE3 functions)
--* **SSE4** (Penryn SSE4.1 functions)
--* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
--* **SSE42** (Nehalem SSE4.2 functions)
--* **AVX** (AVX functions)
--* **AVX2** (AVX2 functions)
--* **FMA3** (Intel FMA 3)
--* **FMA4** (Bulldozer FMA4 functions)
--* **XOP** (Bulldozer XOP functions)
--* **F16C** (Half-precision floating-point conversion)
--* **BMI1** (Bit Manipulation Instruction Set 1)
--* **BMI2** (Bit Manipulation Instruction Set 2)
--* **TBM** (AMD Trailing Bit Manipulation)
--* **LZCNT** (LZCNT instruction)
--* **POPCNT** (POPCNT instruction)
--* **AESNI** (Advanced Encryption Standard New Instructions)
--* **CLMUL** (Carry-less Multiplication)
--* **HTT** (Hyperthreading (enabled))
--* **HLE** (Hardware Lock Elision)
--* **RTM** (Restricted Transactional Memory)
--* **RDRAND** (RDRAND instruction is available)
--* **RDSEED** (RDSEED instruction is available)
--* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
--* **SHA** (Intel SHA Extensions)
--* **AVX512F** (AVX-512 Foundation)
--* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
--* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
--* **AVX512PF** (AVX-512 Prefetch Instructions)
--* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
--* **AVX512CD** (AVX-512 Conflict Detection Instructions)
--* **AVX512BW** (AVX-512 Byte and Word Instructions)
--* **AVX512VL** (AVX-512 Vector Length Extensions)
--* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
--* **MPX** (Intel MPX (Memory Protection Extensions))
--* **ERMS** (Enhanced REP MOVSB/STOSB)
--* **RDTSCP** (RDTSCP Instruction)
--* **CX16** (CMPXCHG16B Instruction)
--* **SGX** (Software Guard Extensions, with activation details)
--
--## Performance
--* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
--* **SSE2SLOW** (SSE2 is supported, but usually not faster)
--* **SSE3SLOW** (SSE3 is supported, but usually not faster)
--* **ATOM** (Atom processor, some SSSE3 instructions are slower)
--* **Cache line** (Probable size of a cache line).
--* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
--
--## Cpu Vendor/VM
--* **Intel**
--* **AMD**
--* **VIA**
--* **Transmeta**
--* **NSC**
--* **KVM** (Kernel-based Virtual Machine)
--* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
--* **VMware**
--* **XenHVM**
--* **Bhyve**
--* **Hygon**
--
--# installing
--
--```go get github.com/klauspost/cpuid```
--
--# example
--
--```Go
--package main
--
--import (
-- ""fmt""
-- ""github.com/klauspost/cpuid""
--)
--
--func main() {
-- // Print basic CPU information:
-- fmt.Println(""Name:"", cpuid.CPU.BrandName)
-- fmt.Println(""PhysicalCores:"", cpuid.CPU.PhysicalCores)
-- fmt.Println(""ThreadsPerCore:"", cpuid.CPU.ThreadsPerCore)
-- fmt.Println(""LogicalCores:"", cpuid.CPU.LogicalCores)
-- fmt.Println(""Family"", cpuid.CPU.Family, ""Model:"", cpuid.CPU.Model)
-- fmt.Println(""Features:"", cpuid.CPU.Features)
-- fmt.Println(""Cacheline bytes:"", cpuid.CPU.CacheLine)
-- fmt.Println(""L1 Data Cache:"", cpuid.CPU.Cache.L1D, ""bytes"")
-- fmt.Println(""L1 Instruction Cache:"", cpuid.CPU.Cache.L1D, ""bytes"")
-- fmt.Println(""L2 Cache:"", cpuid.CPU.Cache.L2, ""bytes"")
-- fmt.Println(""L3 Cache:"", cpuid.CPU.Cache.L3, ""bytes"")
--
-- // Test if we have a specific feature:
-- if cpuid.CPU.SSE() {
-- fmt.Println(""We have Streaming SIMD Extensions"")
-- }
--}
--```
--
--Sample output:
--```
-->go run main.go
--Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
--PhysicalCores: 2
--ThreadsPerCore: 2
--LogicalCores: 4
--Family 6 Model: 42
--Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
--Cacheline bytes: 64
--We have Streaming SIMD Extensions
--```
--
--# private package
--
--In the ""private"" folder you can find an autogenerated version of the library you can include in your own packages.
--
--For this purpose all exports are removed, and functions and constants are lowercased.
--
--This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
--
--# license
--
--This code is published under an MIT license. See LICENSE file for more information.
-diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go
-deleted file mode 100644
-index db95913212311..0000000000000
---- a/vendor/github.com/klauspost/cpuid/cpuid.go
-+++ /dev/null
-@@ -1,1049 +0,0 @@
--// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
--
--// Package cpuid provides information about the CPU running the current program.
--//
--// CPU features are detected on startup, and kept for fast access through the life of the application.
--// Currently x86 / x64 (AMD64) is supported.
--//
--// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
--//
--// Package home: https://github.com/klauspost/cpuid
--package cpuid
--
--import ""strings""
--
--// Vendor is a representation of a CPU vendor.
--type Vendor int
--
--const (
-- Other Vendor = iota
-- Intel
-- AMD
-- VIA
-- Transmeta
-- NSC
-- KVM // Kernel-based Virtual Machine
-- MSVM // Microsoft Hyper-V or Windows Virtual PC
-- VMware
-- XenHVM
-- Bhyve
-- Hygon
--)
--
--const (
-- CMOV = 1 << iota // i686 CMOV
-- NX // NX (No-Execute) bit
-- AMD3DNOW // AMD 3DNOW
-- AMD3DNOWEXT // AMD 3DNowExt
-- MMX // standard MMX
-- MMXEXT // SSE integer functions or AMD MMX ext
-- SSE // SSE functions
-- SSE2 // P4 SSE functions
-- SSE3 // Prescott SSE3 functions
-- SSSE3 // Conroe SSSE3 functions
-- SSE4 // Penryn SSE4.1 functions
-- SSE4A // AMD Barcelona microarchitecture SSE4a instructions
-- SSE42 // Nehalem SSE4.2 functions
-- AVX // AVX functions
-- AVX2 // AVX2 functions
-- FMA3 // Intel FMA 3
-- FMA4 // Bulldozer FMA4 functions
-- XOP // Bulldozer XOP functions
-- F16C // Half-precision floating-point conversion
-- BMI1 // Bit Manipulation Instruction Set 1
-- BMI2 // Bit Manipulation Instruction Set 2
-- TBM // AMD Trailing Bit Manipulation
-- LZCNT // LZCNT instruction
-- POPCNT // POPCNT instruction
-- AESNI // Advanced Encryption Standard New Instructions
-- CLMUL // Carry-less Multiplication
-- HTT // Hyperthreading (enabled)
-- HLE // Hardware Lock Elision
-- RTM // Restricted Transactional Memory
-- RDRAND // RDRAND instruction is available
-- RDSEED // RDSEED instruction is available
-- ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
-- SHA // Intel SHA Extensions
-- AVX512F // AVX-512 Foundation
-- AVX512DQ // AVX-512 Doubleword and Quadword Instructions
-- AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
-- AVX512PF // AVX-512 Prefetch Instructions
-- AVX512ER // AVX-512 Exponential and Reciprocal Instructions
-- AVX512CD // AVX-512 Conflict Detection Instructions
-- AVX512BW // AVX-512 Byte and Word Instructions
-- AVX512VL // AVX-512 Vector Length Extensions
-- AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
-- MPX // Intel MPX (Memory Protection Extensions)
-- ERMS // Enhanced REP MOVSB/STOSB
-- RDTSCP // RDTSCP Instruction
-- CX16 // CMPXCHG16B Instruction
-- SGX // Software Guard Extensions
-- IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
-- STIBP // Single Thread Indirect Branch Predictors
--
-- // Performance indicators
-- SSE2SLOW // SSE2 is supported, but usually not faster
-- SSE3SLOW // SSE3 is supported, but usually not faster
-- ATOM // Atom processor, some SSSE3 instructions are slower
--)
--
--var flagNames = map[Flags]string{
-- CMOV: ""CMOV"", // i686 CMOV
-- NX: ""NX"", // NX (No-Execute) bit
-- AMD3DNOW: ""AMD3DNOW"", // AMD 3DNOW
-- AMD3DNOWEXT: ""AMD3DNOWEXT"", // AMD 3DNowExt
-- MMX: ""MMX"", // Standard MMX
-- MMXEXT: ""MMXEXT"", // SSE integer functions or AMD MMX ext
-- SSE: ""SSE"", // SSE functions
-- SSE2: ""SSE2"", // P4 SSE2 functions
-- SSE3: ""SSE3"", // Prescott SSE3 functions
-- SSSE3: ""SSSE3"", // Conroe SSSE3 functions
-- SSE4: ""SSE4.1"", // Penryn SSE4.1 functions
-- SSE4A: ""SSE4A"", // AMD Barcelona microarchitecture SSE4a instructions
-- SSE42: ""SSE4.2"", // Nehalem SSE4.2 functions
-- AVX: ""AVX"", // AVX functions
-- AVX2: ""AVX2"", // AVX functions
-- FMA3: ""FMA3"", // Intel FMA 3
-- FMA4: ""FMA4"", // Bulldozer FMA4 functions
-- XOP: ""XOP"", // Bulldozer XOP functions
-- F16C: ""F16C"", // Half-precision floating-point conversion
-- BMI1: ""BMI1"", // Bit Manipulation Instruction Set 1
-- BMI2: ""BMI2"", // Bit Manipulation Instruction Set 2
-- TBM: ""TBM"", // AMD Trailing Bit Manipulation
-- LZCNT: ""LZCNT"", // LZCNT instruction
-- POPCNT: ""POPCNT"", // POPCNT instruction
-- AESNI: ""AESNI"", // Advanced Encryption Standard New Instructions
-- CLMUL: ""CLMUL"", // Carry-less Multiplication
-- HTT: ""HTT"", // Hyperthreading (enabled)
-- HLE: ""HLE"", // Hardware Lock Elision
-- RTM: ""RTM"", // Restricted Transactional Memory
-- RDRAND: ""RDRAND"", // RDRAND instruction is available
-- RDSEED: ""RDSEED"", // RDSEED instruction is available
-- ADX: ""ADX"", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
-- SHA: ""SHA"", // Intel SHA Extensions
-- AVX512F: ""AVX512F"", // AVX-512 Foundation
-- AVX512DQ: ""AVX512DQ"", // AVX-512 Doubleword and Quadword Instructions
-- AVX512IFMA: ""AVX512IFMA"", // AVX-512 Integer Fused Multiply-Add Instructions
-- AVX512PF: ""AVX512PF"", // AVX-512 Prefetch Instructions
-- AVX512ER: ""AVX512ER"", // AVX-512 Exponential and Reciprocal Instructions
-- AVX512CD: ""AVX512CD"", // AVX-512 Conflict Detection Instructions
-- AVX512BW: ""AVX512BW"", // AVX-512 Byte and Word Instructions
-- AVX512VL: ""AVX512VL"", // AVX-512 Vector Length Extensions
-- AVX512VBMI: ""AVX512VBMI"", // AVX-512 Vector Bit Manipulation Instructions
-- MPX: ""MPX"", // Intel MPX (Memory Protection Extensions)
-- ERMS: ""ERMS"", // Enhanced REP MOVSB/STOSB
-- RDTSCP: ""RDTSCP"", // RDTSCP Instruction
-- CX16: ""CX16"", // CMPXCHG16B Instruction
-- SGX: ""SGX"", // Software Guard Extensions
-- IBPB: ""IBPB"", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier
-- STIBP: ""STIBP"", // Single Thread Indirect Branch Predictors
--
-- // Performance indicators
-- SSE2SLOW: ""SSE2SLOW"", // SSE2 supported, but usually not faster
-- SSE3SLOW: ""SSE3SLOW"", // SSE3 supported, but usually not faster
-- ATOM: ""ATOM"", // Atom processor, some SSSE3 instructions are slower
--
--}
--
--// CPUInfo contains information about the detected system CPU.
--type CPUInfo struct {
-- BrandName string // Brand name reported by the CPU
-- VendorID Vendor // Comparable CPU vendor ID
-- Features Flags // Features of the CPU
-- PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
-- ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable.
-- LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
-- Family int // CPU family number
-- Model int // CPU model number
-- CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
-- Cache struct {
-- L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
-- L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
-- L2 int // L2 Cache (per core or shared). Will be -1 if undetected
-- L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected
-- }
-- SGX SGXSupport
-- maxFunc uint32
-- maxExFunc uint32
--}
--
--var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
--var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
--var xgetbv func(index uint32) (eax, edx uint32)
--var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
--
--// CPU contains information about the CPU as detected on startup,
--// or when Detect last was called.
--//
--// Use this as the primary entry point to your data,
--// this way queries are
--var CPU CPUInfo
--
--func init() {
-- initCPU()
-- Detect()
--}
--
--// Detect will re-detect current CPU info.
--// This will replace the content of the exported CPU variable.
--//
--// Unless you expect the CPU to change while you are running your program
--// you should not need to call this function.
--// If you call this, you must ensure that no other goroutine is accessing the
--// exported CPU variable.
--func Detect() {
-- CPU.maxFunc = maxFunctionID()
-- CPU.maxExFunc = maxExtendedFunction()
-- CPU.BrandName = brandName()
-- CPU.CacheLine = cacheLine()
-- CPU.Family, CPU.Model = familyModel()
-- CPU.Features = support()
-- CPU.SGX = hasSGX(CPU.Features&SGX != 0)
-- CPU.ThreadsPerCore = threadsPerCore()
-- CPU.LogicalCores = logicalCores()
-- CPU.PhysicalCores = physicalCores()
-- CPU.VendorID = vendorID()
-- CPU.cacheSize()
--}
--
--// Generated here: http://play.golang.org/p/BxFH2Gdc0G
--
--// Cmov indicates support of CMOV instructions
--func (c CPUInfo) Cmov() bool {
-- return c.Features&CMOV != 0
--}
--
--// Amd3dnow indicates support of AMD 3DNOW! instructions
--func (c CPUInfo) Amd3dnow() bool {
-- return c.Features&AMD3DNOW != 0
--}
--
--// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
--func (c CPUInfo) Amd3dnowExt() bool {
-- return c.Features&AMD3DNOWEXT != 0
--}
--
--// MMX indicates support of MMX instructions
--func (c CPUInfo) MMX() bool {
-- return c.Features&MMX != 0
--}
--
--// MMXExt indicates support of MMXEXT instructions
--// (SSE integer functions or AMD MMX ext)
--func (c CPUInfo) MMXExt() bool {
-- return c.Features&MMXEXT != 0
--}
--
--// SSE indicates support of SSE instructions
--func (c CPUInfo) SSE() bool {
-- return c.Features&SSE != 0
--}
--
--// SSE2 indicates support of SSE 2 instructions
--func (c CPUInfo) SSE2() bool {
-- return c.Features&SSE2 != 0
--}
--
--// SSE3 indicates support of SSE 3 instructions
--func (c CPUInfo) SSE3() bool {
-- return c.Features&SSE3 != 0
--}
--
--// SSSE3 indicates support of SSSE 3 instructions
--func (c CPUInfo) SSSE3() bool {
-- return c.Features&SSSE3 != 0
--}
--
--// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
--func (c CPUInfo) SSE4() bool {
-- return c.Features&SSE4 != 0
--}
--
--// SSE42 indicates support of SSE4.2 instructions
--func (c CPUInfo) SSE42() bool {
-- return c.Features&SSE42 != 0
--}
--
--// AVX indicates support of AVX instructions
--// and operating system support of AVX instructions
--func (c CPUInfo) AVX() bool {
-- return c.Features&AVX != 0
--}
--
--// AVX2 indicates support of AVX2 instructions
--func (c CPUInfo) AVX2() bool {
-- return c.Features&AVX2 != 0
--}
--
--// FMA3 indicates support of FMA3 instructions
--func (c CPUInfo) FMA3() bool {
-- return c.Features&FMA3 != 0
--}
--
--// FMA4 indicates support of FMA4 instructions
--func (c CPUInfo) FMA4() bool {
-- return c.Features&FMA4 != 0
--}
--
--// XOP indicates support of XOP instructions
--func (c CPUInfo) XOP() bool {
-- return c.Features&XOP != 0
--}
--
--// F16C indicates support of F16C instructions
--func (c CPUInfo) F16C() bool {
-- return c.Features&F16C != 0
--}
--
--// BMI1 indicates support of BMI1 instructions
--func (c CPUInfo) BMI1() bool {
-- return c.Features&BMI1 != 0
--}
--
--// BMI2 indicates support of BMI2 instructions
--func (c CPUInfo) BMI2() bool {
-- return c.Features&BMI2 != 0
--}
--
--// TBM indicates support of TBM instructions
--// (AMD Trailing Bit Manipulation)
--func (c CPUInfo) TBM() bool {
-- return c.Features&TBM != 0
--}
--
--// Lzcnt indicates support of LZCNT instruction
--func (c CPUInfo) Lzcnt() bool {
-- return c.Features&LZCNT != 0
--}
--
--// Popcnt indicates support of POPCNT instruction
--func (c CPUInfo) Popcnt() bool {
-- return c.Features&POPCNT != 0
--}
--
--// HTT indicates the processor has Hyperthreading enabled
--func (c CPUInfo) HTT() bool {
-- return c.Features&HTT != 0
--}
--
--// SSE2Slow indicates that SSE2 may be slow on this processor
--func (c CPUInfo) SSE2Slow() bool {
-- return c.Features&SSE2SLOW != 0
--}
--
--// SSE3Slow indicates that SSE3 may be slow on this processor
--func (c CPUInfo) SSE3Slow() bool {
-- return c.Features&SSE3SLOW != 0
--}
--
--// AesNi indicates support of AES-NI instructions
--// (Advanced Encryption Standard New Instructions)
--func (c CPUInfo) AesNi() bool {
-- return c.Features&AESNI != 0
--}
--
--// Clmul indicates support of CLMUL instructions
--// (Carry-less Multiplication)
--func (c CPUInfo) Clmul() bool {
-- return c.Features&CLMUL != 0
--}
--
--// NX indicates support of NX (No-Execute) bit
--func (c CPUInfo) NX() bool {
-- return c.Features&NX != 0
--}
--
--// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
--func (c CPUInfo) SSE4A() bool {
-- return c.Features&SSE4A != 0
--}
--
--// HLE indicates support of Hardware Lock Elision
--func (c CPUInfo) HLE() bool {
-- return c.Features&HLE != 0
--}
--
--// RTM indicates support of Restricted Transactional Memory
--func (c CPUInfo) RTM() bool {
-- return c.Features&RTM != 0
--}
--
--// Rdrand indicates support of RDRAND instruction is available
--func (c CPUInfo) Rdrand() bool {
-- return c.Features&RDRAND != 0
--}
--
--// Rdseed indicates support of RDSEED instruction is available
--func (c CPUInfo) Rdseed() bool {
-- return c.Features&RDSEED != 0
--}
--
--// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
--func (c CPUInfo) ADX() bool {
-- return c.Features&ADX != 0
--}
--
--// SHA indicates support of Intel SHA Extensions
--func (c CPUInfo) SHA() bool {
-- return c.Features&SHA != 0
--}
--
--// AVX512F indicates support of AVX-512 Foundation
--func (c CPUInfo) AVX512F() bool {
-- return c.Features&AVX512F != 0
--}
--
--// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
--func (c CPUInfo) AVX512DQ() bool {
-- return c.Features&AVX512DQ != 0
--}
--
--// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
--func (c CPUInfo) AVX512IFMA() bool {
-- return c.Features&AVX512IFMA != 0
--}
--
--// AVX512PF indicates support of AVX-512 Prefetch Instructions
--func (c CPUInfo) AVX512PF() bool {
-- return c.Features&AVX512PF != 0
--}
--
--// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
--func (c CPUInfo) AVX512ER() bool {
-- return c.Features&AVX512ER != 0
--}
--
--// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
--func (c CPUInfo) AVX512CD() bool {
-- return c.Features&AVX512CD != 0
--}
--
--// AVX512BW indicates support of AVX-512 Byte and Word Instructions
--func (c CPUInfo) AVX512BW() bool {
-- return c.Features&AVX512BW != 0
--}
--
--// AVX512VL indicates support of AVX-512 Vector Length Extensions
--func (c CPUInfo) AVX512VL() bool {
-- return c.Features&AVX512VL != 0
--}
--
--// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
--func (c CPUInfo) AVX512VBMI() bool {
-- return c.Features&AVX512VBMI != 0
--}
--
--// MPX indicates support of Intel MPX (Memory Protection Extensions)
--func (c CPUInfo) MPX() bool {
-- return c.Features&MPX != 0
--}
--
--// ERMS indicates support of Enhanced REP MOVSB/STOSB
--func (c CPUInfo) ERMS() bool {
-- return c.Features&ERMS != 0
--}
--
--// RDTSCP Instruction is available.
--func (c CPUInfo) RDTSCP() bool {
-- return c.Features&RDTSCP != 0
--}
--
--// CX16 indicates if CMPXCHG16B instruction is available.
--func (c CPUInfo) CX16() bool {
-- return c.Features&CX16 != 0
--}
--
--// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection.
--// So TSX simply checks that.
--func (c CPUInfo) TSX() bool {
-- return c.Features&(HLE|RTM) == HLE|RTM
--}
--
--// Atom indicates an Atom processor
--func (c CPUInfo) Atom() bool {
-- return c.Features&ATOM != 0
--}
--
--// Intel returns true if vendor is recognized as Intel
--func (c CPUInfo) Intel() bool {
-- return c.VendorID == Intel
--}
--
--// AMD returns true if vendor is recognized as AMD
--func (c CPUInfo) AMD() bool {
-- return c.VendorID == AMD
--}
--
--// Hygon returns true if vendor is recognized as Hygon
--func (c CPUInfo) Hygon() bool {
-- return c.VendorID == Hygon
--}
--
--// Transmeta returns true if vendor is recognized as Transmeta
--func (c CPUInfo) Transmeta() bool {
-- return c.VendorID == Transmeta
--}
--
--// NSC returns true if vendor is recognized as National Semiconductor
--func (c CPUInfo) NSC() bool {
-- return c.VendorID == NSC
--}
--
--// VIA returns true if vendor is recognized as VIA
--func (c CPUInfo) VIA() bool {
-- return c.VendorID == VIA
--}
--
--// RTCounter returns the 64-bit time-stamp counter
--// Uses the RDTSCP instruction. The value 0 is returned
--// if the CPU does not support the instruction.
--func (c CPUInfo) RTCounter() uint64 {
-- if !c.RDTSCP() {
-- return 0
-- }
-- a, _, _, d := rdtscpAsm()
-- return uint64(a) | (uint64(d) << 32)
--}
--
--// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
--// This variable is OS dependent, but on Linux contains information
--// about the current cpu/core the code is running on.
--// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
--func (c CPUInfo) Ia32TscAux() uint32 {
-- if !c.RDTSCP() {
-- return 0
-- }
-- _, _, ecx, _ := rdtscpAsm()
-- return ecx
--}
--
--// LogicalCPU will return the Logical CPU the code is currently executing on.
--// This is likely to change when the OS re-schedules the running thread
--// to another CPU.
--// If the current core cannot be detected, -1 will be returned.
--func (c CPUInfo) LogicalCPU() int {
-- if c.maxFunc < 1 {
-- return -1
-- }
-- _, ebx, _, _ := cpuid(1)
-- return int(ebx >> 24)
--}
--
--// VM Will return true if the cpu id indicates we are in
--// a virtual machine. This is only a hint, and will very likely
--// have many false negatives.
--func (c CPUInfo) VM() bool {
-- switch c.VendorID {
-- case MSVM, KVM, VMware, XenHVM, Bhyve:
-- return true
-- }
-- return false
--}
--
--// Flags contains detected cpu features and characteristics
--type Flags uint64
--
--// String returns a string representation of the detected
--// CPU features.
--func (f Flags) String() string {
-- return strings.Join(f.Strings(), "","")
--}
--
--// Strings returns an array of the detected features.
--func (f Flags) Strings() []string {
-- s := support()
-- r := make([]string, 0, 20)
-- for i := uint(0); i < 64; i++ {
-- key := Flags(1 << i)
-- val := flagNames[key]
-- if s&key != 0 {
-- r = append(r, val)
-- }
-- }
-- return r
--}
--
--func maxExtendedFunction() uint32 {
-- eax, _, _, _ := cpuid(0x80000000)
-- return eax
--}
--
--func maxFunctionID() uint32 {
-- a, _, _, _ := cpuid(0)
-- return a
--}
--
--func brandName() string {
-- if maxExtendedFunction() >= 0x80000004 {
-- v := make([]uint32, 0, 48)
-- for i := uint32(0); i < 3; i++ {
-- a, b, c, d := cpuid(0x80000002 + i)
-- v = append(v, a, b, c, d)
-- }
-- return strings.Trim(string(valAsString(v...)), "" "")
-- }
-- return ""unknown""
--}
--
--func threadsPerCore() int {
-- mfi := maxFunctionID()
-- if mfi < 0x4 || vendorID() != Intel {
-- return 1
-- }
--
-- if mfi < 0xb {
-- _, b, _, d := cpuid(1)
-- if (d & (1 << 28)) != 0 {
-- // v will contain logical core count
-- v := (b >> 16) & 255
-- if v > 1 {
-- a4, _, _, _ := cpuid(4)
-- // physical cores
-- v2 := (a4 >> 26) + 1
-- if v2 > 0 {
-- return int(v) / int(v2)
-- }
-- }
-- }
-- return 1
-- }
-- _, b, _, _ := cpuidex(0xb, 0)
-- if b&0xffff == 0 {
-- return 1
-- }
-- return int(b & 0xffff)
--}
--
--func logicalCores() int {
-- mfi := maxFunctionID()
-- switch vendorID() {
-- case Intel:
-- // Use this on old Intel processors
-- if mfi < 0xb {
-- if mfi < 1 {
-- return 0
-- }
-- // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
-- // that can be assigned to logical processors in a physical package.
-- // The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
-- _, ebx, _, _ := cpuid(1)
-- logical := (ebx >> 16) & 0xff
-- return int(logical)
-- }
-- _, b, _, _ := cpuidex(0xb, 1)
-- return int(b & 0xffff)
-- case AMD, Hygon:
-- _, b, _, _ := cpuid(1)
-- return int((b >> 16) & 0xff)
-- default:
-- return 0
-- }
--}
--
--func familyModel() (int, int) {
-- if maxFunctionID() < 0x1 {
-- return 0, 0
-- }
-- eax, _, _, _ := cpuid(1)
-- family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
-- model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
-- return int(family), int(model)
--}
--
--func physicalCores() int {
-- switch vendorID() {
-- case Intel:
-- return logicalCores() / threadsPerCore()
-- case AMD, Hygon:
-- if maxExtendedFunction() >= 0x80000008 {
-- _, _, c, _ := cpuid(0x80000008)
-- return int(c&0xff) + 1
-- }
-- }
-- return 0
--}
--
--// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
--var vendorMapping = map[string]Vendor{
-- ""AMDisbetter!"": AMD,
-- ""AuthenticAMD"": AMD,
-- ""CentaurHauls"": VIA,
-- ""GenuineIntel"": Intel,
-- ""TransmetaCPU"": Transmeta,
-- ""GenuineTMx86"": Transmeta,
-- ""Geode by NSC"": NSC,
-- ""VIA VIA VIA "": VIA,
-- ""KVMKVMKVMKVM"": KVM,
-- ""Microsoft Hv"": MSVM,
-- ""VMwareVMware"": VMware,
-- ""XenVMMXenVMM"": XenHVM,
-- ""bhyve bhyve "": Bhyve,
-- ""HygonGenuine"": Hygon,
--}
--
--func vendorID() Vendor {
-- _, b, c, d := cpuid(0)
-- v := valAsString(b, d, c)
-- vend, ok := vendorMapping[string(v)]
-- if !ok {
-- return Other
-- }
-- return vend
--}
--
--func cacheLine() int {
-- if maxFunctionID() < 0x1 {
-- return 0
-- }
--
-- _, ebx, _, _ := cpuid(1)
-- cache := (ebx & 0xff00) >> 5 // cflush size
-- if cache == 0 && maxExtendedFunction() >= 0x80000006 {
-- _, _, ecx, _ := cpuid(0x80000006)
-- cache = ecx & 0xff // cacheline size
-- }
-- // TODO: Read from Cache and TLB Information
-- return int(cache)
--}
--
--func (c *CPUInfo) cacheSize() {
-- c.Cache.L1D = -1
-- c.Cache.L1I = -1
-- c.Cache.L2 = -1
-- c.Cache.L3 = -1
-- vendor := vendorID()
-- switch vendor {
-- case Intel:
-- if maxFunctionID() < 4 {
-- return
-- }
-- for i := uint32(0); ; i++ {
-- eax, ebx, ecx, _ := cpuidex(4, i)
-- cacheType := eax & 15
-- if cacheType == 0 {
-- break
-- }
-- cacheLevel := (eax >> 5) & 7
-- coherency := int(ebx&0xfff) + 1
-- partitions := int((ebx>>12)&0x3ff) + 1
-- associativity := int((ebx>>22)&0x3ff) + 1
-- sets := int(ecx) + 1
-- size := associativity * partitions * coherency * sets
-- switch cacheLevel {
-- case 1:
-- if cacheType == 1 {
-- // 1 = Data Cache
-- c.Cache.L1D = size
-- } else if cacheType == 2 {
-- // 2 = Instruction Cache
-- c.Cache.L1I = size
-- } else {
-- if c.Cache.L1D < 0 {
-- c.Cache.L1I = size
-- }
-- if c.Cache.L1I < 0 {
-- c.Cache.L1I = size
-- }
-- }
-- case 2:
-- c.Cache.L2 = size
-- case 3:
-- c.Cache.L3 = size
-- }
-- }
-- case AMD, Hygon:
-- // Untested.
-- if maxExtendedFunction() < 0x80000005 {
-- return
-- }
-- _, _, ecx, edx := cpuid(0x80000005)
-- c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
-- c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
--
-- if maxExtendedFunction() < 0x80000006 {
-- return
-- }
-- _, _, ecx, _ = cpuid(0x80000006)
-- c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
-- }
--
-- return
--}
--
--type SGXSupport struct {
-- Available bool
-- SGX1Supported bool
-- SGX2Supported bool
-- MaxEnclaveSizeNot64 int64
-- MaxEnclaveSize64 int64
--}
--
--func hasSGX(available bool) (rval SGXSupport) {
-- rval.Available = available
--
-- if !available {
-- return
-- }
--
-- a, _, _, d := cpuidex(0x12, 0)
-- rval.SGX1Supported = a&0x01 != 0
-- rval.SGX2Supported = a&0x02 != 0
-- rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2
-- rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
--
-- return
--}
--
--func support() Flags {
-- mfi := maxFunctionID()
-- vend := vendorID()
-- if mfi < 0x1 {
-- return 0
-- }
-- rval := uint64(0)
-- _, _, c, d := cpuid(1)
-- if (d & (1 << 15)) != 0 {
-- rval |= CMOV
-- }
-- if (d & (1 << 23)) != 0 {
-- rval |= MMX
-- }
-- if (d & (1 << 25)) != 0 {
-- rval |= MMXEXT
-- }
-- if (d & (1 << 25)) != 0 {
-- rval |= SSE
-- }
-- if (d & (1 << 26)) != 0 {
-- rval |= SSE2
-- }
-- if (c & 1) != 0 {
-- rval |= SSE3
-- }
-- if (c & 0x00000200) != 0 {
-- rval |= SSSE3
-- }
-- if (c & 0x00080000) != 0 {
-- rval |= SSE4
-- }
-- if (c & 0x00100000) != 0 {
-- rval |= SSE42
-- }
-- if (c & (1 << 25)) != 0 {
-- rval |= AESNI
-- }
-- if (c & (1 << 1)) != 0 {
-- rval |= CLMUL
-- }
-- if c&(1<<23) != 0 {
-- rval |= POPCNT
-- }
-- if c&(1<<30) != 0 {
-- rval |= RDRAND
-- }
-- if c&(1<<29) != 0 {
-- rval |= F16C
-- }
-- if c&(1<<13) != 0 {
-- rval |= CX16
-- }
-- if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
-- if threadsPerCore() > 1 {
-- rval |= HTT
-- }
-- }
--
-- // Check XGETBV, OXSAVE and AVX bits
-- if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 {
-- // Check for OS support
-- eax, _ := xgetbv(0)
-- if (eax & 0x6) == 0x6 {
-- rval |= AVX
-- if (c & 0x00001000) != 0 {
-- rval |= FMA3
-- }
-- }
-- }
--
-- // Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
-- if mfi >= 7 {
-- _, ebx, ecx, edx := cpuidex(7, 0)
-- if (rval&AVX) != 0 && (ebx&0x00000020) != 0 {
-- rval |= AVX2
-- }
-- if (ebx & 0x00000008) != 0 {
-- rval |= BMI1
-- if (ebx & 0x00000100) != 0 {
-- rval |= BMI2
-- }
-- }
-- if ebx&(1<<2) != 0 {
-- rval |= SGX
-- }
-- if ebx&(1<<4) != 0 {
-- rval |= HLE
-- }
-- if ebx&(1<<9) != 0 {
-- rval |= ERMS
-- }
-- if ebx&(1<<11) != 0 {
-- rval |= RTM
-- }
-- if ebx&(1<<14) != 0 {
-- rval |= MPX
-- }
-- if ebx&(1<<18) != 0 {
-- rval |= RDSEED
-- }
-- if ebx&(1<<19) != 0 {
-- rval |= ADX
-- }
-- if ebx&(1<<29) != 0 {
-- rval |= SHA
-- }
-- if edx&(1<<26) != 0 {
-- rval |= IBPB
-- }
-- if edx&(1<<27) != 0 {
-- rval |= STIBP
-- }
--
-- // Only detect AVX-512 features if XGETBV is supported
-- if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
-- // Check for OS support
-- eax, _ := xgetbv(0)
--
-- // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
-- // ZMM16-ZMM31 state are enabled by OS)
-- /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
-- if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 {
-- if ebx&(1<<16) != 0 {
-- rval |= AVX512F
-- }
-- if ebx&(1<<17) != 0 {
-- rval |= AVX512DQ
-- }
-- if ebx&(1<<21) != 0 {
-- rval |= AVX512IFMA
-- }
-- if ebx&(1<<26) != 0 {
-- rval |= AVX512PF
-- }
-- if ebx&(1<<27) != 0 {
-- rval |= AVX512ER
-- }
-- if ebx&(1<<28) != 0 {
-- rval |= AVX512CD
-- }
-- if ebx&(1<<30) != 0 {
-- rval |= AVX512BW
-- }
-- if ebx&(1<<31) != 0 {
-- rval |= AVX512VL
-- }
-- // ecx
-- if ecx&(1<<1) != 0 {
-- rval |= AVX512VBMI
-- }
-- }
-- }
-- }
--
-- if maxExtendedFunction() >= 0x80000001 {
-- _, _, c, d := cpuid(0x80000001)
-- if (c & (1 << 5)) != 0 {
-- rval |= LZCNT
-- rval |= POPCNT
-- }
-- if (d & (1 << 31)) != 0 {
-- rval |= AMD3DNOW
-- }
-- if (d & (1 << 30)) != 0 {
-- rval |= AMD3DNOWEXT
-- }
-- if (d & (1 << 23)) != 0 {
-- rval |= MMX
-- }
-- if (d & (1 << 22)) != 0 {
-- rval |= MMXEXT
-- }
-- if (c & (1 << 6)) != 0 {
-- rval |= SSE4A
-- }
-- if d&(1<<20) != 0 {
-- rval |= NX
-- }
-- if d&(1<<27) != 0 {
-- rval |= RDTSCP
-- }
--
-- /* Allow for selectively disabling SSE2 functions on AMD processors
-- with SSE2 support but not SSE4a. This includes Athlon64, some
-- Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
-- than SSE2 often enough to utilize this special-case flag.
-- AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
-- so that SSE2 is used unless explicitly disabled by checking
-- AV_CPU_FLAG_SSE2SLOW. */
-- if vendorID() != Intel &&
-- rval&SSE2 != 0 && (c&0x00000040) == 0 {
-- rval |= SSE2SLOW
-- }
--
-- /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
-- * used unless the OS has AVX support. */
-- if (rval & AVX) != 0 {
-- if (c & 0x00000800) != 0 {
-- rval |= XOP
-- }
-- if (c & 0x00010000) != 0 {
-- rval |= FMA4
-- }
-- }
--
-- if vendorID() == Intel {
-- family, model := familyModel()
-- if family == 6 && (model == 9 || model == 13 || model == 14) {
-- /* 6/9 (pentium-m ""banias""), 6/13 (pentium-m ""dothan""), and
-- * 6/14 (core1 ""yonah"") theoretically support sse2, but it's
-- * usually slower than mmx. */
-- if (rval & SSE2) != 0 {
-- rval |= SSE2SLOW
-- }
-- if (rval & SSE3) != 0 {
-- rval |= SSE3SLOW
-- }
-- }
-- /* The Atom processor has SSSE3 support, which is useful in many cases,
-- * but sometimes the SSSE3 version is slower than the SSE2 equivalent
-- * on the Atom, but is generally faster on other processors supporting
-- * SSSE3. This flag allows for selectively disabling certain SSSE3
-- * functions on the Atom. */
-- if family == 6 && model == 28 {
-- rval |= ATOM
-- }
-- }
-- }
-- return Flags(rval)
--}
--
--func valAsString(values ...uint32) []byte {
-- r := make([]byte, 4*len(values))
-- for i, v := range values {
-- dst := r[i*4:]
-- dst[0] = byte(v & 0xff)
-- dst[1] = byte((v >> 8) & 0xff)
-- dst[2] = byte((v >> 16) & 0xff)
-- dst[3] = byte((v >> 24) & 0xff)
-- switch {
-- case dst[0] == 0:
-- return r[:i*4]
-- case dst[1] == 0:
-- return r[:i*4+1]
-- case dst[2] == 0:
-- return r[:i*4+2]
-- case dst[3] == 0:
-- return r[:i*4+3]
-- }
-- }
-- return r
--}
-diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s
-deleted file mode 100644
-index 4d731711e48f2..0000000000000
---- a/vendor/github.com/klauspost/cpuid/cpuid_386.s
-+++ /dev/null
-@@ -1,42 +0,0 @@
--// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
--
--// +build 386,!gccgo
--
--// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
--TEXT ·asmCpuid(SB), 7, $0
-- XORL CX, CX
-- MOVL op+0(FP), AX
-- CPUID
-- MOVL AX, eax+4(FP)
-- MOVL BX, ebx+8(FP)
-- MOVL CX, ecx+12(FP)
-- MOVL DX, edx+16(FP)
-- RET
--
--// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
--TEXT ·asmCpuidex(SB), 7, $0
-- MOVL op+0(FP), AX
-- MOVL op2+4(FP), CX
-- CPUID
-- MOVL AX, eax+8(FP)
-- MOVL BX, ebx+12(FP)
-- MOVL CX, ecx+16(FP)
-- MOVL DX, edx+20(FP)
-- RET
--
--// func xgetbv(index uint32) (eax, edx uint32)
--TEXT ·asmXgetbv(SB), 7, $0
-- MOVL index+0(FP), CX
-- BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
-- MOVL AX, eax+4(FP)
-- MOVL DX, edx+8(FP)
-- RET
--
--// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
--TEXT ·asmRdtscpAsm(SB), 7, $0
-- BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
-- MOVL AX, eax+0(FP)
-- MOVL BX, ebx+4(FP)
-- MOVL CX, ecx+8(FP)
-- MOVL DX, edx+12(FP)
-- RET
-diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s
-deleted file mode 100644
-index 3c1d60e422125..0000000000000
---- a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s
-+++ /dev/null
-@@ -1,42 +0,0 @@
--// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
--
--//+build amd64,!gccgo
--
--// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
--TEXT ·asmCpuid(SB), 7, $0
-- XORQ CX, CX
-- MOVL op+0(FP), AX
-- CPUID
-- MOVL AX, eax+8(FP)
-- MOVL BX, ebx+12(FP)
-- MOVL CX, ecx+16(FP)
-- MOVL DX, edx+20(FP)
-- RET
--
--// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
--TEXT ·asmCpuidex(SB), 7, $0
-- MOVL op+0(FP), AX
-- MOVL op2+4(FP), CX
-- CPUID
-- MOVL AX, eax+8(FP)
-- MOVL BX, ebx+12(FP)
-- MOVL CX, ecx+16(FP)
-- MOVL DX, edx+20(FP)
-- RET
--
--// func asmXgetbv(index uint32) (eax, edx uint32)
--TEXT ·asmXgetbv(SB), 7, $0
-- MOVL index+0(FP), CX
-- BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
-- MOVL AX, eax+8(FP)
-- MOVL DX, edx+12(FP)
-- RET
--
--// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
--TEXT ·asmRdtscpAsm(SB), 7, $0
-- BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
-- MOVL AX, eax+0(FP)
-- MOVL BX, ebx+4(FP)
-- MOVL CX, ecx+8(FP)
-- MOVL DX, edx+12(FP)
-- RET
-diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go
-deleted file mode 100644
-index a5f04dd6d0a77..0000000000000
---- a/vendor/github.com/klauspost/cpuid/detect_intel.go
-+++ /dev/null
-@@ -1,17 +0,0 @@
--// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
--
--// +build 386,!gccgo amd64,!gccgo
--
--package cpuid
--
--func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
--func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
--func asmXgetbv(index uint32) (eax, edx uint32)
--func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
--
--func initCPU() {
-- cpuid = asmCpuid
-- cpuidex = asmCpuidex
-- xgetbv = asmXgetbv
-- rdtscpAsm = asmRdtscpAsm
--}
-diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go
-deleted file mode 100644
-index 909c5d9a7aed6..0000000000000
---- a/vendor/github.com/klauspost/cpuid/detect_ref.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
--
--// +build !amd64,!386 gccgo
--
--package cpuid
--
--func initCPU() {
-- cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
-- return 0, 0, 0, 0
-- }
--
-- cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
-- return 0, 0, 0, 0
-- }
--
-- xgetbv = func(index uint32) (eax, edx uint32) {
-- return 0, 0
-- }
--
-- rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
-- return 0, 0, 0, 0
-- }
--}
-diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go
-deleted file mode 100644
-index 90e7a98d278da..0000000000000
---- a/vendor/github.com/klauspost/cpuid/generate.go
-+++ /dev/null
-@@ -1,4 +0,0 @@
--package cpuid
--
--//go:generate go run private-gen.go
--//go:generate gofmt -w ./private
-diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
-new file mode 100644
-index 0000000000000..5e987350471d0
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/.gitignore
-@@ -0,0 +1,34 @@
-+# Created by https://www.gitignore.io/api/macos
-+
-+### macOS ###
-+*.DS_Store
-+.AppleDouble
-+.LSOverride
-+
-+# Icon must end with two \r
-+Icon
-+
-+
-+# Thumbnails
-+._*
-+
-+# Files that might appear in the root of a volume
-+.DocumentRevisions-V100
-+.fseventsd
-+.Spotlight-V100
-+.TemporaryItems
-+.Trashes
-+.VolumeIcon.icns
-+.com.apple.timemachine.donotpresent
-+
-+# Directories potentially created on remote AFP share
-+.AppleDB
-+.AppleDesktop
-+Network Trash Folder
-+Temporary Items
-+.apdisk
-+
-+# End of https://www.gitignore.io/api/macos
-+
-+cmd/*/*exe
-+.idea
-\ No newline at end of file
-diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
-new file mode 100644
-index 0000000000000..fd6c6db713d3a
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/.travis.yml
-@@ -0,0 +1,24 @@
-+language: go
-+
-+env:
-+ - GO111MODULE=off
-+
-+go:
-+ - 1.9.x
-+ - 1.10.x
-+ - 1.11.x
-+ - 1.12.x
-+ - master
-+
-+matrix:
-+ fast_finish: true
-+ allow_failures:
-+ - go: master
-+
-+sudo: false
-+
-+script:
-+ - go test -v -cpu=2
-+ - go test -v -cpu=2 -race
-+ - go test -v -cpu=2 -tags noasm
-+ - go test -v -cpu=2 -race -tags noasm
-diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
-new file mode 100644
-index 0000000000000..bd899d8353dd5
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/LICENSE
-@@ -0,0 +1,28 @@
-+Copyright (c) 2015, Pierre Curto
-+All rights reserved.
-+
-+Redistribution and use in source and binary forms, with or without
-+modification, are permitted provided that the following conditions are met:
-+
-+* Redistributions of source code must retain the above copyright notice, this
-+ list of conditions and the following disclaimer.
-+
-+* Redistributions in binary form must reproduce the above copyright notice,
-+ this list of conditions and the following disclaimer in the documentation
-+ and/or other materials provided with the distribution.
-+
-+* Neither the name of xxHash nor the names of its
-+ contributors may be used to endorse or promote products derived from
-+ this software without specific prior written permission.
-+
-+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
-+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
-new file mode 100644
-index 0000000000000..4ee388e81bfb9
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/README.md
-@@ -0,0 +1,90 @@
-+# lz4 : LZ4 compression in pure Go
-+
-+[](https://godoc.org/github.com/pierrec/lz4)
-+[](https://travis-ci.org/pierrec/lz4)
-+[](https://goreportcard.com/report/github.com/pierrec/lz4)
-+[](https://github.com/pierrec/lz4/tags)
-+
-+## Overview
-+
-+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
-+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
-+
-+## Install
-+
-+Assuming you have the go toolchain installed:
-+
-+```
-+go get github.com/pierrec/lz4
-+```
-+
-+There is a command line interface tool to compress and decompress LZ4 files.
-+
-+```
-+go install github.com/pierrec/lz4/cmd/lz4c
-+```
-+
-+Usage
-+
-+```
-+Usage of lz4c:
-+ -version
-+ print the program version
-+
-+Subcommands:
-+Compress the given files or from stdin to stdout.
-+compress [arguments] [<file> ...]
-+ -bc
-+ enable block checksum
-+ -l int
-+ compression level (0=fastest)
-+ -sc
-+ disable stream checksum
-+ -size string
-+ block max size [64K,256K,1M,4M] (default ""4M"")
-+
-+Uncompress the given files or from stdin to stdout.
-+uncompress [arguments] [<file> ...]
-+
-+```
-+
-+
-+## Example
-+
-+```
-+// Compress and uncompress an input string.
-+s := ""hello world""
-+r := strings.NewReader(s)
-+
-+// The pipe will uncompress the data from the writer.
-+pr, pw := io.Pipe()
-+zw := lz4.NewWriter(pw)
-+zr := lz4.NewReader(pr)
-+
-+go func() {
-+ // Compress the input string.
-+ _, _ = io.Copy(zw, r)
-+ _ = zw.Close() // Make sure the writer is closed
-+ _ = pw.Close() // Terminate the pipe
-+}()
-+
-+_, _ = io.Copy(os.Stdout, zr)
-+
-+// Output:
-+// hello world
-+```
-+
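The Overview mentions low-level block compress/uncompress functions in addition to the streaming interface shown above. A minimal round-trip sketch using `CompressBlockBound`, `CompressBlock` and `UncompressBlock` from `block.go` added later in this change might look like the following; the buffer sizes follow the `htSize` constant and the sample input is purely illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	// Repetitive input so the block actually compresses.
	src := bytes.Repeat([]byte("hello world "), 200)

	// Worst-case (incompressible) output size.
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	// CompressBlock needs a caller-provided hash table of htSize (64K) entries.
	ht := make([]int, 64<<10)

	n, err := lz4.CompressBlock(src, dst, ht)
	if err != nil {
		panic(err)
	}
	if n == 0 {
		// A zero size with no error means the data was not compressible.
		fmt.Println("incompressible, store raw")
		return
	}

	// Decompression needs a destination large enough for the original data.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(n, m, bytes.Equal(src, out[:m])) // compressed size, original size, true
}
```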
-+## Contributing
-+
-+Contributions are very welcome for bug fixing, performance improvements...!
-+
-+- Open an issue with a proper description
-+- Send a pull request with appropriate test case(s)
-+
-+## Contributors
-+
-+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
-+
-+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
-+
-+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
-diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
-new file mode 100644
-index 0000000000000..ee178a992b11a
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/block.go
-@@ -0,0 +1,387 @@
-+package lz4
-+
-+import (
-+ ""encoding/binary""
-+ ""fmt""
-+ ""math/bits""
-+)
-+
-+// blockHash hashes the lower 6 bytes into a value < htSize.
-+func blockHash(x uint64) uint32 {
-+ const prime6bytes = 227718039650203
-+ return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
-+}
-+
-+// CompressBlockBound returns the maximum compressed size of a buffer of size n, when not compressible.
-+func CompressBlockBound(n int) int {
-+ return n + n/255 + 16
-+}
-+
-+// UncompressBlock uncompresses the source buffer into the destination one,
-+// and returns the uncompressed size.
-+//
-+// The destination buffer must be sized appropriately.
-+//
-+// An error is returned if the source data is invalid or the destination buffer is too small.
-+func UncompressBlock(src, dst []byte) (int, error) {
-+ if len(src) == 0 {
-+ return 0, nil
-+ }
-+ if di := decodeBlock(dst, src); di >= 0 {
-+ return di, nil
-+ }
-+ return 0, ErrInvalidSourceShortBuffer
-+}
-+
-+// CompressBlock compresses the source buffer into the destination one.
-+// This is the fast version of LZ4 compression and also the default one.
-+// The hashTable must have at least 64K (htSize) entries.
-+//
-+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
-+//
-+// An error is returned if the destination buffer is too small.
-+func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
-+ if len(hashTable) < htSize {
-+ return 0, fmt.Errorf(""hash table too small, should be at least %d in size"", htSize)
-+ }
-+ defer recoverBlock(&err)
-+
-+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-+ // This significantly speeds up incompressible data and usually has very small impact on compression.
-+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
-+ const adaptSkipLog = 7
-+ sn, dn := len(src)-mfLimit, len(dst)
-+ if sn <= 0 || dn == 0 {
-+ return 0, nil
-+ }
-+ // Prove to the compiler the table has at least htSize elements.
-+ // The compiler can see that ""uint32() >> hashShift"" cannot be out of bounds.
-+ hashTable = hashTable[:htSize]
-+
-+ // si: Current position of the search.
-+ // anchor: Position of the current literals.
-+ var si, di, anchor int
-+
-+ // Fast scan strategy: the hash table only stores the last 4 bytes sequences.
-+ for si < sn {
-+ // Hash the next 6 bytes (sequence)...
-+ match := binary.LittleEndian.Uint64(src[si:])
-+ h := blockHash(match)
-+ h2 := blockHash(match >> 8)
-+
-+ // We check a match at s, s+1 and s+2 and pick the first one we get.
-+ // Checking 3 only requires us to load the source once.
-+ ref := hashTable[h]
-+ ref2 := hashTable[h2]
-+ hashTable[h] = si
-+ hashTable[h2] = si + 1
-+ offset := si - ref
-+
-+ // If offset <= 0 we got an old entry in the hash table.
-+ if offset <= 0 || offset >= winSize || // Out of window.
-+ uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
-+ // No match. Start calculating another hash.
-+ // The processor can usually do this out-of-order.
-+ h = blockHash(match >> 16)
-+ ref = hashTable[h]
-+
-+ // Check the second match at si+1
-+ si += 1
-+ offset = si - ref2
-+
-+ if offset <= 0 || offset >= winSize ||
-+ uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
-+ // No match. Check the third match at si+2
-+ si += 1
-+ offset = si - ref
-+ hashTable[h] = si
-+
-+ if offset <= 0 || offset >= winSize ||
-+ uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
-+ // Skip one extra byte (at si+3) before we check 3 matches again.
-+ si += 2 + (si-anchor)>>adaptSkipLog
-+ continue
-+ }
-+ }
-+ }
-+
-+ // Match found.
-+ lLen := si - anchor // Literal length.
-+ // We already matched 4 bytes.
-+ mLen := 4
-+
-+ // Extend backwards if we can, reducing literals.
-+ tOff := si - offset - 1
-+ for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
-+ si--
-+ tOff--
-+ lLen--
-+ mLen++
-+ }
-+
-+ // Add the match length, so we continue search at the end.
-+ // Use mLen to store the offset base.
-+ si, mLen = si+mLen, si+minMatch
-+
-+ // Find the longest match by looking by batches of 8 bytes.
-+ for si < sn {
-+ x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
-+ if x == 0 {
-+ si += 8
-+ } else {
-+ // Stop at the first non-zero byte.
-+ si += bits.TrailingZeros64(x) >> 3
-+ break
-+ }
-+ }
-+
-+ mLen = si - mLen
-+ if mLen < 0xF {
-+ dst[di] = byte(mLen)
-+ } else {
-+ dst[di] = 0xF
-+ }
-+
-+ // Encode literals length.
-+ if lLen < 0xF {
-+ dst[di] |= byte(lLen << 4)
-+ } else {
-+ dst[di] |= 0xF0
-+ di++
-+ l := lLen - 0xF
-+ for ; l >= 0xFF; l -= 0xFF {
-+ dst[di] = 0xFF
-+ di++
-+ }
-+ dst[di] = byte(l)
-+ }
-+ di++
-+
-+ // Literals.
-+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
-+ di += lLen + 2
-+ anchor = si
-+
-+ // Encode offset.
-+ _ = dst[di] // Bound check elimination.
-+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-+
-+ // Encode match length part 2.
-+ if mLen >= 0xF {
-+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-+ dst[di] = 0xFF
-+ di++
-+ }
-+ dst[di] = byte(mLen)
-+ di++
-+ }
-+ // Check if we can load next values.
-+ if si >= sn {
-+ break
-+ }
-+ // Hash match end-2
-+ h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
-+ hashTable[h] = si - 2
-+ }
-+
-+ if anchor == 0 {
-+ // Incompressible.
-+ return 0, nil
-+ }
-+
-+ // Last literals.
-+ lLen := len(src) - anchor
-+ if lLen < 0xF {
-+ dst[di] = byte(lLen << 4)
-+ } else {
-+ dst[di] = 0xF0
-+ di++
-+ for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
-+ dst[di] = 0xFF
-+ di++
-+ }
-+ dst[di] = byte(lLen)
-+ }
-+ di++
-+
-+ // Write the last literals.
-+ if di >= anchor {
-+ // Incompressible.
-+ return 0, nil
-+ }
-+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
-+ return di, nil
-+}
-+
-+// blockHashHC hashes 4 bytes into a value < winSize.
-+func blockHashHC(x uint32) uint32 {
-+ const hasher uint32 = 2654435761 // Knuth multiplicative hash.
-+ return x * hasher >> (32 - winSizeLog)
-+}
-+
-+// CompressBlockHC compresses the source buffer src into the destination dst
-+// with max search depth (use 0 or negative value for no max).
-+//
-+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-+//
-+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
-+//
-+// An error is returned if the destination buffer is too small.
-+func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
-+ defer recoverBlock(&err)
-+
-+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-+ // This significantly speeds up incompressible data and usually has very small impact on compression.
-+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
-+ const adaptSkipLog = 7
-+
-+ sn, dn := len(src)-mfLimit, len(dst)
-+ if sn <= 0 || dn == 0 {
-+ return 0, nil
-+ }
-+ var si, di int
-+
-+ // hashTable: stores the last position found for a given hash
-+ // chainTable: stores previous positions for a given hash
-+ var hashTable, chainTable [winSize]int
-+
-+ if depth <= 0 {
-+ depth = winSize
-+ }
-+
-+ anchor := si
-+ for si < sn {
-+ // Hash the next 4 bytes (sequence).
-+ match := binary.LittleEndian.Uint32(src[si:])
-+ h := blockHashHC(match)
-+
-+ // Follow the chain until out of window and give the longest match.
-+ mLen := 0
-+ offset := 0
-+ for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
-+ // The first (mLen==0) or next byte (mLen>=minMatch) at current match length
-+ // must match to improve on the match length.
-+ if src[next+mLen] != src[si+mLen] {
-+ continue
-+ }
-+ ml := 0
-+ // Compare the current position with a previous with the same hash.
-+ for ml < sn-si {
-+ x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
-+ if x == 0 {
-+ ml += 8
-+ } else {
-+ // Stop at the first non-zero byte.
-+ ml += bits.TrailingZeros64(x) >> 3
-+ break
-+ }
-+ }
-+ if ml < minMatch || ml <= mLen {
-+ // Match too small (<minMatch) or smaller than the current match.
-+ continue
-+ }
-+ // Found a longer match, keep its position and length.
-+ mLen = ml
-+ offset = si - next
-+ // Try another previous position with the same hash.
-+ try--
-+ }
-+ chainTable[si&winMask] = hashTable[h]
-+ hashTable[h] = si
-+
-+ // No match found.
-+ if mLen == 0 {
-+ si += 1 + (si-anchor)>>adaptSkipLog
-+ continue
-+ }
-+
-+ // Match found.
-+ // Update hash/chain tables with overlapping bytes:
-+ // si already hashed, add everything from si+1 up to the match length.
-+ winStart := si + 1
-+ if ws := si + mLen - winSize; ws > winStart {
-+ winStart = ws
-+ }
-+ for si, ml := winStart, si+mLen; si < ml; {
-+ match >>= 8
-+ match |= uint32(src[si+3]) << 24
-+ h := blockHashHC(match)
-+ chainTable[si&winMask] = hashTable[h]
-+ hashTable[h] = si
-+ si++
-+ }
-+
-+ lLen := si - anchor
-+ si += mLen
-+ mLen -= minMatch // Match length does not include minMatch.
-+
-+ if mLen < 0xF {
-+ dst[di] = byte(mLen)
-+ } else {
-+ dst[di] = 0xF
-+ }
-+
-+ // Encode literals length.
-+ if lLen < 0xF {
-+ dst[di] |= byte(lLen << 4)
-+ } else {
-+ dst[di] |= 0xF0
-+ di++
-+ l := lLen - 0xF
-+ for ; l >= 0xFF; l -= 0xFF {
-+ dst[di] = 0xFF
-+ di++
-+ }
-+ dst[di] = byte(l)
-+ }
-+ di++
-+
-+ // Literals.
-+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
-+ di += lLen
-+ anchor = si
-+
-+ // Encode offset.
-+ di += 2
-+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-+
-+ // Encode match length part 2.
-+ if mLen >= 0xF {
-+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-+ dst[di] = 0xFF
-+ di++
-+ }
-+ dst[di] = byte(mLen)
-+ di++
-+ }
-+ }
-+
-+ if anchor == 0 {
-+ // Incompressible.
-+ return 0, nil
-+ }
-+
-+ // Last literals.
-+ lLen := len(src) - anchor
-+ if lLen < 0xF {
-+ dst[di] = byte(lLen << 4)
-+ } else {
-+ dst[di] = 0xF0
-+ di++
-+ lLen -= 0xF
-+ for ; lLen >= 0xFF; lLen -= 0xFF {
-+ dst[di] = 0xFF
-+ di++
-+ }
-+ dst[di] = byte(lLen)
-+ }
-+ di++
-+
-+ // Write the last literals.
-+ if di >= anchor {
-+ // Incompressible.
-+ return 0, nil
-+ }
-+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
-+ return di, nil
-+}
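As the doc comments note, `CompressBlockHC` trades speed for ratio and takes a search depth instead of a caller-provided hash table. A small hedged comparison of the two block compressors (sample data and sizes are illustrative only):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	src := bytes.Repeat([]byte("abcdefgh"), 1024)
	bound := lz4.CompressBlockBound(len(src))

	// Fast path: caller supplies the 64K-entry hash table.
	fastDst := make([]byte, bound)
	ht := make([]int, 64<<10)
	nFast, err := lz4.CompressBlock(src, fastDst, ht)
	if err != nil {
		panic(err)
	}

	// High-compression path: depth 0 (or negative) means no depth limit.
	hcDst := make([]byte, bound)
	nHC, err := lz4.CompressBlockHC(src, hcDst, 0)
	if err != nil {
		panic(err)
	}

	fmt.Printf("fast: %d bytes, hc: %d bytes\n", nFast, nHC)
}
```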
-diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
-new file mode 100644
-index 0000000000000..bc5e78d40f0a3
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/debug.go
-@@ -0,0 +1,23 @@
-+// +build lz4debug
-+
-+package lz4
-+
-+import (
-+ ""fmt""
-+ ""os""
-+ ""path/filepath""
-+ ""runtime""
-+)
-+
-+const debugFlag = true
-+
-+func debug(args ...interface{}) {
-+ _, file, line, _ := runtime.Caller(1)
-+ file = filepath.Base(file)
-+
-+ f := fmt.Sprintf(""LZ4: %s:%d %s"", file, line, args[0])
-+ if f[len(f)-1] != '\n' {
-+ f += ""\n""
-+ }
-+ fmt.Fprintf(os.Stderr, f, args[1:]...)
-+}
-diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
-new file mode 100644
-index 0000000000000..44211ad96453b
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/debug_stub.go
-@@ -0,0 +1,7 @@
-+// +build !lz4debug
-+
-+package lz4
-+
-+const debugFlag = false
-+
-+func debug(args ...interface{}) {}
-diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
-new file mode 100644
-index 0000000000000..43cc14fbe2e37
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/decode_amd64.go
-@@ -0,0 +1,8 @@
-+// +build !appengine
-+// +build gc
-+// +build !noasm
-+
-+package lz4
-+
-+//go:noescape
-+func decodeBlock(dst, src []byte) int
-diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
-new file mode 100644
-index 0000000000000..20fef39759cb6
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/decode_amd64.s
-@@ -0,0 +1,375 @@
-+// +build !appengine
-+// +build gc
-+// +build !noasm
-+
-+#include ""textflag.h""
-+
-+// AX scratch
-+// BX scratch
-+// CX scratch
-+// DX token
-+//
-+// DI &dst
-+// SI &src
-+// R8 &dst + len(dst)
-+// R9 &src + len(src)
-+// R11 &dst
-+// R12 short output end
-+// R13 short input end
-+// func decodeBlock(dst, src []byte) int
-+// using 50 bytes of stack currently
-+TEXT ·decodeBlock(SB), NOSPLIT, $64-56
-+ MOVQ dst_base+0(FP), DI
-+ MOVQ DI, R11
-+ MOVQ dst_len+8(FP), R8
-+ ADDQ DI, R8
-+
-+ MOVQ src_base+24(FP), SI
-+ MOVQ src_len+32(FP), R9
-+ ADDQ SI, R9
-+
-+ // shortcut ends
-+ // short output end
-+ MOVQ R8, R12
-+ SUBQ $32, R12
-+ // short input end
-+ MOVQ R9, R13
-+ SUBQ $16, R13
-+
-+loop:
-+ // for si < len(src)
-+ CMPQ SI, R9
-+ JGE end
-+
-+ // token := uint32(src[si])
-+ MOVBQZX (SI), DX
-+ INCQ SI
-+
-+ // lit_len = token >> 4
-+ // if lit_len > 0
-+ // CX = lit_len
-+ MOVQ DX, CX
-+ SHRQ $4, CX
-+
-+ // if lit_len != 0xF
-+ CMPQ CX, $0xF
-+ JEQ lit_len_loop_pre
-+ CMPQ DI, R12
-+ JGE lit_len_loop_pre
-+ CMPQ SI, R13
-+ JGE lit_len_loop_pre
-+
-+ // copy shortcut
-+
-+ // A two-stage shortcut for the most common case:
-+ // 1) If the literal length is 0..14, and there is enough space,
-+ // enter the shortcut and copy 16 bytes on behalf of the literals
-+ // (in the fast mode, only 8 bytes can be safely copied this way).
-+ // 2) Further if the match length is 4..18, copy 18 bytes in a similar
-+ // manner; but we ensure that there's enough space in the output for
-+ // those 18 bytes earlier, upon entering the shortcut (in other words,
-+ // there is a combined check for both stages).
-+
-+ // copy literal
-+ MOVOU (SI), X0
-+ MOVOU X0, (DI)
-+ ADDQ CX, DI
-+ ADDQ CX, SI
-+
-+ MOVQ DX, CX
-+ ANDQ $0xF, CX
-+
-+ // The second stage: prepare for match copying, decode full info.
-+ // If it doesn't work out, the info won't be wasted.
-+ // offset := uint16(data[:2])
-+ MOVWQZX (SI), DX
-+ ADDQ $2, SI
-+
-+ MOVQ DI, AX
-+ SUBQ DX, AX
-+ CMPQ AX, DI
-+ JGT err_short_buf
-+
-+ // if we can't do the second stage then jump straight to read the
-+ // match length, we already have the offset.
-+ CMPQ CX, $0xF
-+ JEQ match_len_loop_pre
-+ CMPQ DX, $8
-+ JLT match_len_loop_pre
-+ CMPQ AX, R11
-+ JLT err_short_buf
-+
-+ // memcpy(op + 0, match + 0, 8);
-+ MOVQ (AX), BX
-+ MOVQ BX, (DI)
-+ // memcpy(op + 8, match + 8, 8);
-+ MOVQ 8(AX), BX
-+ MOVQ BX, 8(DI)
-+ // memcpy(op +16, match +16, 2);
-+ MOVW 16(AX), BX
-+ MOVW BX, 16(DI)
-+
-+ ADDQ $4, DI // minmatch
-+ ADDQ CX, DI
-+
-+ // shortcut complete, load next token
-+ JMP loop
-+
-+lit_len_loop_pre:
-+ // if lit_len > 0
-+ CMPQ CX, $0
-+ JEQ offset
-+ CMPQ CX, $0xF
-+ JNE copy_literal
-+
-+lit_len_loop:
-+ // for src[si] == 0xFF
-+ CMPB (SI), $0xFF
-+ JNE lit_len_finalise
-+
-+ // bounds check src[si+1]
-+ MOVQ SI, AX
-+ ADDQ $1, AX
-+ CMPQ AX, R9
-+ JGT err_short_buf
-+
-+ // lit_len += 0xFF
-+ ADDQ $0xFF, CX
-+ INCQ SI
-+ JMP lit_len_loop
-+
-+lit_len_finalise:
-+ // lit_len += int(src[si])
-+ // si++
-+ MOVBQZX (SI), AX
-+ ADDQ AX, CX
-+ INCQ SI
-+
-+copy_literal:
-+ // bounds check src and dst
-+ MOVQ SI, AX
-+ ADDQ CX, AX
-+ CMPQ AX, R9
-+ JGT err_short_buf
-+
-+ MOVQ DI, AX
-+ ADDQ CX, AX
-+ CMPQ AX, R8
-+ JGT err_short_buf
-+
-+ // what's a good cutoff to call memmove?
-+ CMPQ CX, $16
-+ JGT memmove_lit
-+
-+ // if len(dst[di:]) < 16
-+ MOVQ R8, AX
-+ SUBQ DI, AX
-+ CMPQ AX, $16
-+ JLT memmove_lit
-+
-+ // if len(src[si:]) < 16
-+ MOVQ R9, AX
-+ SUBQ SI, AX
-+ CMPQ AX, $16
-+ JLT memmove_lit
-+
-+ MOVOU (SI), X0
-+ MOVOU X0, (DI)
-+
-+ JMP finish_lit_copy
-+
-+memmove_lit:
-+ // memmove(to, from, len)
-+ MOVQ DI, 0(SP)
-+ MOVQ SI, 8(SP)
-+ MOVQ CX, 16(SP)
-+ // spill
-+ MOVQ DI, 24(SP)
-+ MOVQ SI, 32(SP)
-+ MOVQ CX, 40(SP) // need len to inc SI, DI after
-+ MOVB DX, 48(SP)
-+ CALL runtime·memmove(SB)
-+
-+ // restore registers
-+ MOVQ 24(SP), DI
-+ MOVQ 32(SP), SI
-+ MOVQ 40(SP), CX
-+ MOVB 48(SP), DX
-+
-+ // recalc initial values
-+ MOVQ dst_base+0(FP), R8
-+ MOVQ R8, R11
-+ ADDQ dst_len+8(FP), R8
-+ MOVQ src_base+24(FP), R9
-+ ADDQ src_len+32(FP), R9
-+ MOVQ R8, R12
-+ SUBQ $32, R12
-+ MOVQ R9, R13
-+ SUBQ $16, R13
-+
-+finish_lit_copy:
-+ ADDQ CX, SI
-+ ADDQ CX, DI
-+
-+ CMPQ SI, R9
-+ JGE end
-+
-+offset:
-+ // CX := mLen
-+ // free up DX to use for offset
-+ MOVQ DX, CX
-+
-+ MOVQ SI, AX
-+ ADDQ $2, AX
-+ CMPQ AX, R9
-+ JGT err_short_buf
-+
-+ // offset
-+ // DX := int(src[si]) | int(src[si+1])<<8
-+ MOVWQZX (SI), DX
-+ ADDQ $2, SI
-+
-+ // 0 offset is invalid
-+ CMPQ DX, $0
-+ JEQ err_corrupt
-+
-+ ANDB $0xF, CX
-+
-+match_len_loop_pre:
-+ // if mlen != 0xF
-+ CMPB CX, $0xF
-+ JNE copy_match
-+
-+match_len_loop:
-+ // for src[si] == 0xFF
-+ // lit_len += 0xFF
-+ CMPB (SI), $0xFF
-+ JNE match_len_finalise
-+
-+ // bounds check src[si+1]
-+ MOVQ SI, AX
-+ ADDQ $1, AX
-+ CMPQ AX, R9
-+ JGT err_short_buf
-+
-+ ADDQ $0xFF, CX
-+ INCQ SI
-+ JMP match_len_loop
-+
-+match_len_finalise:
-+ // lit_len += int(src[si])
-+ // si++
-+ MOVBQZX (SI), AX
-+ ADDQ AX, CX
-+ INCQ SI
-+
-+copy_match:
-+ // mLen += minMatch
-+ ADDQ $4, CX
-+
-+ // check we have match_len bytes left in dst
-+ // di+match_len < len(dst)
-+ MOVQ DI, AX
-+ ADDQ CX, AX
-+ CMPQ AX, R8
-+ JGT err_short_buf
-+
-+ // DX = offset
-+ // CX = match_len
-+ // BX = &dst + (di - offset)
-+ MOVQ DI, BX
-+ SUBQ DX, BX
-+
-+ // check BX is within dst
-+ // if BX < &dst
-+ CMPQ BX, R11
-+ JLT err_short_buf
-+
-+ // if offset + match_len < di
-+ MOVQ BX, AX
-+ ADDQ CX, AX
-+ CMPQ DI, AX
-+ JGT copy_interior_match
-+
-+ // AX := len(dst[:di])
-+ // MOVQ DI, AX
-+ // SUBQ R11, AX
-+
-+ // copy 16 bytes at a time
-+ // if di-offset < 16 copy 16-(di-offset) bytes to di
-+ // then do the remaining
-+
-+copy_match_loop:
-+ // for match_len >= 0
-+ // dst[di] = dst[i]
-+ // di++
-+ // i++
-+ MOVB (BX), AX
-+ MOVB AX, (DI)
-+ INCQ DI
-+ INCQ BX
-+ DECQ CX
-+
-+ CMPQ CX, $0
-+ JGT copy_match_loop
-+
-+ JMP loop
-+
-+copy_interior_match:
-+ CMPQ CX, $16
-+ JGT memmove_match
-+
-+ // if len(dst[di:]) < 16
-+ MOVQ R8, AX
-+ SUBQ DI, AX
-+ CMPQ AX, $16
-+ JLT memmove_match
-+
-+ MOVOU (BX), X0
-+ MOVOU X0, (DI)
-+
-+ ADDQ CX, DI
-+ JMP loop
-+
-+memmove_match:
-+ // memmove(to, from, len)
-+ MOVQ DI, 0(SP)
-+ MOVQ BX, 8(SP)
-+ MOVQ CX, 16(SP)
-+ // spill
-+ MOVQ DI, 24(SP)
-+ MOVQ SI, 32(SP)
-+ MOVQ CX, 40(SP) // need len to inc SI, DI after
-+ CALL runtime·memmove(SB)
-+
-+ // restore registers
-+ MOVQ 24(SP), DI
-+ MOVQ 32(SP), SI
-+ MOVQ 40(SP), CX
-+
-+ // recalc initial values
-+ MOVQ dst_base+0(FP), R8
-+ MOVQ R8, R11 // TODO: make these sensible numbers
-+ ADDQ dst_len+8(FP), R8
-+ MOVQ src_base+24(FP), R9
-+ ADDQ src_len+32(FP), R9
-+ MOVQ R8, R12
-+ SUBQ $32, R12
-+ MOVQ R9, R13
-+ SUBQ $16, R13
-+
-+ ADDQ CX, DI
-+ JMP loop
-+
-+err_corrupt:
-+ MOVQ $-1, ret+48(FP)
-+ RET
-+
-+err_short_buf:
-+ MOVQ $-2, ret+48(FP)
-+ RET
-+
-+end:
-+ SUBQ R11, DI
-+ MOVQ DI, ret+48(FP)
-+ RET
-diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
-new file mode 100644
-index 0000000000000..919888edf7dcc
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/decode_other.go
-@@ -0,0 +1,98 @@
-+// +build !amd64 appengine !gc noasm
-+
-+package lz4
-+
-+func decodeBlock(dst, src []byte) (ret int) {
-+ const hasError = -2
-+ defer func() {
-+ if recover() != nil {
-+ ret = hasError
-+ }
-+ }()
-+
-+ var si, di int
-+ for {
-+ // Literals and match lengths (token).
-+ b := int(src[si])
-+ si++
-+
-+ // Literals.
-+ if lLen := b >> 4; lLen > 0 {
-+ switch {
-+ case lLen < 0xF && si+16 < len(src):
-+ // Shortcut 1
-+ // if we have enough room in src and dst, and the literals length
-+ // is small enough (0..14) then copy all 16 bytes, even if not all
-+ // are part of the literals.
-+ copy(dst[di:], src[si:si+16])
-+ si += lLen
-+ di += lLen
-+ if mLen := b & 0xF; mLen < 0xF {
-+ // Shortcut 2
-+ // if the match length (4..18) fits within the literals, then copy
-+ // all 18 bytes, even if not all are part of the literals.
-+ mLen += 4
-+ if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
-+ i := di - offset
-+ end := i + 18
-+ if end > len(dst) {
-+ // The remaining buffer may not hold 18 bytes.
-+ // See https://github.com/pierrec/lz4/issues/51.
-+ end = len(dst)
-+ }
-+ copy(dst[di:], dst[i:end])
-+ si += 2
-+ di += mLen
-+ continue
-+ }
-+ }
-+ case lLen == 0xF:
-+ for src[si] == 0xFF {
-+ lLen += 0xFF
-+ si++
-+ }
-+ lLen += int(src[si])
-+ si++
-+ fallthrough
-+ default:
-+ copy(dst[di:di+lLen], src[si:si+lLen])
-+ si += lLen
-+ di += lLen
-+ }
-+ }
-+ if si >= len(src) {
-+ return di
-+ }
-+
-+ offset := int(src[si]) | int(src[si+1])<<8
-+ if offset == 0 {
-+ return hasError
-+ }
-+ si += 2
-+
-+ // Match.
-+ mLen := b & 0xF
-+ if mLen == 0xF {
-+ for src[si] == 0xFF {
-+ mLen += 0xFF
-+ si++
-+ }
-+ mLen += int(src[si])
-+ si++
-+ }
-+ mLen += minMatch
-+
-+ // Copy the match.
-+ expanded := dst[di-offset:]
-+ if mLen > offset {
-+ // Efficiently copy the match dst[di-offset:di] into the dst slice.
-+ bytesToCopy := offset * (mLen / offset)
-+ for n := offset; n <= bytesToCopy+offset; n *= 2 {
-+ copy(expanded[n:], expanded[:n])
-+ }
-+ di += bytesToCopy
-+ mLen -= bytesToCopy
-+ }
-+ di += copy(dst[di:di+mLen], expanded[:mLen])
-+ }
-+}
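The "Efficiently copy the match" comment above hides a neat trick: when the match length exceeds the offset, the bytes to copy form a repeating pattern, so the copy can double in size each iteration instead of proceeding byte by byte. A standalone sketch of that doubling copy (the two-byte pattern and the lengths are made up for illustration):

```go
package main

import "fmt"

func main() {
	const offset = 2 // distance back to the start of the repeating pattern
	mLen := 10       // match length to reproduce

	// dst already holds the pattern "ab"; reserve room for the match bytes.
	dst := append([]byte("ab"), make([]byte, mLen)...)
	di := offset
	expanded := dst[di-offset:]

	if mLen > offset {
		// Copy in doubling chunks: 2, 4, 8, ... bytes per iteration.
		bytesToCopy := offset * (mLen / offset)
		for n := offset; n <= bytesToCopy+offset; n *= 2 {
			copy(expanded[n:], expanded[:n])
		}
		di += bytesToCopy
		mLen -= bytesToCopy
	}
	di += copy(dst[di:di+mLen], expanded[:mLen])

	fmt.Println(string(dst[:di])) // "abababababab": the pattern plus the 10-byte match
}
```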
-diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
-new file mode 100644
-index 0000000000000..1c45d1813cef4
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/errors.go
-@@ -0,0 +1,30 @@
-+package lz4
-+
-+import (
-+ ""errors""
-+ ""fmt""
-+ ""os""
-+ rdebug ""runtime/debug""
-+)
-+
-+var (
-+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
-+ // block is corrupted or the destination buffer is not large enough for the uncompressed data.
-+ ErrInvalidSourceShortBuffer = errors.New(""lz4: invalid source or destination buffer too short"")
-+ // ErrInvalid is returned when reading an invalid LZ4 archive.
-+ ErrInvalid = errors.New(""lz4: bad magic number"")
-+ // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
-+ ErrBlockDependency = errors.New(""lz4: block dependency not supported"")
-+ // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
-+ ErrUnsupportedSeek = errors.New(""lz4: can only seek forward from io.SeekCurrent"")
-+)
-+
-+func recoverBlock(e *error) {
-+ if r := recover(); r != nil && *e == nil {
-+ if debugFlag {
-+ fmt.Fprintln(os.Stderr, r)
-+ rdebug.PrintStack()
-+ }
-+ *e = ErrInvalidSourceShortBuffer
-+ }
-+}
-diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
-new file mode 100644
-index 0000000000000..7a76a6bce2b58
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
-@@ -0,0 +1,223 @@
-+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
-+// (https://github.com/Cyan4973/XXH/)
-+package xxh32
-+
-+import (
-+ ""encoding/binary""
-+)
-+
-+const (
-+ prime1 uint32 = 2654435761
-+ prime2 uint32 = 2246822519
-+ prime3 uint32 = 3266489917
-+ prime4 uint32 = 668265263
-+ prime5 uint32 = 374761393
-+
-+ primeMask = 0xFFFFFFFF
-+ prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
-+ prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
-+)
-+
-+// XXHZero represents an xxhash32 object with seed 0.
-+type XXHZero struct {
-+ v1 uint32
-+ v2 uint32
-+ v3 uint32
-+ v4 uint32
-+ totalLen uint64
-+ buf [16]byte
-+ bufused int
-+}
-+
-+// Sum appends the current hash to b and returns the resulting slice.
-+// It does not change the underlying hash state.
-+func (xxh XXHZero) Sum(b []byte) []byte {
-+ h32 := xxh.Sum32()
-+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
-+}
-+
-+// Reset resets the Hash to its initial state.
-+func (xxh *XXHZero) Reset() {
-+ xxh.v1 = prime1plus2
-+ xxh.v2 = prime2
-+ xxh.v3 = 0
-+ xxh.v4 = prime1minus
-+ xxh.totalLen = 0
-+ xxh.bufused = 0
-+}
-+
-+// Size returns the number of bytes returned by Sum().
-+func (xxh *XXHZero) Size() int {
-+ return 4
-+}
-+
-+// BlockSize gives the minimum number of bytes accepted by Write().
-+func (xxh *XXHZero) BlockSize() int {
-+ return 1
-+}
-+
-+// Write adds input bytes to the Hash.
-+// It never returns an error.
-+func (xxh *XXHZero) Write(input []byte) (int, error) {
-+ if xxh.totalLen == 0 {
-+ xxh.Reset()
-+ }
-+ n := len(input)
-+ m := xxh.bufused
-+
-+ xxh.totalLen += uint64(n)
-+
-+ r := len(xxh.buf) - m
-+ if n < r {
-+ copy(xxh.buf[m:], input)
-+ xxh.bufused += len(input)
-+ return n, nil
-+ }
-+
-+ p := 0
-+ // Causes compiler to work directly from registers instead of stack:
-+ v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
-+ if m > 0 {
-+ // some data left from previous update
-+ copy(xxh.buf[xxh.bufused:], input[:r])
-+ xxh.bufused += len(input) - r
-+
-+ // fast rotl(13)
-+ buf := xxh.buf[:16] // BCE hint.
-+ v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
-+ v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
-+ v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
-+ v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
-+ p = r
-+ xxh.bufused = 0
-+ }
-+
-+ for n := n - 16; p <= n; p += 16 {
-+ sub := input[p:][:16] //BCE hint for compiler
-+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-+ }
-+ xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
-+
-+ copy(xxh.buf[xxh.bufused:], input[p:])
-+ xxh.bufused += len(input) - p
-+
-+ return n, nil
-+}
-+
-+// Sum32 returns the 32 bits Hash value.
-+func (xxh *XXHZero) Sum32() uint32 {
-+ h32 := uint32(xxh.totalLen)
-+ if h32 >= 16 {
-+ h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
-+ } else {
-+ h32 += prime5
-+ }
-+
-+ p := 0
-+ n := xxh.bufused
-+ buf := xxh.buf
-+ for n := n - 4; p <= n; p += 4 {
-+ h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
-+ h32 = rol17(h32) * prime4
-+ }
-+ for ; p < n; p++ {
-+ h32 += uint32(buf[p]) * prime5
-+ h32 = rol11(h32) * prime1
-+ }
-+
-+ h32 ^= h32 >> 15
-+ h32 *= prime2
-+ h32 ^= h32 >> 13
-+ h32 *= prime3
-+ h32 ^= h32 >> 16
-+
-+ return h32
-+}
-+
-+// ChecksumZero returns the 32bits Hash value.
-+func ChecksumZero(input []byte) uint32 {
-+ n := len(input)
-+ h32 := uint32(n)
-+
-+ if n < 16 {
-+ h32 += prime5
-+ } else {
-+ v1 := prime1plus2
-+ v2 := prime2
-+ v3 := uint32(0)
-+ v4 := prime1minus
-+ p := 0
-+ for n := n - 16; p <= n; p += 16 {
-+ sub := input[p:][:16] //BCE hint for compiler
-+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-+ }
-+ input = input[p:]
-+ n -= p
-+ h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-+ }
-+
-+ p := 0
-+ for n := n - 4; p <= n; p += 4 {
-+ h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
-+ h32 = rol17(h32) * prime4
-+ }
-+ for p < n {
-+ h32 += uint32(input[p]) * prime5
-+ h32 = rol11(h32) * prime1
-+ p++
-+ }
-+
-+ h32 ^= h32 >> 15
-+ h32 *= prime2
-+ h32 ^= h32 >> 13
-+ h32 *= prime3
-+ h32 ^= h32 >> 16
-+
-+ return h32
-+}
-+
-+// Uint32Zero hashes x with seed 0.
-+func Uint32Zero(x uint32) uint32 {
-+ h := prime5 + 4 + x*prime3
-+ h = rol17(h) * prime4
-+ h ^= h >> 15
-+ h *= prime2
-+ h ^= h >> 13
-+ h *= prime3
-+ h ^= h >> 16
-+ return h
-+}
-+
-+func rol1(u uint32) uint32 {
-+ return u<<1 | u>>31
-+}
-+
-+func rol7(u uint32) uint32 {
-+ return u<<7 | u>>25
-+}
-+
-+func rol11(u uint32) uint32 {
-+ return u<<11 | u>>21
-+}
-+
-+func rol12(u uint32) uint32 {
-+ return u<<12 | u>>20
-+}
-+
-+func rol13(u uint32) uint32 {
-+ return u<<13 | u>>19
-+}
-+
-+func rol17(u uint32) uint32 {
-+ return u<<17 | u>>15
-+}
-+
-+func rol18(u uint32) uint32 {
-+ return u<<18 | u>>14
-+}
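For reference, the two xxh32 entry points relate as follows: the one-shot `ChecksumZero` and the streaming `XXHZero` produce the same value for the same bytes. Note that the package sits under `internal/`, so this sketch only compiles from within the lz4 module itself; it is shown purely to illustrate the API the frame reader and writer rely on:

```go
package main

import (
	"fmt"

	// Internal package: importable only from within github.com/pierrec/lz4.
	"github.com/pierrec/lz4/internal/xxh32"
)

func main() {
	data := []byte("hello lz4")

	// One-shot checksum over the whole buffer.
	fmt.Printf("%08x\n", xxh32.ChecksumZero(data))

	// Streaming checksum: feeding the same bytes in pieces gives the same value.
	var h xxh32.XXHZero
	_, _ = h.Write(data[:4])
	_, _ = h.Write(data[4:])
	fmt.Printf("%08x\n", h.Sum32())
}
```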
-diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
-new file mode 100644
-index 0000000000000..21dcfaeb93d16
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/lz4.go
-@@ -0,0 +1,113 @@
-+// Package lz4 implements reading and writing lz4 compressed data (a frame),
-+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
-+//
-+// Although the block level compression and decompression functions are exposed and are fully compatible
-+// with the lz4 block format definition, they are low level and should not be used directly.
-+// For a complete description of an lz4 compressed block, see:
-+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
-+//
-+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
-+//
-+package lz4
-+
-+import ""math/bits""
-+
-+import ""sync""
-+
-+const (
-+ // Extension is the LZ4 frame file name extension
-+ Extension = "".lz4""
-+ // Version is the LZ4 frame format version
-+ Version = 1
-+
-+ frameMagic uint32 = 0x184D2204
-+ frameSkipMagic uint32 = 0x184D2A50
-+
-+ // The following constants are used to setup the compression algorithm.
-+ minMatch = 4 // the minimum size of the match sequence (4 bytes)
-+ winSizeLog = 16 // LZ4 64Kb window size limit
-+ winSize = 1 << winSizeLog
-+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
-+ compressedBlockFlag = 1 << 31
-+ compressedBlockMask = compressedBlockFlag - 1
-+
-+ // hashLog determines the size of the hash table used to quickly find a previous match position.
-+ // Its value influences the compression speed and memory usage, the lower the faster,
-+ // but at the expense of the compression ratio.
-+ // 16 seems to be the best compromise for fast compression.
-+ hashLog = 16
-+ htSize = 1 << hashLog
-+
-+ mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
-+)
-+
-+// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
-+const (
-+ blockSize64K = 1 << (16 + 2*iota)
-+ blockSize256K
-+ blockSize1M
-+ blockSize4M
-+)
-+
-+var (
-+ // Keep a pool of buffers for each valid block sizes.
-+ bsMapValue = [...]*sync.Pool{
-+ newBufferPool(2 * blockSize64K),
-+ newBufferPool(2 * blockSize256K),
-+ newBufferPool(2 * blockSize1M),
-+ newBufferPool(2 * blockSize4M),
-+ }
-+)
-+
-+// newBufferPool returns a pool for buffers of the given size.
-+func newBufferPool(size int) *sync.Pool {
-+ return &sync.Pool{
-+ New: func() interface{} {
-+ return make([]byte, size)
-+ },
-+ }
-+}
-+
-+// getBuffer retrieves a buffer of the given size from its pool.
-+func getBuffer(size int) []byte {
-+ idx := blockSizeValueToIndex(size) - 4
-+ return bsMapValue[idx].Get().([]byte)
-+}
-+
-+// putBuffer returns a buffer to its pool.
-+func putBuffer(size int, buf []byte) {
-+ if cap(buf) > 0 {
-+ idx := blockSizeValueToIndex(size) - 4
-+ bsMapValue[idx].Put(buf[:cap(buf)])
-+ }
-+}
-+func blockSizeIndexToValue(i byte) int {
-+ return 1 << (16 + 2*uint(i))
-+}
-+func isValidBlockSize(size int) bool {
-+ const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
-+
-+ return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
-+}
-+func blockSizeValueToIndex(size int) byte {
-+ return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
-+}
-+
-+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
-+// The default values match those of the LZ4 frame format definition
-+// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
-+//
-+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
-+// It is the caller's responsibility to check them if necessary.
-+type Header struct {
-+ BlockChecksum bool // Compressed blocks checksum flag.
-+ NoChecksum bool // Frame checksum flag.
-+ BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
-+ Size uint64 // Frame total size. It is _not_ computed by the Writer.
-+ CompressionLevel int // Compression level (higher is better, use 0 for fastest compression).
-+ done bool // Header processed flag (Read or Write and checked).
-+}
-+
-+func (h *Header) Reset() {
-+ h.done = false
-+}
-diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
-new file mode 100644
-index 0000000000000..9a0fb00709d56
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
-@@ -0,0 +1,29 @@
-+//+build go1.10
-+
-+package lz4
-+
-+import (
-+ ""fmt""
-+ ""strings""
-+)
-+
-+func (h Header) String() string {
-+ var s strings.Builder
-+
-+ s.WriteString(fmt.Sprintf(""%T{"", h))
-+ if h.BlockChecksum {
-+ s.WriteString(""BlockChecksum: true "")
-+ }
-+ if h.NoChecksum {
-+ s.WriteString(""NoChecksum: true "")
-+ }
-+ if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
-+ s.WriteString(fmt.Sprintf(""BlockMaxSize: %d "", bs))
-+ }
-+ if l := h.CompressionLevel; l != 0 {
-+ s.WriteString(fmt.Sprintf(""CompressionLevel: %d "", l))
-+ }
-+ s.WriteByte('}')
-+
-+ return s.String()
-+}
-diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
-new file mode 100644
-index 0000000000000..12c761a2e7f97
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
-@@ -0,0 +1,29 @@
-+//+build !go1.10
-+
-+package lz4
-+
-+import (
-+ ""bytes""
-+ ""fmt""
-+)
-+
-+func (h Header) String() string {
-+ var s bytes.Buffer
-+
-+ s.WriteString(fmt.Sprintf(""%T{"", h))
-+ if h.BlockChecksum {
-+ s.WriteString(""BlockChecksum: true "")
-+ }
-+ if h.NoChecksum {
-+ s.WriteString(""NoChecksum: true "")
-+ }
-+ if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
-+ s.WriteString(fmt.Sprintf(""BlockMaxSize: %d "", bs))
-+ }
-+ if l := h.CompressionLevel; l != 0 {
-+ s.WriteString(fmt.Sprintf(""CompressionLevel: %d "", l))
-+ }
-+ s.WriteByte('}')
-+
-+ return s.String()
-+}
-diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
-new file mode 100644
-index 0000000000000..87dd72bd0db3e
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/reader.go
-@@ -0,0 +1,335 @@
-+package lz4
-+
-+import (
-+ ""encoding/binary""
-+ ""fmt""
-+ ""io""
-+ ""io/ioutil""
-+
-+ ""github.com/pierrec/lz4/internal/xxh32""
-+)
-+
-+// Reader implements the LZ4 frame decoder.
-+// The Header is set after the first call to Read().
-+// The Header may change between Read() calls in case of concatenated frames.
-+type Reader struct {
-+ Header
-+ // Handler called when a block has been successfully read.
-+ // It provides the number of bytes read.
-+ OnBlockDone func(size int)
-+
-+ buf [8]byte // Scrap buffer.
-+ pos int64 // Current position in src.
-+ src io.Reader // Source.
-+ zdata []byte // Compressed data.
-+ data []byte // Uncompressed data.
-+ idx int // Index of unread bytes into data.
-+ checksum xxh32.XXHZero // Frame hash.
-+ skip int64 // Bytes to skip before next read.
-+ dpos int64 // Position in dest
-+}
-+
-+// NewReader returns a new LZ4 frame decoder.
-+// No access to the underlying io.Reader is performed.
-+func NewReader(src io.Reader) *Reader {
-+ r := &Reader{src: src}
-+ return r
-+}
-+
-+// readHeader checks the frame magic number and parses the frame descriptor.
-+// Skippable frames are supported even as a first frame, although the LZ4
-+// specification recommends that skippable frames not be used as first frames.
-+func (z *Reader) readHeader(first bool) error {
-+ defer z.checksum.Reset()
-+
-+ buf := z.buf[:]
-+ for {
-+ magic, err := z.readUint32()
-+ if err != nil {
-+ z.pos += 4
-+ if !first && err == io.ErrUnexpectedEOF {
-+ return io.EOF
-+ }
-+ return err
-+ }
-+ if magic == frameMagic {
-+ break
-+ }
-+ if magic>>8 != frameSkipMagic>>8 {
-+ return ErrInvalid
-+ }
-+ skipSize, err := z.readUint32()
-+ if err != nil {
-+ return err
-+ }
-+ z.pos += 4
-+ m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
-+ if err != nil {
-+ return err
-+ }
-+ z.pos += m
-+ }
-+
-+ // Header.
-+ if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
-+ return err
-+ }
-+ z.pos += 8
-+
-+ b := buf[0]
-+ if v := b >> 6; v != Version {
-+ return fmt.Errorf(""lz4: invalid version: got %d; expected %d"", v, Version)
-+ }
-+ if b>>5&1 == 0 {
-+ return ErrBlockDependency
-+ }
-+ z.BlockChecksum = b>>4&1 > 0
-+ frameSize := b>>3&1 > 0
-+ z.NoChecksum = b>>2&1 == 0
-+
-+ bmsID := buf[1] >> 4 & 0x7
-+ if bmsID < 4 || bmsID > 7 {
-+ return fmt.Errorf(""lz4: invalid block max size ID: %d"", bmsID)
-+ }
-+ bSize := blockSizeIndexToValue(bmsID - 4)
-+ z.BlockMaxSize = bSize
-+
-+ // Allocate the compressed/uncompressed buffers.
-+ // The compressed buffer cannot exceed the uncompressed one.
-+ if n := 2 * bSize; cap(z.zdata) < n {
-+ z.zdata = make([]byte, n, n)
-+ }
-+ if debugFlag {
-+ debug(""header block max size id=%d size=%d"", bmsID, bSize)
-+ }
-+ z.zdata = z.zdata[:bSize]
-+ z.data = z.zdata[:cap(z.zdata)][bSize:]
-+ z.idx = len(z.data)
-+
-+ _, _ = z.checksum.Write(buf[0:2])
-+
-+ if frameSize {
-+ buf := buf[:8]
-+ if _, err := io.ReadFull(z.src, buf); err != nil {
-+ return err
-+ }
-+ z.Size = binary.LittleEndian.Uint64(buf)
-+ z.pos += 8
-+ _, _ = z.checksum.Write(buf)
-+ }
-+
-+ // Header checksum.
-+ if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
-+ return err
-+ }
-+ z.pos++
-+ if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
-+ return fmt.Errorf(""lz4: invalid header checksum: got %x; expected %x"", buf[0], h)
-+ }
-+
-+ z.Header.done = true
-+ if debugFlag {
-+ debug(""header read: %v"", z.Header)
-+ }
-+
-+ return nil
-+}
-+
-+// Read decompresses data from the underlying source into the supplied buffer.
-+//
-+// Since there can be multiple streams concatenated, Header values may
-+// change between calls to Read(). If that is the case, no data is actually read from
-+// the underlying io.Reader, to allow for potential input buffer resizing.
-+func (z *Reader) Read(buf []byte) (int, error) {
-+ if debugFlag {
-+ debug(""Read buf len=%d"", len(buf))
-+ }
-+ if !z.Header.done {
-+ if err := z.readHeader(true); err != nil {
-+ return 0, err
-+ }
-+ if debugFlag {
-+ debug(""header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d"",
-+ len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
-+ }
-+ }
-+
-+ if len(buf) == 0 {
-+ return 0, nil
-+ }
-+
-+ if z.idx == len(z.data) {
-+ // No data ready for reading, process the next block.
-+ if debugFlag {
-+ debug(""reading block from writer"")
-+ }
-+ // Reset uncompressed buffer
-+ z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-+
-+ // Block length: 0 = end of frame, highest bit set: uncompressed.
-+ bLen, err := z.readUint32()
-+ if err != nil {
-+ return 0, err
-+ }
-+ z.pos += 4
-+
-+ if bLen == 0 {
-+ // End of frame reached.
-+ if !z.NoChecksum {
-+ // Validate the frame checksum.
-+ checksum, err := z.readUint32()
-+ if err != nil {
-+ return 0, err
-+ }
-+ if debugFlag {
-+ debug(""frame checksum got=%x / want=%x"", z.checksum.Sum32(), checksum)
-+ }
-+ z.pos += 4
-+ if h := z.checksum.Sum32(); checksum != h {
-+ return 0, fmt.Errorf(""lz4: invalid frame checksum: got %x; expected %x"", h, checksum)
-+ }
-+ }
-+
-+ // Get ready for the next concatenated frame and keep the position.
-+ pos := z.pos
-+ z.Reset(z.src)
-+ z.pos = pos
-+
-+ // Since multiple frames can be concatenated, check for more.
-+ return 0, z.readHeader(false)
-+ }
-+
-+ if debugFlag {
-+ debug(""raw block size %d"", bLen)
-+ }
-+ if bLen&compressedBlockFlag > 0 {
-+ // Uncompressed block.
-+ bLen &= compressedBlockMask
-+ if debugFlag {
-+ debug(""uncompressed block size %d"", bLen)
-+ }
-+ if int(bLen) > cap(z.data) {
-+ return 0, fmt.Errorf(""lz4: invalid block size: %d"", bLen)
-+ }
-+ z.data = z.data[:bLen]
-+ if _, err := io.ReadFull(z.src, z.data); err != nil {
-+ return 0, err
-+ }
-+ z.pos += int64(bLen)
-+ if z.OnBlockDone != nil {
-+ z.OnBlockDone(int(bLen))
-+ }
-+
-+ if z.BlockChecksum {
-+ checksum, err := z.readUint32()
-+ if err != nil {
-+ return 0, err
-+ }
-+ z.pos += 4
-+
-+ if h := xxh32.ChecksumZero(z.data); h != checksum {
-+ return 0, fmt.Errorf(""lz4: invalid block checksum: got %x; expected %x"", h, checksum)
-+ }
-+ }
-+
-+ } else {
-+ // Compressed block.
-+ if debugFlag {
-+ debug(""compressed block size %d"", bLen)
-+ }
-+ if int(bLen) > cap(z.data) {
-+ return 0, fmt.Errorf(""lz4: invalid block size: %d"", bLen)
-+ }
-+ zdata := z.zdata[:bLen]
-+ if _, err := io.ReadFull(z.src, zdata); err != nil {
-+ return 0, err
-+ }
-+ z.pos += int64(bLen)
-+
-+ if z.BlockChecksum {
-+ checksum, err := z.readUint32()
-+ if err != nil {
-+ return 0, err
-+ }
-+ z.pos += 4
-+
-+ if h := xxh32.ChecksumZero(zdata); h != checksum {
-+ return 0, fmt.Errorf(""lz4: invalid block checksum: got %x; expected %x"", h, checksum)
-+ }
-+ }
-+
-+ n, err := UncompressBlock(zdata, z.data)
-+ if err != nil {
-+ return 0, err
-+ }
-+ z.data = z.data[:n]
-+ if z.OnBlockDone != nil {
-+ z.OnBlockDone(n)
-+ }
-+ }
-+
-+ if !z.NoChecksum {
-+ _, _ = z.checksum.Write(z.data)
-+ if debugFlag {
-+ debug(""current frame checksum %x"", z.checksum.Sum32())
-+ }
-+ }
-+ z.idx = 0
-+ }
-+
-+ if z.skip > int64(len(z.data[z.idx:])) {
-+ z.skip -= int64(len(z.data[z.idx:]))
-+ z.dpos += int64(len(z.data[z.idx:]))
-+ z.idx = len(z.data)
-+ return 0, nil
-+ }
-+
-+ z.idx += int(z.skip)
-+ z.dpos += z.skip
-+ z.skip = 0
-+
-+ n := copy(buf, z.data[z.idx:])
-+ z.idx += n
-+ z.dpos += int64(n)
-+ if debugFlag {
-+ debug(""copied %d bytes to input"", n)
-+ }
-+
-+ return n, nil
-+}
-+
-+// Seek implements io.Seeker, but supports seeking forward from the current
-+// position only. Any other seek will return an error. Allows skipping output
-+// bytes which aren't needed, which in some scenarios is faster than reading
-+// and discarding them.
-+// Note this may cause future calls to Read() to read 0 bytes if all of the
-+// data they would have returned is skipped.
-+func (z *Reader) Seek(offset int64, whence int) (int64, error) {
-+ if offset < 0 || whence != io.SeekCurrent {
-+ return z.dpos + z.skip, ErrUnsupportedSeek
-+ }
-+ z.skip += offset
-+ return z.dpos + z.skip, nil
-+}
-+
-+// Reset discards the Reader's state and makes it equivalent to the
-+// result of its original state from NewReader, but reading from r instead.
-+// This permits reusing a Reader rather than allocating a new one.
-+func (z *Reader) Reset(r io.Reader) {
-+ z.Header = Header{}
-+ z.pos = 0
-+ z.src = r
-+ z.zdata = z.zdata[:0]
-+ z.data = z.data[:0]
-+ z.idx = 0
-+ z.checksum.Reset()
-+}
-+
-+// readUint32 reads a uint32 from the source into the scratch buffer.
-+// The idea is to make use of the already allocated buffer, avoiding additional allocations.
-+func (z *Reader) readUint32() (uint32, error) {
-+ buf := z.buf[:4]
-+ _, err := io.ReadFull(z.src, buf)
-+ x := binary.LittleEndian.Uint32(buf)
-+ return x, err
-+}
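Putting the Reader together with the forward-only Seek described above, a sketch that skips part of the decompressed output might look like this; the sample payload and skip count are arbitrary, and NewWriter/Close come from writer.go (only partially shown in this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	// Build a small LZ4 frame in memory so the Reader has something to consume.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := io.Copy(zw, strings.NewReader("hello lz4 world")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	zr := lz4.NewReader(&frame)

	// Seek only supports io.SeekCurrent with a non-negative offset:
	// skip the first 6 uncompressed bytes ("hello ").
	if _, err := zr.Seek(6, io.SeekCurrent); err != nil {
		panic(err)
	}

	rest, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(rest)) // "lz4 world"
}
```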
-diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
-new file mode 100644
-index 0000000000000..324f1386b8ad8
---- /dev/null
-+++ b/vendor/github.com/pierrec/lz4/writer.go
-@@ -0,0 +1,408 @@
-+package lz4
-+
-+import (
-+ ""encoding/binary""
-+ ""fmt""
-+ ""github.com/pierrec/lz4/internal/xxh32""
-+ ""io""
-+ ""runtime""
-+)
-+
-+// zResult contains the results of compressing a block.
-+type zResult struct {
-+ size uint32 // Block header
-+ data []byte // Compressed data
-+ checksum uint32 // Data checksum
-+}
-+
-+// Writer implements the LZ4 frame encoder.
-+type Writer struct {
-+ Header
-+ // Handler called when a block has been successfully written out.
-+ // It provides the number of bytes written.
-+ OnBlockDone func(size int)
-+
-+ buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
-+ dst io.Writer // Destination.
-+ checksum xxh32.XXHZero // Frame checksum.
-+ data []byte // Data to be compressed + buffer for compressed data.
-+ idx int // Index into data.
-+ hashtable [winSize]int // Hash table used in CompressBlock().
-+
-+ // For concurrency.
-+ c chan chan zResult // Channel for block compression goroutines and writer goroutine.
-+ err error // Any error encountered while writing to the underlying destination.
-+}
-+
-+// NewWriter returns a new LZ4 frame encoder.
-+// No access to the underlying io.Writer is performed.
-+// The supplied Header is checked at the first Write.
-+// It is ok to change it before the first Write but then not until a Reset() is performed.
-+func NewWriter(dst io.Writer) *Writer {
-+ z := new(Writer)
-+ z.Reset(dst)
-+ return z
-+}
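-+
-+// exampleWriterWithOptions is an editor-added, hypothetical sketch and is not
-+// part of the upstream source: as the NewWriter doc above notes, the Header
-+// may be adjusted after construction but before the first Write.
-+func exampleWriterWithOptions(dst io.Writer) *Writer {
-+ zw := NewWriter(dst)
-+ zw.Header.BlockChecksum = true // checksum every block
-+ zw.Header.BlockMaxSize = blockSize4M // explicit block max size (the default)
-+ return zw
-+}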
-+
-+// WithConcurrency sets the number of concurrent goroutines used for compression.
-+// A negative value sets the concurrency to GOMAXPROCS.
-+func (z *Writer) WithConcurrency(n int) *Writer {
-+ switch {
-+ case n == 0 || n == 1:
-+ z.c = nil
-+ return z
-+ case n < 0:
-+ n = runtime.GOMAXPROCS(0)
-+ }
-+ z.c = make(chan chan zResult, n)
-+ // Writer goroutine managing concurrent block compression goroutines.
-+ go func() {
-+ // Process next block compression item.
-+ for c := range z.c {
-+ // Read the next compressed block result.
-+ // Waiting here ensures that the blocks are output in the order they were sent.
-+ // The incoming channel is always closed as it indicates to the caller that
-+ // the block has been processed.
-+ res := <-c
-+ n := len(res.data)
-+ if n == 0 {
-+ // Notify the block compression routine that we are done with its result.
-+ // This is used when a sentinel block is sent to terminate the compression.
-+ close(c)
-+ return
-+ }
-+ // Write the block.
-+ if err := z.writeUint32(res.size); err != nil && z.err == nil {
-+ z.err = err
-+ }
-+ if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
-+ z.err = err
-+ }
-+ if z.BlockChecksum {
-+ if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
-+ z.err = err
-+ }
-+ }
-+ if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
-+ // It is now safe to release the buffer as no longer in use by any goroutine.
-+ putBuffer(cap(res.data), res.data)
-+ }
-+ if h := z.OnBlockDone; h != nil {
-+ h(n)
-+ }
-+ close(c)
-+ }
-+ }()
-+ return z
-+}
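-+
-+// exampleConcurrentWriter is an editor-added, hypothetical sketch and is not
-+// part of the upstream source: it shows the builder-style call above, using a
-+// negative value so the concurrency defaults to GOMAXPROCS.
-+func exampleConcurrentWriter(dst io.Writer) *Writer {
-+ return NewWriter(dst).WithConcurrency(-1)
-+}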
-+
-+// newBuffers instantiates a new pooled buffer whose size matches the one in Header.
-+// Its first half holds the data to be compressed; the remainder is reused for the compressed output.
-+func (z *Writer) newBuffers() {
-+ bSize := z.Header.BlockMaxSize
-+ buf := getBuffer(bSize)
-+ z.data = buf[:bSize] // Uncompressed buffer is the first half.
-+}
-+
-+// freeBuffers puts the writer's buffers back to the pool.
-+func (z *Writer) freeBuffers() {
-+ // Put the buffer back into the pool, if any.
-+ putBuffer(z.Header.BlockMaxSize, z.data)
-+ z.data = nil
-+}
-+
-+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
-+func (z *Writer) writeHeader() error {
-+ // Default to 4Mb if BlockMaxSize is not set.
-+ if z.Header.BlockMaxSize == 0 {
-+ z.Header.BlockMaxSize = blockSize4M
-+ }
-+ // The only option that needs to be validated.
-+ bSize := z.Header.BlockMaxSize
-+ if !isValidBlockSize(z.Header.BlockMaxSize) {
-+ return fmt.Errorf(""lz4: invalid block max size: %d"", bSize)
-+ }
-+ // Allocate the compressed/uncompressed buffers.
-+ // The compressed buffer cannot exceed the uncompressed one.
-+ z.newBuffers()
-+ z.idx = 0
-+
-+ // Size is optional.
-+ buf := z.buf[:]
-+
-+ // Set the fixed size data: magic number, block max size and flags.
-+ binary.LittleEndian.PutUint32(buf[0:], frameMagic)
-+ flg := byte(Version << 6)
-+ flg |= 1 << 5 // No block dependency.
-+ if z.Header.BlockChecksum {
-+ flg |= 1 << 4
-+ }
-+ if z.Header.Size > 0 {
-+ flg |= 1 << 3
-+ }
-+ if !z.Header.NoChecksum {
-+ flg |= 1 << 2
-+ }
-+ buf[4] = flg
-+ buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
-+
-+ // Current buffer size: magic(4) + flags(1) + block max size (1).
-+ n := 6
-+ // Optional items.
-+ if z.Header.Size > 0 {
-+ binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
-+ n += 8
-+ }
-+
-+ // The header checksum includes the flags, block max size and optional Size.
-+ buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
-+ z.checksum.Reset()
-+
-+ // Header ready, write it out.
-+ if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
-+ return err
-+ }
-+ z.Header.done = true
-+ if debugFlag {
-+ debug(""wrote header %v"", z.Header)
-+ }
-+
-+ return nil
-+}
-+
-+// Write compresses data from the supplied buffer into the underlying io.Writer.
-+// Write does not return until the data has been written.
-+func (z *Writer) Write(buf []byte) (int, error) {
-+ if !z.Header.done {
-+ if err := z.writeHeader(); err != nil {
-+ return 0, err
-+ }
-+ }
-+ if debugFlag {
-+ debug(""input buffer len=%d index=%d"", len(buf), z.idx)
-+ }
-+
-+ zn := len(z.data)
-+ var n int
-+ for len(buf) > 0 {
-+ if z.idx == 0 && len(buf) >= zn {
-+ // Avoid a copy as there is enough data for a block.
-+ if err := z.compressBlock(buf[:zn]); err != nil {
-+ return n, err
-+ }
-+ n += zn
-+ buf = buf[zn:]
-+ continue
-+ }
-+ // Accumulate the data to be compressed.
-+ m := copy(z.data[z.idx:], buf)
-+ n += m
-+ z.idx += m
-+ buf = buf[m:]
-+ if debugFlag {
-+ debug(""%d bytes copied to buf, current index %d"", n, z.idx)
-+ }
-+
-+ if z.idx < len(z.data) {
-+ // Buffer not filled.
-+ if debugFlag {
-+ debug(""need more data for compression"")
-+ }
-+ return n, nil
-+ }
-+
-+ // Buffer full.
-+ if err := z.compressBlock(z.data); err != nil {
-+ return n, err
-+ }
-+ z.idx = 0
-+ }
-+
-+ return n, nil
-+}
-+
-+// compressBlock compresses a block.
-+func (z *Writer) compressBlock(data []byte) error {
-+ if !z.NoChecksum {
-+ _, _ = z.checksum.Write(data)
-+ }
-+
-+ if z.c != nil {
-+ c := make(chan zResult)
-+ z.c <- c // Send now to guarantee order
-+ go writerCompressBlock(c, z.Header, data)
-+ return nil
-+ }
-+
-+ zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
-+ // The compressed block size cannot exceed the input's.
-+ var zn int
-+
-+ if level := z.Header.CompressionLevel; level != 0 {
-+ zn, _ = CompressBlockHC(data, zdata, level)
-+ } else {
-+ zn, _ = CompressBlock(data, zdata, z.hashtable[:])
-+ }
-+
-+ var bLen uint32
-+ if debugFlag {
-+ debug(""block compression %d => %d"", len(data), zn)
-+ }
-+ if zn > 0 && zn < len(data) {
-+ // Compressible and compressed size smaller than uncompressed: ok!
-+ bLen = uint32(zn)
-+ zdata = zdata[:zn]
-+ } else {
-+ // Uncompressed block.
-+ bLen = uint32(len(data)) | compressedBlockFlag
-+ zdata = data
-+ }
-+ if debugFlag {
-+ debug(""block compression to be written len=%d data len=%d"", bLen, len(zdata))
-+ }
-+
-+ // Write the block.
-+ if err := z.writeUint32(bLen); err != nil {
-+ return err
-+ }
-+ written, err := z.dst.Write(zdata)
-+ if err != nil {
-+ return err
-+ }
-+ if h := z.OnBlockDone; h != nil {
-+ h(written)
-+ }
-+
-+ if !z.BlockChecksum {
-+ if debugFlag {
-+ debug(""current frame checksum %x"", z.checksum.Sum32())
-+ }
-+ return nil
-+ }
-+ checksum := xxh32.ChecksumZero(zdata)
-+ if debugFlag {
-+ debug(""block checksum %x"", checksum)
-+ defer func() { debug(""current frame checksum %x"", z.checksum.Sum32()) }()
-+ }
-+ return z.writeUint32(checksum)
-+}
-+
-+// Flush flushes any pending compressed data to the underlying writer.
-+// Flush does not return until the data has been written.
-+// If the underlying writer returns an error, Flush returns that error.
-+func (z *Writer) Flush() error {
-+ if debugFlag {
-+ debug(""flush with index %d"", z.idx)
-+ }
-+ if z.idx == 0 {
-+ return nil
-+ }
-+
-+ data := z.data[:z.idx]
-+ z.idx = 0
-+ if z.c == nil {
-+ return z.compressBlock(data)
-+ }
-+ if !z.NoChecksum {
-+ _, _ = z.checksum.Write(data)
-+ }
-+ c := make(chan zResult)
-+ z.c <- c
-+ writerCompressBlock(c, z.Header, data)
-+ return nil
-+}
-+
-+func (z *Writer) close() error {
-+ if z.c == nil {
-+ return nil
-+ }
-+ // Send a sentinel block (no data to compress) to terminate the writer main goroutine.
-+ c := make(chan zResult)
-+ z.c <- c
-+ c <- zResult{}
-+ // Wait for the main goroutine to complete.
-+ <-c
-+ // At this point the main goroutine has shut down or is about to return.
-+ z.c = nil
-+ return z.err
-+}
-+
-+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
-+func (z *Writer) Close() error {
-+ if !z.Header.done {
-+ if err := z.writeHeader(); err != nil {
-+ return err
-+ }
-+ }
-+ if err := z.Flush(); err != nil {
-+ return err
-+ }
-+ if err := z.close(); err != nil {
-+ return err
-+ }
-+ z.freeBuffers()
-+
-+ if debugFlag {
-+ debug(""writing last empty block"")
-+ }
-+ if err := z.writeUint32(0); err != nil {
-+ return err
-+ }
-+ if z.NoChecksum {
-+ return nil
-+ }
-+ checksum := z.checksum.Sum32()
-+ if debugFlag {
-+ debug(""stream checksum %x"", checksum)
-+ }
-+ return z.writeUint32(checksum)
-+}
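-+
-+// exampleCompressStream is an editor-added, hypothetical sketch and is not
-+// part of the upstream source: it wires Write and Close together, compressing
-+// everything read from src into a single LZ4 frame written to dst.
-+func exampleCompressStream(dst io.Writer, src io.Reader) error {
-+ zw := NewWriter(dst)
-+ if _, err := io.Copy(zw, src); err != nil {
-+ return err
-+ }
-+ // Close flushes any buffered block, writes the end-of-frame marker and,
-+ // unless NoChecksum is set, the whole-frame checksum.
-+ return zw.Close()
-+}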
-+
-+// Reset clears the state of the Writer z such that it is equivalent to its
-+// initial state from NewWriter, but instead writing to w.
-+// No access to the underlying io.Writer is performed.
-+func (z *Writer) Reset(w io.Writer) {
-+ n := cap(z.c)
-+ _ = z.close()
-+ z.freeBuffers()
-+ z.Header.Reset()
-+ z.dst = w
-+ z.checksum.Reset()
-+ z.idx = 0
-+ z.err = nil
-+ z.WithConcurrency(n)
-+}
-+
-+// writeUint32 writes a uint32 to the underlying writer.
-+func (z *Writer) writeUint32(x uint32) error {
-+ buf := z.buf[:4]
-+ binary.LittleEndian.PutUint32(buf, x)
-+ _, err := z.dst.Write(buf)
-+ return err
-+}
-+
-+// writerCompressBlock compresses data into a pooled buffer and sends the result
-+// on the supplied channel.
-+func writerCompressBlock(c chan zResult, header Header, data []byte) {
-+ zdata := getBuffer(header.BlockMaxSize)
-+ // The compressed block size cannot exceed the input's.
-+ var zn int
-+ if level := header.CompressionLevel; level != 0 {
-+ zn, _ = CompressBlockHC(data, zdata, level)
-+ } else {
-+ var hashTable [winSize]int
-+ zn, _ = CompressBlock(data, zdata, hashTable[:])
-+ }
-+ var res zResult
-+ if zn > 0 && zn < len(data) {
-+ res.size = uint32(zn)
-+ res.data = zdata[:zn]
-+ } else {
-+ res.size = uint32(len(data)) | compressedBlockFlag
-+ res.data = data
-+ }
-+ if header.BlockChecksum {
-+ res.checksum = xxh32.ChecksumZero(res.data)
-+ }
-+ c <- res
-+}
-diff --git a/vendor/modules.txt b/vendor/modules.txt
-index 30f1902a8266d..6d085fed8aa85 100644
---- a/vendor/modules.txt
-+++ b/vendor/modules.txt
-@@ -340,11 +340,9 @@ github.com/jonboulle/clockwork
- github.com/jpillora/backoff
- # github.com/json-iterator/go v1.1.7
- github.com/json-iterator/go
--# github.com/klauspost/compress v1.7.4
-+# github.com/klauspost/compress v1.9.4
- github.com/klauspost/compress/flate
- github.com/klauspost/compress/gzip
--# github.com/klauspost/cpuid v1.2.1
--github.com/klauspost/cpuid
- # github.com/konsorten/go-windows-terminal-sequences v1.0.2
- github.com/konsorten/go-windows-terminal-sequences
- # github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515
-@@ -387,6 +385,9 @@ github.com/opentracing-contrib/go-stdlib/nethttp
- github.com/opentracing/opentracing-go
- github.com/opentracing/opentracing-go/ext
- github.com/opentracing/opentracing-go/log
-+# github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible
-+github.com/pierrec/lz4
-+github.com/pierrec/lz4/internal/xxh32
- # github.com/pkg/errors v0.8.1
- github.com/pkg/errors
- # github.com/pmezard/go-difflib v1.0.0",unknown,"Adds configurable compression algorithms for chunks (#1411)
-
-* Adds L4Z encoding.
-
-Signed-off-by: Cyril Tovena
-
-* Adds encoding benchmarks
-
-Signed-off-by: Cyril Tovena
-
-* Adds snappy encoding.
-
-Signed-off-by: Cyril Tovena
-
-* Adds chunk size test
-
-Signed-off-by: Cyril Tovena
-
-* Adds snappy v2
-
-Signed-off-by: Cyril Tovena
-
-* Improve benchmarks
-
-Signed-off-by: Cyril Tovena
-
-* Remove chunkenc
-
-Signed-off-by: Cyril Tovena
-
-* Update lz4 to latest master version.
-
-Signed-off-by: Peter Štibraný
-
-* Use temporary buffer in serialise method to avoid allocations when doing string -> byte conversion.
-It also makes code little more readable. We pool those buffers for reuse.
-
-Signed-off-by: Peter Štibraný
-
-* Added gzip -1 for comparison.
-
-Signed-off-by: Peter Štibraný
-
-* Initialize reader and buffered reader lazily.
-
-This helps with reader/buffered reader reuse.
-
-Signed-off-by: Peter Štibraný
-
-* Don't keep entries, extracted generateData function
-
-(mostly to get more understandable profile)
-
-Signed-off-by: Peter Štibraný
-
-* Improve test and benchmark to cover all encodings.
-
-Signed-off-by: Cyril Tovena
-
-* Adds support for a new chunk format with encoding info.
-
-Signed-off-by: Cyril Tovena
-
-* Ingesters now support encoding config.
-
-Signed-off-by: Cyril Tovena
-
-* Add support for no compression.
-
-Signed-off-by: Cyril Tovena
-
-* Add docs
-
-Signed-off-by: Cyril Tovena
-
-* Remove default Gzip for ByteChunk.
-
-Signed-off-by: Cyril Tovena
-
-* Removes none, snappyv2 and gzip-1
-
-Signed-off-by: Cyril Tovena
-
-* Move log test lines to testdata and add supported encoding stringer
-
-Signed-off-by: Cyril Tovena
-
-* got linted
-
-Signed-off-by: Cyril Tovena "
-76e2a1402977069d6eca5eb525901a88dc577743,2021-10-29 17:29:48,lingpeng0314,"add group_{left,right} to LogQL (#4510)
-
-* Add group_left/group_right support
-
-* Update docs/sources/logql/_index.md
-
-* Minor change for a test case
-
-* removes CardManyToMany
-
-* removes now-unused IsSetOperator helper
-
-Co-authored-by: Owen Diehl